author     Andras Becsi <andras.becsi@digia.com>            2014-03-18 13:16:26 +0100
committer  Frederik Gladhorn <frederik.gladhorn@digia.com>  2014-03-20 15:55:39 +0100
commit     3f0f86b0caed75241fa71c95a5d73bc0164348c5 (patch)
tree       92b9fb00f2e9e90b0be2262093876d4f43b6cd13 /chromium/v8
parent     e90d7c4b152c56919d963987e2503f9909a666d2 (diff)
Update to new stable branch 1750

This also includes an updated ninja and the chromium dependencies needed on
Windows.

Change-Id: Icd597d80ed3fa4425933c9f1334c3c2e31291c42
Reviewed-by: Zoltan Arvai <zarvai@inf.u-szeged.hu>
Reviewed-by: Zeno Albisser <zeno.albisser@digia.com>
Diffstat (limited to 'chromium/v8')
-rw-r--r--  chromium/v8/.clang-format | 3
-rw-r--r--  chromium/v8/ChangeLog | 407
-rw-r--r--  chromium/v8/Makefile | 39
-rw-r--r--  chromium/v8/Makefile.nacl | 3
-rw-r--r--  chromium/v8/OWNERS | 4
-rw-r--r--  chromium/v8/PRESUBMIT.py | 31
-rw-r--r--[-rwxr-xr-x]  chromium/v8/WATCHLISTS (renamed from chromium/v8/tools/status-file-converter.py) | 27
-rw-r--r--  chromium/v8/benchmarks/deltablue.js | 26
-rw-r--r--  chromium/v8/build/all.gyp | 9
-rw-r--r--  chromium/v8/build/features.gypi | 22
-rw-r--r--  chromium/v8/build/standalone.gypi | 28
-rw-r--r--  chromium/v8/build/toolchain.gypi | 35
-rwxr-xr-x  chromium/v8/include/v8-debug.h | 4
-rw-r--r--  chromium/v8/include/v8-platform.h | 86
-rw-r--r--  chromium/v8/include/v8-preparser.h | 84
-rw-r--r--  chromium/v8/include/v8-profiler.h | 33
-rw-r--r--  chromium/v8/include/v8-testing.h | 4
-rw-r--r--  chromium/v8/include/v8.h | 1238
-rw-r--r--  chromium/v8/include/v8config.h | 26
-rw-r--r--  chromium/v8/preparser/preparser-process.cc | 372
-rw-r--r--  chromium/v8/samples/lineprocessor.cc | 37
-rw-r--r--  chromium/v8/samples/process.cc | 67
-rw-r--r--  chromium/v8/samples/samples.gyp | 8
-rw-r--r--  chromium/v8/samples/shell.cc | 64
-rw-r--r--  chromium/v8/src/OWNERS | 2
-rw-r--r--  chromium/v8/src/accessors.cc | 117
-rw-r--r--  chromium/v8/src/accessors.h | 9
-rw-r--r--  chromium/v8/src/allocation-site-scopes.cc | 102
-rw-r--r--  chromium/v8/src/allocation-site-scopes.h | 124
-rw-r--r--  chromium/v8/src/allocation-tracker.cc | 280
-rw-r--r--  chromium/v8/src/allocation-tracker.h | 137
-rw-r--r--  chromium/v8/src/allocation.cc | 20
-rw-r--r--  chromium/v8/src/allocation.h | 28
-rw-r--r--  chromium/v8/src/api.cc | 1404
-rw-r--r--  chromium/v8/src/api.h | 30
-rw-r--r--  chromium/v8/src/apinatives.js | 1
-rw-r--r--  chromium/v8/src/arguments.cc | 21
-rw-r--r--  chromium/v8/src/arguments.h | 29
-rw-r--r--  chromium/v8/src/arm/assembler-arm-inl.h | 90
-rw-r--r--  chromium/v8/src/arm/assembler-arm.cc | 206
-rw-r--r--  chromium/v8/src/arm/assembler-arm.h | 117
-rw-r--r--  chromium/v8/src/arm/builtins-arm.cc | 169
-rw-r--r--  chromium/v8/src/arm/code-stubs-arm.cc | 1598
-rw-r--r--  chromium/v8/src/arm/code-stubs-arm.h | 30
-rw-r--r--  chromium/v8/src/arm/codegen-arm.cc | 120
-rw-r--r--  chromium/v8/src/arm/codegen-arm.h | 1
-rw-r--r--  chromium/v8/src/arm/constants-arm.h | 3
-rw-r--r--  chromium/v8/src/arm/deoptimizer-arm.cc | 112
-rw-r--r--  chromium/v8/src/arm/disasm-arm.cc | 8
-rw-r--r--  chromium/v8/src/arm/frames-arm.h | 7
-rw-r--r--  chromium/v8/src/arm/full-codegen-arm.cc | 520
-rw-r--r--  chromium/v8/src/arm/ic-arm.cc | 80
-rw-r--r--  chromium/v8/src/arm/lithium-arm.cc | 531
-rw-r--r--  chromium/v8/src/arm/lithium-arm.h | 547
-rw-r--r--  chromium/v8/src/arm/lithium-codegen-arm.cc | 1098
-rw-r--r--  chromium/v8/src/arm/lithium-codegen-arm.h | 77
-rw-r--r--  chromium/v8/src/arm/lithium-gap-resolver-arm.cc | 4
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.cc | 595
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.h | 112
-rw-r--r--  chromium/v8/src/arm/regexp-macro-assembler-arm.h | 5
-rw-r--r--  chromium/v8/src/arm/simulator-arm.cc | 79
-rw-r--r--  chromium/v8/src/arm/simulator-arm.h | 11
-rw-r--r--  chromium/v8/src/arm/stub-cache-arm.cc | 1109
-rw-r--r--  chromium/v8/src/array-iterator.js | 24
-rw-r--r--  chromium/v8/src/array.js | 80
-rw-r--r--  chromium/v8/src/arraybuffer.js | 10
-rw-r--r--  chromium/v8/src/assembler.cc | 54
-rw-r--r--  chromium/v8/src/assembler.h | 34
-rw-r--r--  chromium/v8/src/ast.cc | 333
-rw-r--r--  chromium/v8/src/ast.h | 657
-rw-r--r--  chromium/v8/src/bootstrapper.cc | 162
-rw-r--r--  chromium/v8/src/bootstrapper.h | 3
-rw-r--r--  chromium/v8/src/builtins.cc | 154
-rw-r--r--  chromium/v8/src/builtins.h | 175
-rw-r--r--  chromium/v8/src/checks.cc | 47
-rw-r--r--  chromium/v8/src/checks.h | 21
-rw-r--r--  chromium/v8/src/code-stubs-hydrogen.cc | 462
-rw-r--r--  chromium/v8/src/code-stubs.cc | 198
-rw-r--r--  chromium/v8/src/code-stubs.h | 558
-rw-r--r--  chromium/v8/src/codegen.cc | 39
-rw-r--r--  chromium/v8/src/codegen.h | 2
-rw-r--r--  chromium/v8/src/compiler.cc | 274
-rw-r--r--  chromium/v8/src/compiler.h | 42
-rw-r--r--  chromium/v8/src/contexts.cc | 2
-rw-r--r--  chromium/v8/src/contexts.h | 8
-rw-r--r--  chromium/v8/src/conversions-inl.h | 4
-rw-r--r--  chromium/v8/src/conversions.cc | 21
-rw-r--r--  chromium/v8/src/counters.h | 33
-rw-r--r--  chromium/v8/src/cpu-profiler.cc | 30
-rw-r--r--  chromium/v8/src/cpu-profiler.h | 2
-rw-r--r--  chromium/v8/src/d8-debug.cc | 39
-rw-r--r--  chromium/v8/src/d8-posix.cc | 158
-rw-r--r--  chromium/v8/src/d8-readline.cc | 23
-rw-r--r--  chromium/v8/src/d8-windows.cc | 2
-rw-r--r--  chromium/v8/src/d8.cc | 287
-rw-r--r--  chromium/v8/src/d8.gyp | 8
-rw-r--r--  chromium/v8/src/d8.h | 14
-rw-r--r--  chromium/v8/src/d8.js | 14
-rw-r--r--  chromium/v8/src/date.js | 35
-rw-r--r--  chromium/v8/src/debug-debugger.js | 4
-rw-r--r--  chromium/v8/src/debug.cc | 38
-rw-r--r--  chromium/v8/src/debug.h | 1
-rw-r--r--  chromium/v8/src/default-platform.cc | 56
-rw-r--r--  chromium/v8/src/default-platform.h (renamed from chromium/v8/src/marking-thread.h) | 41
-rw-r--r--  chromium/v8/src/deoptimizer.cc | 744
-rw-r--r--  chromium/v8/src/deoptimizer.h | 127
-rw-r--r--  chromium/v8/src/disassembler.cc | 4
-rw-r--r--  chromium/v8/src/elements-kind.cc | 8
-rw-r--r--  chromium/v8/src/elements-kind.h | 4
-rw-r--r--  chromium/v8/src/elements.cc | 2
-rw-r--r--  chromium/v8/src/execution.cc | 18
-rw-r--r--  chromium/v8/src/execution.h | 2
-rw-r--r--  chromium/v8/src/extensions/externalize-string-extension.cc | 21
-rw-r--r--  chromium/v8/src/extensions/externalize-string-extension.h | 3
-rw-r--r--  chromium/v8/src/extensions/free-buffer-extension.cc | 60
-rw-r--r--  chromium/v8/src/extensions/free-buffer-extension.h | 49
-rw-r--r--  chromium/v8/src/extensions/gc-extension.cc | 3
-rw-r--r--  chromium/v8/src/extensions/gc-extension.h | 3
-rw-r--r--  chromium/v8/src/extensions/statistics-extension.cc | 102
-rw-r--r--  chromium/v8/src/extensions/statistics-extension.h | 3
-rw-r--r--  chromium/v8/src/factory.cc | 212
-rw-r--r--  chromium/v8/src/factory.h | 138
-rw-r--r--  chromium/v8/src/flag-definitions.h | 195
-rw-r--r--  chromium/v8/src/flags.cc | 37
-rw-r--r--  chromium/v8/src/frames.cc | 19
-rw-r--r--  chromium/v8/src/frames.h | 24
-rw-r--r--  chromium/v8/src/full-codegen.cc | 161
-rw-r--r--  chromium/v8/src/full-codegen.h | 153
-rw-r--r--  chromium/v8/src/func-name-inferrer.cc | 2
-rw-r--r--  chromium/v8/src/global-handles.cc | 2
-rw-r--r--  chromium/v8/src/globals.h | 31
-rw-r--r--  chromium/v8/src/handles-inl.h | 11
-rw-r--r--  chromium/v8/src/handles.cc | 142
-rw-r--r--  chromium/v8/src/handles.h | 41
-rw-r--r--  chromium/v8/src/harmony-array.js | 2
-rw-r--r--  chromium/v8/src/harmony-math.js | 60
-rw-r--r--  chromium/v8/src/harmony-string.js | 2
-rw-r--r--  chromium/v8/src/heap-inl.h | 112
-rw-r--r--  chromium/v8/src/heap-profiler.cc | 35
-rw-r--r--  chromium/v8/src/heap-profiler.h | 30
-rw-r--r--  chromium/v8/src/heap-snapshot-generator-inl.h | 1
-rw-r--r--  chromium/v8/src/heap-snapshot-generator.cc | 433
-rw-r--r--  chromium/v8/src/heap-snapshot-generator.h | 55
-rw-r--r--  chromium/v8/src/heap.cc | 1021
-rw-r--r--  chromium/v8/src/heap.h | 418
-rw-r--r--  chromium/v8/src/hydrogen-alias-analysis.h | 9
-rw-r--r--  chromium/v8/src/hydrogen-bce.cc | 163
-rw-r--r--  chromium/v8/src/hydrogen-canonicalize.cc | 8
-rw-r--r--  chromium/v8/src/hydrogen-check-elimination.cc | 536
-rw-r--r--  chromium/v8/src/hydrogen-check-elimination.h | 80
-rw-r--r--  chromium/v8/src/hydrogen-dce.cc | 74
-rw-r--r--  chromium/v8/src/hydrogen-dce.h | 3
-rw-r--r--  chromium/v8/src/hydrogen-deoptimizing-mark.cc | 126
-rw-r--r--  chromium/v8/src/hydrogen-environment-liveness.cc | 2
-rw-r--r--  chromium/v8/src/hydrogen-escape-analysis.cc | 16
-rw-r--r--  chromium/v8/src/hydrogen-flow-engine.h | 242
-rw-r--r--  chromium/v8/src/hydrogen-gvn.cc | 52
-rw-r--r--  chromium/v8/src/hydrogen-instructions.cc | 438
-rw-r--r--  chromium/v8/src/hydrogen-instructions.h | 1945
-rw-r--r--  chromium/v8/src/hydrogen-load-elimination.cc | 510
-rw-r--r--  chromium/v8/src/hydrogen-load-elimination.h (renamed from chromium/v8/src/allocation-inl.h) | 25
-rw-r--r--  chromium/v8/src/hydrogen-mark-unreachable.cc (renamed from chromium/v8/src/marking-thread.cc) | 86
-rw-r--r--  chromium/v8/src/hydrogen-mark-unreachable.h (renamed from chromium/v8/src/hydrogen-deoptimizing-mark.h) | 19
-rw-r--r--  chromium/v8/src/hydrogen-minus-zero.cc | 8
-rw-r--r--  chromium/v8/src/hydrogen-osr.cc | 27
-rw-r--r--  chromium/v8/src/hydrogen-osr.h | 6
-rw-r--r--  chromium/v8/src/hydrogen-redundant-phi.cc | 70
-rw-r--r--  chromium/v8/src/hydrogen-redundant-phi.h | 3
-rw-r--r--  chromium/v8/src/hydrogen-representation-changes.cc | 5
-rw-r--r--  chromium/v8/src/hydrogen.cc | 3523
-rw-r--r--  chromium/v8/src/hydrogen.h | 676
-rw-r--r--  chromium/v8/src/i18n.cc | 20
-rw-r--r--  chromium/v8/src/i18n.js | 27
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32-inl.h | 33
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.cc | 193
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.h | 85
-rw-r--r--  chromium/v8/src/ia32/builtins-ia32.cc | 113
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.cc | 1901
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.h | 31
-rw-r--r--  chromium/v8/src/ia32/codegen-ia32.cc | 61
-rw-r--r--  chromium/v8/src/ia32/deoptimizer-ia32.cc | 102
-rw-r--r--  chromium/v8/src/ia32/disasm-ia32.cc | 52
-rw-r--r--  chromium/v8/src/ia32/full-codegen-ia32.cc | 386
-rw-r--r--  chromium/v8/src/ia32/ic-ia32.cc | 68
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.cc | 1032
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.h | 87
-rw-r--r--  chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc | 20
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.cc | 404
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.h | 199
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.cc | 456
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.h | 95
-rw-r--r--  chromium/v8/src/ia32/simulator-ia32.cc | 1
-rw-r--r--  chromium/v8/src/ia32/stub-cache-ia32.cc | 1176
-rw-r--r--  chromium/v8/src/ic-inl.h | 64
-rw-r--r--  chromium/v8/src/ic.cc | 2481
-rw-r--r--  chromium/v8/src/ic.h | 612
-rw-r--r--  chromium/v8/src/incremental-marking.cc | 4
-rw-r--r--  chromium/v8/src/isolate-inl.h | 5
-rw-r--r--  chromium/v8/src/isolate.cc | 381
-rw-r--r--  chromium/v8/src/isolate.h | 231
-rw-r--r--  chromium/v8/src/json-stringifier.h | 1
-rw-r--r--  chromium/v8/src/json.js | 4
-rw-r--r--  chromium/v8/src/jsregexp.cc | 4
-rw-r--r--  chromium/v8/src/list.h | 4
-rw-r--r--  chromium/v8/src/lithium-allocator-inl.h | 10
-rw-r--r--  chromium/v8/src/lithium-allocator.cc | 47
-rw-r--r--  chromium/v8/src/lithium-allocator.h | 15
-rw-r--r--  chromium/v8/src/lithium-codegen.cc | 150
-rw-r--r--  chromium/v8/src/lithium-codegen.h | 96
-rw-r--r--  chromium/v8/src/lithium.cc | 16
-rw-r--r--  chromium/v8/src/lithium.h | 5
-rw-r--r--  chromium/v8/src/liveedit-debugger.js | 76
-rw-r--r--  chromium/v8/src/liveedit.cc | 26
-rw-r--r--  chromium/v8/src/log-utils.h | 3
-rw-r--r--  chromium/v8/src/log.cc | 268
-rw-r--r--  chromium/v8/src/log.h | 9
-rw-r--r--  chromium/v8/src/macros.py | 12
-rw-r--r--  chromium/v8/src/mark-compact.cc | 179
-rw-r--r--  chromium/v8/src/mark-compact.h | 13
-rw-r--r--  chromium/v8/src/math.js | 173
-rw-r--r--  chromium/v8/src/messages.js | 110
-rw-r--r--  chromium/v8/src/mips/assembler-mips-inl.h | 39
-rw-r--r--  chromium/v8/src/mips/assembler-mips.cc | 10
-rw-r--r--  chromium/v8/src/mips/assembler-mips.h | 54
-rw-r--r--  chromium/v8/src/mips/builtins-mips.cc | 167
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.cc | 1423
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.h | 45
-rw-r--r--  chromium/v8/src/mips/codegen-mips.cc | 103
-rw-r--r--  chromium/v8/src/mips/codegen-mips.h | 1
-rw-r--r--  chromium/v8/src/mips/deoptimizer-mips.cc | 92
-rw-r--r--  chromium/v8/src/mips/frames-mips.h | 3
-rw-r--r--  chromium/v8/src/mips/full-codegen-mips.cc | 463
-rw-r--r--  chromium/v8/src/mips/ic-mips.cc | 80
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.cc | 1065
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.h | 82
-rw-r--r--  chromium/v8/src/mips/lithium-gap-resolver-mips.cc | 5
-rw-r--r--  chromium/v8/src/mips/lithium-mips.cc | 553
-rw-r--r--  chromium/v8/src/mips/lithium-mips.h | 547
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.cc | 524
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.h | 131
-rw-r--r--  chromium/v8/src/mips/regexp-macro-assembler-mips.cc | 81
-rw-r--r--  chromium/v8/src/mips/regexp-macro-assembler-mips.h | 8
-rw-r--r--  chromium/v8/src/mips/simulator-mips.cc | 17
-rw-r--r--  chromium/v8/src/mips/simulator-mips.h | 1
-rw-r--r--  chromium/v8/src/mips/stub-cache-mips.cc | 1098
-rw-r--r--  chromium/v8/src/mirror-debugger.js | 8
-rw-r--r--  chromium/v8/src/mksnapshot.cc | 55
-rw-r--r--  chromium/v8/src/msan.h (renamed from chromium/v8/src/v8preparserdll-main.cc) | 32
-rw-r--r--  chromium/v8/src/object-observe.js | 105
-rw-r--r--  chromium/v8/src/objects-debug.cc | 49
-rw-r--r--  chromium/v8/src/objects-inl.h | 592
-rw-r--r--  chromium/v8/src/objects-printer.cc | 70
-rw-r--r--  chromium/v8/src/objects-visiting-inl.h | 54
-rw-r--r--  chromium/v8/src/objects-visiting.cc | 3
-rw-r--r--  chromium/v8/src/objects-visiting.h | 3
-rw-r--r--  chromium/v8/src/objects.cc | 4751
-rw-r--r--  chromium/v8/src/objects.h | 1362
-rw-r--r--  chromium/v8/src/optimizing-compiler-thread.cc | 261
-rw-r--r--  chromium/v8/src/optimizing-compiler-thread.h | 95
-rw-r--r--  chromium/v8/src/parser.cc | 966
-rw-r--r--  chromium/v8/src/parser.h | 130
-rw-r--r--  chromium/v8/src/platform-cygwin.cc | 12
-rw-r--r--  chromium/v8/src/platform-freebsd.cc | 13
-rw-r--r--  chromium/v8/src/platform-linux.cc | 24
-rw-r--r--  chromium/v8/src/platform-macos.cc | 28
-rw-r--r--  chromium/v8/src/platform-openbsd.cc | 35
-rw-r--r--  chromium/v8/src/platform-posix.cc | 53
-rw-r--r--  chromium/v8/src/platform-posix.h | 106
-rw-r--r--  chromium/v8/src/platform-solaris.cc | 20
-rw-r--r--  chromium/v8/src/platform-win32.cc | 202
-rw-r--r--  chromium/v8/src/platform.h | 13
-rw-r--r--  chromium/v8/src/platform/elapsed-timer.h | 6
-rw-r--r--  chromium/v8/src/platform/mutex.h | 4
-rw-r--r--  chromium/v8/src/platform/semaphore.h | 4
-rw-r--r--  chromium/v8/src/platform/time.cc | 134
-rw-r--r--  chromium/v8/src/platform/time.h | 7
-rw-r--r--  chromium/v8/src/preparser-api.cc | 196
-rw-r--r--  chromium/v8/src/preparser.cc | 865
-rw-r--r--  chromium/v8/src/preparser.h | 404
-rw-r--r--  chromium/v8/src/prettyprinter.cc | 68
-rw-r--r--  chromium/v8/src/profile-generator-inl.h | 33
-rw-r--r--  chromium/v8/src/profile-generator.cc | 100
-rw-r--r--  chromium/v8/src/profile-generator.h | 69
-rw-r--r--  chromium/v8/src/promise.js | 305
-rw-r--r--  chromium/v8/src/property-details.h | 60
-rw-r--r--  chromium/v8/src/property.cc | 5
-rw-r--r--  chromium/v8/src/property.h | 47
-rw-r--r--  chromium/v8/src/proxy.js | 4
-rw-r--r--  chromium/v8/src/regexp.js | 8
-rw-r--r--  chromium/v8/src/rewriter.cc | 14
-rw-r--r--  chromium/v8/src/runtime-profiler.cc | 8
-rw-r--r--  chromium/v8/src/runtime.cc | 1183
-rw-r--r--  chromium/v8/src/runtime.h | 61
-rw-r--r--  chromium/v8/src/runtime.js | 16
-rw-r--r--  chromium/v8/src/safepoint-table.cc | 12
-rw-r--r--  chromium/v8/src/safepoint-table.h | 4
-rw-r--r--  chromium/v8/src/sampler.cc | 6
-rw-r--r--  chromium/v8/src/scanner.cc | 140
-rw-r--r--  chromium/v8/src/scanner.h | 55
-rw-r--r--  chromium/v8/src/scopeinfo.cc | 29
-rw-r--r--  chromium/v8/src/scopes.cc | 8
-rw-r--r--  chromium/v8/src/serialize.cc | 196
-rw-r--r--  chromium/v8/src/serialize.h | 15
-rw-r--r--  chromium/v8/src/snapshot-common.cc | 11
-rw-r--r--  chromium/v8/src/spaces-inl.h | 19
-rw-r--r--  chromium/v8/src/spaces.cc | 256
-rw-r--r--  chromium/v8/src/spaces.h | 231
-rw-r--r--  chromium/v8/src/store-buffer-inl.h | 1
-rw-r--r--  chromium/v8/src/store-buffer.cc | 2
-rw-r--r--  chromium/v8/src/string-stream.cc | 5
-rw-r--r--  chromium/v8/src/string.js | 9
-rw-r--r--  chromium/v8/src/stub-cache.cc | 1200
-rw-r--r--  chromium/v8/src/stub-cache.h | 583
-rw-r--r--  chromium/v8/src/sweeper-thread.cc | 10
-rw-r--r--  chromium/v8/src/sweeper-thread.h | 2
-rw-r--r--  chromium/v8/src/token.h | 4
-rw-r--r--  chromium/v8/src/transitions-inl.h | 4
-rw-r--r--  chromium/v8/src/trig-table.h | 61
-rw-r--r--  chromium/v8/src/type-info.cc | 416
-rw-r--r--  chromium/v8/src/type-info.h | 99
-rw-r--r--  chromium/v8/src/typedarray.js | 329
-rw-r--r--  chromium/v8/src/types.cc | 293
-rw-r--r--  chromium/v8/src/types.h | 130
-rw-r--r--  chromium/v8/src/typing.cc | 129
-rw-r--r--  chromium/v8/src/unicode.h | 2
-rw-r--r--  chromium/v8/src/unique.h | 102
-rw-r--r--  chromium/v8/src/utils.h | 6
-rw-r--r--  chromium/v8/src/utils/random-number-generator.cc | 19
-rw-r--r--  chromium/v8/src/utils/random-number-generator.h | 4
-rw-r--r--  chromium/v8/src/v8-counters.cc | 8
-rw-r--r--  chromium/v8/src/v8-counters.h | 42
-rw-r--r--  chromium/v8/src/v8.cc | 128
-rw-r--r--  chromium/v8/src/v8.h | 11
-rw-r--r--  chromium/v8/src/v8conversions.h | 32
-rw-r--r--  chromium/v8/src/v8globals.h | 5
-rw-r--r--  chromium/v8/src/v8natives.js | 91
-rw-r--r--  chromium/v8/src/v8threads.cc | 80
-rw-r--r--  chromium/v8/src/v8threads.h | 28
-rw-r--r--  chromium/v8/src/v8utils.h | 55
-rw-r--r--  chromium/v8/src/version.cc | 6
-rw-r--r--  chromium/v8/src/win32-math.cc | 2
-rw-r--r--  chromium/v8/src/win32-math.h | 4
-rw-r--r--  chromium/v8/src/x64/assembler-x64-inl.h | 38
-rw-r--r--  chromium/v8/src/x64/assembler-x64.cc | 355
-rw-r--r--  chromium/v8/src/x64/assembler-x64.h | 199
-rw-r--r--  chromium/v8/src/x64/builtins-x64.cc | 110
-rw-r--r--  chromium/v8/src/x64/code-stubs-x64.cc | 1260
-rw-r--r--  chromium/v8/src/x64/code-stubs-x64.h | 31
-rw-r--r--  chromium/v8/src/x64/codegen-x64.cc | 44
-rw-r--r--  chromium/v8/src/x64/codegen-x64.h | 2
-rw-r--r--  chromium/v8/src/x64/debug-x64.cc | 6
-rw-r--r--  chromium/v8/src/x64/deoptimizer-x64.cc | 91
-rw-r--r--  chromium/v8/src/x64/disasm-x64.cc | 81
-rw-r--r--  chromium/v8/src/x64/frames-x64.h | 6
-rw-r--r--  chromium/v8/src/x64/full-codegen-x64.cc | 389
-rw-r--r--  chromium/v8/src/x64/ic-x64.cc | 68
-rw-r--r--  chromium/v8/src/x64/lithium-codegen-x64.cc | 1082
-rw-r--r--  chromium/v8/src/x64/lithium-codegen-x64.h | 84
-rw-r--r--  chromium/v8/src/x64/lithium-gap-resolver-x64.cc | 10
-rw-r--r--  chromium/v8/src/x64/lithium-x64.cc | 491
-rw-r--r--  chromium/v8/src/x64/lithium-x64.h | 556
-rw-r--r--  chromium/v8/src/x64/macro-assembler-x64.cc | 810
-rw-r--r--  chromium/v8/src/x64/macro-assembler-x64.h | 176
-rw-r--r--  chromium/v8/src/x64/regexp-macro-assembler-x64.cc | 6
-rw-r--r--  chromium/v8/src/x64/simulator-x64.cc | 1
-rw-r--r--  chromium/v8/src/x64/stub-cache-x64.cc | 1258
-rw-r--r--  chromium/v8/src/zone.cc | 29
-rw-r--r--  chromium/v8/test/cctest/cctest.gyp | 8
-rwxr-xr-x  chromium/v8/tools/android-sync.sh | 3
-rw-r--r--  chromium/v8/tools/consarray.js | 1
-rw-r--r--  chromium/v8/tools/gen-postmortem-metadata.py | 12
-rw-r--r--  chromium/v8/tools/generate-trig-table.py | 83
-rwxr-xr-x  chromium/v8/tools/grokdump.py | 13
-rw-r--r--  chromium/v8/tools/gyp/v8.gyp | 84
-rwxr-xr-x[-rw-r--r--]  chromium/v8/tools/js2c.py | 98
-rw-r--r--  chromium/v8/tools/lexer-shell.cc | 267
-rw-r--r--  chromium/v8/tools/lexer-shell.gyp (renamed from chromium/v8/preparser/preparser.gyp) | 21
-rwxr-xr-x  chromium/v8/tools/merge-to-branch.sh | 48
-rwxr-xr-x  chromium/v8/tools/presubmit.py | 22
-rw-r--r--  chromium/v8/tools/profviz/composer.js | 2
-rwxr-xr-x  chromium/v8/tools/push-to-trunk.sh | 3
-rwxr-xr-x  chromium/v8/tools/push-to-trunk/auto_roll.py | 111
-rw-r--r--  chromium/v8/tools/push-to-trunk/common_includes.py | 486
-rwxr-xr-x  chromium/v8/tools/push-to-trunk/push_to_trunk.py | 581
-rw-r--r--  chromium/v8/tools/push-to-trunk/test_scripts.py | 730
-rwxr-xr-x  chromium/v8/tools/run-deopt-fuzzer.py | 4
-rwxr-xr-x  chromium/v8/tools/run-tests.py | 99
-rw-r--r--  chromium/v8/tools/sodium/index.html | 36
-rw-r--r--  chromium/v8/tools/sodium/sodium.js | 409
-rwxr-xr-x  chromium/v8/tools/sodium/styles.css | 70
-rwxr-xr-x  chromium/v8/tools/test-push-to-trunk.sh | 246
-rw-r--r--  chromium/v8/tools/testrunner/README | 6
-rw-r--r--  chromium/v8/tools/testrunner/local/junit_output.py | 1
-rw-r--r--  chromium/v8/tools/testrunner/local/old_statusfile.py | 462
-rw-r--r--  chromium/v8/tools/testrunner/local/progress.py | 1
-rw-r--r--  chromium/v8/tools/testrunner/local/statusfile.py | 36
-rw-r--r--  chromium/v8/tools/testrunner/local/testsuite.py | 29
-rw-r--r--  chromium/v8/tools/testrunner/local/utils.py | 4
-rw-r--r--  chromium/v8/tools/testrunner/objects/context.py | 7
-rw-r--r--  chromium/v8/tools/tickprocessor.js | 1
-rw-r--r--  chromium/v8/tools/v8heapconst.py | 244
401 files changed, 41466 insertions, 35086 deletions
diff --git a/chromium/v8/.clang-format b/chromium/v8/.clang-format
new file mode 100644
index 00000000000..8fa6b1aaedc
--- /dev/null
+++ b/chromium/v8/.clang-format
@@ -0,0 +1,3 @@
+# Defines the Google C++ style for automatic reformatting.
+# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
+BasedOnStyle: Google
diff --git a/chromium/v8/ChangeLog b/chromium/v8/ChangeLog
index b3eba3661ad..39a6854eaa6 100644
--- a/chromium/v8/ChangeLog
+++ b/chromium/v8/ChangeLog
@@ -1,3 +1,410 @@
+2013-12-03: Version 3.23.17
+
+ Performance and stability improvements on all platforms.
+
+
+2013-12-02: Version 3.23.16
+
+ Array builtins need to be prevented from changing frozen objects, and
+ changing structure on sealed objects (Chromium issue 299979).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-29: Version 3.23.15
+
+ Fix context register allocation in LTransitionElementsKind
+ (Chromium issue 324306).
+
+ Fix bug in inlining Function.apply (Chromium issue 323942).
+
+ Ensure that length is Smi in TypedArrayFromArrayLike constructor
+ (Chromium issue 324028).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-28: Version 3.23.14
+
+ Shorten autogenerated error message (issue 3019).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-27: Version 3.23.13
+
+ Increase precision for base conversion for large integers (issue 3025).
+
+ Flatten cons string for single character substrings (Chromium issue
+ 323041).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-26: Version 3.23.12
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-25: Version 3.23.11
+
+ Deprecate old versions of Isolate::SetData and GetData.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-22: Version 3.23.10
+
+ Remove preemption thread and API.
+ (issue 3004)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-21: Version 3.23.9
+
+ API: Change AdjustAmountOfExternalAllocatedMemory calls to use int64_t
+ instead of intptr_t.
+
+ Remove deprecated v8::SetResourceConstraints without Isolate parameter.
+
+ Remove deprecated v8-defaults.h and defaults.cc.
+ (Chromium issue 312241)
+
+ Make it possible to add more than one piece of embedder data to
+ isolates.
+ (Chromium issue 317398)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-20: Version 3.23.8
+
+ Fixed crashes exposed through fuzzing.
+ (Chromium issue 320948)
+
+ Deprecated v8::External::New without Isolate parameter.
+
+ Made number of available threads isolate-dependent and exposed it to
+ ResourceConstraints.
+ (issue 2991)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-19: Version 3.23.7
+
+ Bugfix: dependent code field in AllocationSite was keeping code objects
+ alive even after context death.
+ (Chromium issue 320532)
+
+ Fixed data view accessors to throw exceptions on offsets bigger than
+ size_t.
+ (issue 3013)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-18: Version 3.23.6
+
+ Limit size of dehoistable array indices.
+ (Chromium issues 319835, 319860)
+
+ Limit the size for typed arrays to MaxSmi.
+ (Chromium issue 319722)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-15: Version 3.23.5
+
+ Fixed missing type feedback check for Generic*String addition.
+ (Chromium issue 318671)
+
+ Fixed duplicate check in DependentCode::Insert.
+ (Chromium issue 318454)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-14: Version 3.23.4
+
+ Fixed overflow in TypedArray initialization function.
+ (Chromium issue 319120)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-13: Version 3.23.3
+
+ Fixed compilation with GCC 4.8.
+ (issue 2767, 2149)
+
+ Added explicit Isolate parameter to External::New.
+ (Chromium issue 266838)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-12: Version 3.23.2
+
+ Fixed --extra-code flag for snapshot creation.
+ (issue 2994)
+
+ Fixed error message wording when instanceof throws.
+ (Chromium issue 82797, issue 1593)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-08: Version 3.23.1
+
+ Made HCapturedObjects non-deletable for DCE. (issue 2987)
+
+ Use a fixed random seed by default. (issue 1880, 2885)
+
+ Fixed y-umlaut to uppercase. (issue 2984)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-11-06: Version 3.23.0
+
+ Fixed loading message from an Error object. (Chromium issue 306220)
+
+ Made Object.freeze/seal/preventExtensions observable. (issue 2975, 2941)
+
+ Made snapshots reproducible. (issue 2885)
+
+ Added missing negative dictionary lookup to NonexistentHandlerFrontend.
+ (issue 2980)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-31: Version 3.22.24
+
+ Fixed uint32-to-smi conversion in Lithium.
+ (Chromium issue 309623)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-28: Version 3.22.23
+
+ Renamed deprecated __attribute__((no_address_safety_analysis)) to
+ __attribute__((no_sanitize_address)) (Chromium issue 311283)
+
+ Defined DEBUG for v8_optimized_debug=2
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-25: Version 3.22.22
+
+ Record allocation stack traces. (Chromium issue 277984, v8:2949)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-24: Version 3.22.21
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-24: Version 3.22.20
+
+ Made Array.prototype.pop throw if the last element is not configurable.
+
+ Fixed HObjectAccess for loads from migrating prototypes.
+ (Chromium issue 305309)
+
+ Enabled preaging of code objects when --optimize-for-size.
+ (Chromium issue 280984)
+
+ Exposed v8::Function::GetDisplayName to public API.
+ (Chromium issue 17356)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-23: Version 3.22.19
+
+ Fix materialization of captured objects with field tracking.
+ (Chromium issue 298990)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-22: Version 3.22.18
+
+ Add tool to visualize machine code/lithium.
+
+ Handle misaligned loads and stores in load elimination. Do not track
+ misaligned loads and be conservative about invalidating misaligned
+ stores. (issue 2934)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-21: Version 3.22.17
+
+ Harmony: Implement Math.trunc and Math.sign. (issue 2938)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-21: Version 3.22.16
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-18: Version 3.22.15
+
+ Enabled calling the SetReference* & SetObjectGroupId functions with a
+ Persistent<SubclassOfValue>.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-17: Version 3.22.14
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-16: Version 3.22.13
+
+ Do not look up ArrayBuffer on global object in typed array constructor.
+ (issue 2931)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-15: Version 3.22.12
+
+ Added histograms to track fraction of heap spaces and percentage of
+ generated crankshaft code.
+
+ Moved v8_optimized_debug default value to standalone.gypi.
+
+ Track JS allocations as they arrive with no effect on performance
+ when tracking is switched off (Chromium issue 277984).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-14: Version 3.22.11
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-11: Version 3.22.10
+
+ Fixed timezone issues with date-time/parse-* tests.
+ (Chromium issue 2919)
+
+ Added column getter to CpuProfileNode (Chromium issue 302537)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-10: Version 3.22.9
+
+ Ensure only whitelisted stubs have sse2 versions in the snapshot.
+ (fix for chromium 304565)
+
+ Implement ArrayBuffer.isView.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-04: Version 3.22.8
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-03: Version 3.22.7
+
+ Debug: Allow stepping into on a given call frame
+ (Chromium issue 296963).
+
+ Always use timeGetTime() for TimeTicks::Now() on Windows
+ (Chromium issue 288924).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-02: Version 3.22.6
+
+ Performance and stability improvements on all platforms.
+
+
+2013-10-01: Version 3.22.5
+
+ Disabled externalization of sliced/cons strings in old pointer space
+ (Chromium issue 276357).
+
+ Turned on handle zapping for release builds
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-30: Version 3.22.4
+
+ Function::Call and Object::CallAsFunction APIs should allow v8::Value as
+ a receiver (issue 2915).
+
+ Removed unnecessary mutex (Chromium issue 291236).
+
+ Removed ArrayBufferView::BaseAddress method.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-27: Version 3.22.3
+
+ Added methods to enable configuration of ResourceConstraints based on
+ limits derived at runtime.
+ (Chromium issue 292928)
+
+ Added -optimize-for-size flag to optimize for memory size (will be used
+ by pre-aging CL), and removed the is_memory_constrained
+ ResourceConstraint.
+ (Chromium issue 292928)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-26: Version 3.22.2
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-25: Version 3.22.1
+
+ Sped up creating typed arrays from array-like objects.
+ (Chromium issue 270507)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-23: Version 3.22.0
+
+ LiveEdit to mark more closure functions for re-instantiation when scope
+ layout changes.
+ (issue 2872)
+
+ Made bounds check elimination iterative instead of recursive.
+ (Chromium issue 289706)
+
+ Turned on i18n support by default.
+
+ Set the proper instance-type on HAllocate in BuildFastLiteral.
+ (Chromium issue 284577)
+
+ Performance and stability improvements on all platforms.
+
+
2013-09-18: Version 3.21.17
Implemented local load/store elimination on basic blocks.
diff --git a/chromium/v8/Makefile b/chromium/v8/Makefile
index 288c257396d..2ff2cdbf075 100644
--- a/chromium/v8/Makefile
+++ b/chromium/v8/Makefile
@@ -76,10 +76,10 @@ ifeq ($(snapshot), off)
endif
# extrachecks=on/off
ifeq ($(extrachecks), on)
- GYPFLAGS += -Dv8_enable_extra_checks=1
+ GYPFLAGS += -Dv8_enable_extra_checks=1 -Dv8_enable_handle_zapping=1
endif
ifeq ($(extrachecks), off)
- GYPFLAGS += -Dv8_enable_extra_checks=0
+ GYPFLAGS += -Dv8_enable_extra_checks=0 -Dv8_enable_handle_zapping=0
endif
# gdbjit=on/off
ifeq ($(gdbjit), on)
@@ -94,7 +94,7 @@ ifeq ($(vtunejit), on)
endif
# optdebug=on
ifeq ($(optdebug), on)
- GYPFLAGS += -Dv8_optimized_debug=1
+ GYPFLAGS += -Dv8_optimized_debug=2
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
@@ -104,6 +104,10 @@ endif
ifeq ($(unalignedaccess), on)
GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
endif
+# randomseed=12345, disable random seed via randomseed=0
+ifdef randomseed
+ GYPFLAGS += -Dv8_random_seed=$(randomseed)
+endif
# soname_version=1.2.3
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
@@ -124,10 +128,15 @@ endif
ifeq ($(regexp), interpreted)
GYPFLAGS += -Dv8_interpreted_regexp=1
endif
-# i18nsupport=on
-ifeq ($(i18nsupport), on)
- GYPFLAGS += -Dv8_enable_i18n_support=1
+# i18nsupport=off
+ifeq ($(i18nsupport), off)
+ GYPFLAGS += -Dv8_enable_i18n_support=0
+ TESTFLAGS += --noi18n
endif
+# deprecation_warnings=on
+ifeq ($(deprecationwarnings), on)
+ GYPFLAGS += -Dv8_deprecation_warnings=1
+endif
# arm specific flags.
# armv7=false/true
ifeq ($(armv7), false)
@@ -217,8 +226,8 @@ NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
- build/toolchain.gypi preparser/preparser.gyp samples/samples.gyp \
- src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
+ build/toolchain.gypi samples/samples.gyp src/d8.gyp \
+ test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
@@ -323,7 +332,7 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 \
- --command-prefix="tools/android-run.py"
+ --command-prefix="tools/android-run.py" $(TESTFLAGS)
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
@@ -331,7 +340,7 @@ $(addsuffix .check, $(ANDROID_ARCHES)): \
$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
- --timeout=600 --nopresubmit \
+ --timeout=600 --nopresubmit --noi18n \
--command-prefix="tools/nacl-run.py"
$(addsuffix .check, $(NACL_ARCHES)): \
@@ -341,6 +350,16 @@ native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
+FASTTESTFLAGS = --flaky-tests=skip --slow-tests=skip --pass-fail-tests=skip \
+ --variants=default,stress
+FASTTESTMODES = ia32.release,x64.release,ia32.debug,x64.debug,arm.debug
+
+quickcheck:
+ @$(MAKE) all optdebug=on
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --arch-and-mode=$(FASTTESTMODES) $(FASTTESTFLAGS) $(TESTFLAGS)
+qc: quickcheck
+
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)
diff --git a/chromium/v8/Makefile.nacl b/chromium/v8/Makefile.nacl
index 02e83ef2bca..2c79ef113e3 100644
--- a/chromium/v8/Makefile.nacl
+++ b/chromium/v8/Makefile.nacl
@@ -74,6 +74,9 @@ endif
# For mksnapshot host generation.
GYPENV += host_os=${HOST_OS}
+# ICU doesn't support NaCl.
+GYPENV += v8_enable_i18n_support=0
+
NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_ARCHES))
.SECONDEXPANSION:
# For some reason the $$(basename $$@) expansion didn't work here...
diff --git a/chromium/v8/OWNERS b/chromium/v8/OWNERS
index 6fe40e21e33..450e9b217c9 100644
--- a/chromium/v8/OWNERS
+++ b/chromium/v8/OWNERS
@@ -2,12 +2,14 @@ bmeurer@chromium.org
danno@chromium.org
dslomov@chromium.org
hpayer@chromium.org
+ishell@chromium.org
jkummerow@chromium.org
-mmassi@chromium.org
+machenbach@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
rossberg@chromium.org
svenpanne@chromium.org
+titzer@chromium.org
ulan@chromium.org
vegorov@chromium.org
verwaest@chromium.org
diff --git a/chromium/v8/PRESUBMIT.py b/chromium/v8/PRESUBMIT.py
index 819331f9e5b..fe15157dde7 100644
--- a/chromium/v8/PRESUBMIT.py
+++ b/chromium/v8/PRESUBMIT.py
@@ -58,18 +58,43 @@ def _CommonChecks(input_api, output_api):
return results
+def _SkipTreeCheck(input_api, output_api):
+ """Check the env var whether we want to skip tree check.
+ Only skip if src/version.cc has been updated."""
+ src_version = 'src/version.cc'
+ FilterFile = lambda file: file.LocalPath() == src_version
+ if not input_api.AffectedSourceFiles(
+ lambda file: file.LocalPath() == src_version):
+ return False
+ return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
+
+
+def _CheckChangeLogFlag(input_api, output_api):
+ """Checks usage of LOG= flag in the commit message."""
+ results = []
+ if input_api.change.BUG and not 'LOG' in input_api.change.tags:
+ results.append(output_api.PresubmitError(
+ 'An issue reference (BUG=) requires a change log flag (LOG=). '
+ 'Use LOG=Y for including this commit message in the change log. '
+ 'Use LOG=N or leave blank otherwise.'))
+ return results
+
+
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
+ results.extend(_CheckChangeLogFlag(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
+ results.extend(_CheckChangeLogFlag(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
- results.extend(input_api.canned_checks.CheckTreeIsOpen(
- input_api, output_api,
- json_url='http://v8-status.appspot.com/current?format=json'))
+ if not _SkipTreeCheck(input_api, output_api):
+ results.extend(input_api.canned_checks.CheckTreeIsOpen(
+ input_api, output_api,
+ json_url='http://v8-status.appspot.com/current?format=json'))
return results
diff --git a/chromium/v8/tools/status-file-converter.py b/chromium/v8/WATCHLISTS
index ba063ee8c74..9c2bce9c558 100755..100644
--- a/chromium/v8/tools/status-file-converter.py
+++ b/chromium/v8/WATCHLISTS
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -27,13 +25,22 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Watchlist Rules
+# Refer: http://dev.chromium.org/developers/contributing-code/watchlists
-import sys
-from testrunner.local import old_statusfile
+# IMPORTANT: The regular expression filepath is tested against each path using
+# re.search, so it is not usually necessary to add .*.
-if len(sys.argv) != 2:
- print "Usage: %s foo.status" % sys.argv[0]
- print "Will read foo.status and print the converted version to stdout."
- sys.exit(1)
+{
+ 'WATCHLIST_DEFINITIONS': {
+ 'public_api': {
+ 'filepath': 'include/',
+ },
+ },
-print old_statusfile.ConvertNotation(sys.argv[1]).GetOutput()
+ 'WATCHLISTS': {
+ 'public_api': [
+ 'phajdan.jr@chromium.org',
+ ],
+ },
+}
diff --git a/chromium/v8/benchmarks/deltablue.js b/chromium/v8/benchmarks/deltablue.js
index 548fd96ffbd..dacee3f13ff 100644
--- a/chromium/v8/benchmarks/deltablue.js
+++ b/chromium/v8/benchmarks/deltablue.js
@@ -121,23 +121,23 @@ Strength.strongest = function (s1, s2) {
Strength.prototype.nextWeaker = function () {
switch (this.strengthValue) {
- case 0: return Strength.WEAKEST;
- case 1: return Strength.WEAK_DEFAULT;
- case 2: return Strength.NORMAL;
- case 3: return Strength.STRONG_DEFAULT;
- case 4: return Strength.PREFERRED;
- case 5: return Strength.REQUIRED;
+ case 0: return Strength.STRONG_PREFERRED;
+ case 1: return Strength.PREFERRED;
+ case 2: return Strength.STRONG_DEFAULT;
+ case 3: return Strength.NORMAL;
+ case 4: return Strength.WEAK_DEFAULT;
+ case 5: return Strength.WEAKEST;
}
}
// Strength constants.
-Strength.REQUIRED = new Strength(0, "required");
-Strength.STONG_PREFERRED = new Strength(1, "strongPreferred");
-Strength.PREFERRED = new Strength(2, "preferred");
-Strength.STRONG_DEFAULT = new Strength(3, "strongDefault");
-Strength.NORMAL = new Strength(4, "normal");
-Strength.WEAK_DEFAULT = new Strength(5, "weakDefault");
-Strength.WEAKEST = new Strength(6, "weakest");
+Strength.REQUIRED = new Strength(0, "required");
+Strength.STRONG_PREFERRED = new Strength(1, "strongPreferred");
+Strength.PREFERRED = new Strength(2, "preferred");
+Strength.STRONG_DEFAULT = new Strength(3, "strongDefault");
+Strength.NORMAL = new Strength(4, "normal");
+Strength.WEAK_DEFAULT = new Strength(5, "weakDefault");
+Strength.WEAKEST = new Strength(6, "weakest");
/* --- *
* C o n s t r a i n t
diff --git a/chromium/v8/build/all.gyp b/chromium/v8/build/all.gyp
index 4b2fe52989e..5fbd8c28e77 100644
--- a/chromium/v8/build/all.gyp
+++ b/chromium/v8/build/all.gyp
@@ -8,12 +8,17 @@
'target_name': 'All',
'type': 'none',
'dependencies': [
- '../preparser/preparser.gyp:*',
'../samples/samples.gyp:*',
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',
],
+ 'conditions': [
+ ['component!="shared_library"', {
+ 'dependencies': [
+ '../tools/lexer-shell.gyp:lexer-shell',
+ ],
+ }],
+ ]
}
]
}
-
diff --git a/chromium/v8/build/features.gypi b/chromium/v8/build/features.gypi
index 3c6d25f7587..f0e72120965 100644
--- a/chromium/v8/build/features.gypi
+++ b/chromium/v8/build/features.gypi
@@ -54,7 +54,13 @@
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
+
+ # Enable compiler warnings when using V8_DEPRECATED apis.
+ 'v8_deprecation_warnings%': 0,
+
+ # Use the v8 provided v8::Platform implementation.
+ 'v8_use_default_platform%': 1,
},
'target_defaults': {
'conditions': [
@@ -76,9 +82,15 @@
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
+ ['v8_deprecation_warnings==1', {
+ 'defines': ['V8_DEPRECATION_WARNINGS',],
+ }],
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
+ ['v8_use_default_platform==1', {
+ 'defines': ['V8_USE_DEFAULT_PLATFORM',],
+ }],
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
@@ -89,21 +101,29 @@
'Debug': {
'variables': {
'v8_enable_extra_checks%': 1,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
+ ['v8_enable_handle_zapping==1', {
+ 'defines': ['ENABLE_HANDLE_ZAPPING',],
+ }],
],
}, # Debug
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
+ 'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
+ ['v8_enable_handle_zapping==1', {
+ 'defines': ['ENABLE_HANDLE_ZAPPING',],
+ }],
], # conditions
}, # Release
}, # configurations
diff --git a/chromium/v8/build/standalone.gypi b/chromium/v8/build/standalone.gypi
index 5c017d5f507..1f91f9eb256 100644
--- a/chromium/v8/build/standalone.gypi
+++ b/chromium/v8/build/standalone.gypi
@@ -36,7 +36,8 @@
'clang%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
+ 'v8_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@@ -77,6 +78,23 @@
# as errors.
'v8_code%': 0,
+ # Speeds up Debug builds:
+ # 0 - Compiler optimizations off (debuggable) (default). This may
+ # be 5x slower than Release (or worse).
+ # 1 - Turn on compiler optimizations. This may be hard or impossible to
+ # debug. This may still be 2x slower than Release (or worse).
+ # 2 - Turn on optimizations, and also #undef DEBUG / #define NDEBUG
+ # (but leave V8_ENABLE_CHECKS and most other assertions enabled).
+ # This may cause some v8 tests to fail in the Debug configuration.
+ # This roughly matches the performance of a Release build and can
+ # be used by embedders that need to build their own code as debug
+ # but don't want or need a debug version of V8. This should produce
+ # near-release speeds.
+ 'v8_optimized_debug%': 0,
+
+ # Relative path to icu.gyp from this file.
+ 'icu_gyp_path': '../third_party/icu/icu.gyp',
+
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
@@ -255,7 +273,6 @@
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
- 'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES', # -Werror
'GCC_VERSION': 'com.apple.compilers.llvmgcc42',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
'GCC_WARN_NON_VIRTUAL_DESTRUCTOR': 'YES', # -Wnon-virtual-dtor
@@ -274,6 +291,13 @@
'-Wno-unused-parameter',
],
},
+ 'conditions': [
+ ['werror==""', {
+ 'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'NO'},
+ }, {
+ 'xcode_settings': {'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES'},
+ }],
+ ],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
diff --git a/chromium/v8/build/toolchain.gypi b/chromium/v8/build/toolchain.gypi
index c1066ebe94b..99f357a965a 100644
--- a/chromium/v8/build/toolchain.gypi
+++ b/chromium/v8/build/toolchain.gypi
@@ -60,20 +60,6 @@
'v8_enable_backtrace%': 0,
- # Speeds up Debug builds:
- # 0 - Compiler optimizations off (debuggable) (default). This may
- # be 5x slower than Release (or worse).
- # 1 - Turn on compiler optimizations. This may be hard or impossible to
- # debug. This may still be 2x slower than Release (or worse).
- # 2 - Turn on optimizations, and also #undef DEBUG / #define NDEBUG
- # (but leave V8_ENABLE_CHECKS and most other assertions enabled.
- # This may cause some v8 tests to fail in the Debug configuration.
- # This roughly matches the performance of a Release build and can
- # be used by embedders that need to build their own code as debug
- # but don't want or need a debug version of V8. This should produce
- # near-release speeds.
- 'v8_optimized_debug%': 0,
-
# Enable profiling support. Only required on Windows.
'v8_enable_prof%': 0,
@@ -390,7 +376,7 @@
'target_conditions': [
['_toolset=="host"', {
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
@@ -400,7 +386,7 @@
}],
['_toolset=="target"', {
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
'clang%': 0,
},
'conditions': [
@@ -422,14 +408,14 @@
'target_conditions': [
['_toolset=="host"', {
'variables': {
- 'm64flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
+ 'm64flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
}],
['_toolset=="target"', {
'variables': {
- 'm64flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
+ 'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
@@ -450,6 +436,7 @@
'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
+ 'DEBUG'
],
'msvs_settings': {
'VCCLCompilerTool': {
@@ -517,15 +504,6 @@
},
},
'conditions': [
- ['v8_optimized_debug==2', {
- 'defines': [
- 'NDEBUG',
- ],
- }, {
- 'defines': [
- 'DEBUG',
- ],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
@@ -567,6 +545,9 @@
'-fdata-sections',
'-ffunction-sections',
],
+ 'defines': [
+ 'OPTIMIZED_DEBUG'
+ ],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
diff --git a/chromium/v8/include/v8-debug.h b/chromium/v8/include/v8-debug.h
index 053b81d2c58..1a86a061e90 100755
--- a/chromium/v8/include/v8-debug.h
+++ b/chromium/v8/include/v8-debug.h
@@ -212,9 +212,13 @@ class V8_EXPORT Debug {
// If no isolate is provided the default isolate is
// used.
+ // TODO(dcarney): remove
static void SendCommand(const uint16_t* command, int length,
ClientData* client_data = NULL,
Isolate* isolate = NULL);
+ static void SendCommand(Isolate* isolate,
+ const uint16_t* command, int length,
+ ClientData* client_data = NULL);
// Dispatch interface.
static void SetHostDispatchHandler(HostDispatchHandler handler,
diff --git a/chromium/v8/include/v8-platform.h b/chromium/v8/include/v8-platform.h
new file mode 100644
index 00000000000..75fddd59a87
--- /dev/null
+++ b/chromium/v8/include/v8-platform.h
@@ -0,0 +1,86 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_PLATFORM_H_
+#define V8_V8_PLATFORM_H_
+
+#include "v8.h"
+
+namespace v8 {
+
+/**
+ * A Task represents a unit of work.
+ */
+class Task {
+ public:
+ virtual ~Task() {}
+
+ virtual void Run() = 0;
+};
+
+/**
+ * V8 Platform abstraction layer.
+ *
+ * The embedder has to provide an implementation of this interface before
+ * initializing the rest of V8.
+ */
+class Platform {
+ public:
+ /**
+ * This enum is used to indicate whether a task is potentially long running,
+ * or causes a long wait. The embedder might want to use this hint to decide
+ * whether to execute the task on a dedicated thread.
+ */
+ enum ExpectedRuntime {
+ kShortRunningTask,
+ kLongRunningTask
+ };
+
+ /**
+ * Schedules a task to be invoked on a background thread. |expected_runtime|
+ * indicates that the task will run a long time. The Platform implementation
+ * takes ownership of |task|. There is no guarantee about order of execution
+ * of tasks wrt order of scheduling, nor is there a guarantee about the
+ * thread the task will be run on.
+ */
+ virtual void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) = 0;
+
+ /**
+ * Schedules a task to be invoked on a foreground thread wrt a specific
+ * |isolate|. Tasks posted for the same isolate should be executed in order of
+ * scheduling. The definition of "foreground" is opaque to V8.
+ */
+ virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
+
+ protected:
+ virtual ~Platform() {}
+};
+
+} // namespace v8
+
+#endif // V8_V8_PLATFORM_H_
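
A note on the Platform interface added above: it has only two pure-virtual
scheduling hooks, so a minimal embedder implementation can be very small. The
sketch below is hypothetical (it is not part of this commit, and the class
name is invented); it satisfies the contract by running every posted task
synchronously on the calling thread, which provides no real concurrency:

// Hypothetical minimal v8::Platform implementation (not part of this
// commit). Per the interface comments above, the platform takes ownership
// of each posted task, and same-thread execution trivially preserves the
// per-isolate posting order required for foreground tasks.
#include "v8-platform.h"

class SynchronousPlatform : public v8::Platform {
 public:
  virtual void CallOnBackgroundThread(v8::Task* task,
                                      ExpectedRuntime expected_runtime) {
    task->Run();
    delete task;  // We own |task| once it has been posted.
  }

  virtual void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) {
    task->Run();
    delete task;
  }
};

The src/default-platform.{h,cc} files added elsewhere in this commit (see the
diffstat) appear to play a similar role inside V8 itself when
V8_USE_DEFAULT_PLATFORM is defined via features.gypi.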
diff --git a/chromium/v8/include/v8-preparser.h b/chromium/v8/include/v8-preparser.h
deleted file mode 100644
index 1da77185af8..00000000000
--- a/chromium/v8/include/v8-preparser.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef PREPARSER_H
-#define PREPARSER_H
-
-#include "v8.h"
-#include "v8stdint.h"
-
-namespace v8 {
-
-// The result of preparsing is either a stack overflow error, or an opaque
-// blob of data that can be passed back into the parser.
-class V8_EXPORT PreParserData {
- public:
- PreParserData(size_t size, const uint8_t* data)
- : data_(data), size_(size) { }
-
- // Create a PreParserData value where stack_overflow reports true.
- static PreParserData StackOverflow() { return PreParserData(0, NULL); }
-
- // Whether the pre-parser stopped due to a stack overflow.
- // If this is the case, size() and data() should not be used.
- bool stack_overflow() { return size_ == 0u; }
-
- // The size of the data in bytes.
- size_t size() const { return size_; }
-
- // Pointer to the data.
- const uint8_t* data() const { return data_; }
-
- private:
- const uint8_t* const data_;
- const size_t size_;
-};
-
-
-// Interface for a stream of Unicode characters.
-class V8_EXPORT UnicodeInputStream { // NOLINT - V8_EXPORT is not a class name.
- public:
- virtual ~UnicodeInputStream();
-
- // Returns the next Unicode code-point in the input, or a negative value when
- // there is no more input in the stream.
- virtual int32_t Next() = 0;
-};
-
-
-// Preparse a JavaScript program. The source code is provided as a
-// UnicodeInputStream. The max_stack_size limits the amount of stack
-// space that the preparser is allowed to use. If the preparser uses
-// more stack space than the limit provided, the result's stack_overflow()
-// method will return true. Otherwise the result contains preparser
-// data that can be used by the V8 parser to speed up parsing.
-PreParserData V8_EXPORT Preparse(UnicodeInputStream* input,
- size_t max_stack_size);
-
-} // namespace v8.
-
-#endif // PREPARSER_H
diff --git a/chromium/v8/include/v8-profiler.h b/chromium/v8/include/v8-profiler.h
index 217a938329e..0ed6c5d216b 100644
--- a/chromium/v8/include/v8-profiler.h
+++ b/chromium/v8/include/v8-profiler.h
@@ -57,16 +57,17 @@ class V8_EXPORT CpuProfileNode {
*/
int GetLineNumber() const;
+ /**
+ * Returns 1-based number of the column where the function originates.
+ * kNoColumnNumberInfo if no column number information is available.
+ */
+ int GetColumnNumber() const;
+
/** Returns bailout reason for the function
* if the optimization was disabled for it.
*/
const char* GetBailoutReason() const;
- /** DEPRECATED. Please use GetHitCount instead.
- * Returns the count of samples where function was currently executing.
- */
- V8_DEPRECATED(double GetSelfSamplesCount() const);
-
/**
* Returns the count of samples where the function was currently executing.
*/
@@ -85,6 +86,7 @@ class V8_EXPORT CpuProfileNode {
const CpuProfileNode* GetChild(int index) const;
static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
+ static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
@@ -423,8 +425,12 @@ class V8_EXPORT HeapProfiler {
* Starts tracking of heap objects population statistics. After calling
* this method, all heap objects relocations done by the garbage collector
* are being registered.
+ *
+ * |track_allocations| parameter controls whether stack trace of each
+ * allocation in the heap will be recorded and reported as part of
+ * HeapSnapshot.
*/
- void StartTrackingHeapObjects();
+ void StartTrackingHeapObjects(bool track_allocations = false);
/**
* Adds a new time interval entry to the aggregated statistics array. The
@@ -473,6 +479,21 @@ class V8_EXPORT HeapProfiler {
*/
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+ /**
+ * Starts recording JS allocations immediately as they arrive and tracking of
+ * heap objects population statistics.
+ */
+ V8_DEPRECATED("Use StartTrackingHeapObjects instead",
+ void StartRecordingHeapAllocations());
+
+ /**
+ * Stops recording JS allocations and tracking of heap objects population
+ * statistics, cleans all collected heap objects population statistics data.
+ */
+ V8_DEPRECATED("Use StopTrackingHeapObjects instead",
+ void StopRecordingHeapAllocations());
+
+
private:
HeapProfiler();
~HeapProfiler();
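
A note on the new track_allocations parameter above: passing true folds the
deprecated StartRecordingHeapAllocations behaviour into
StartTrackingHeapObjects, recording a stack trace for each heap allocation.
A hypothetical usage sketch (not part of this commit; it assumes the embedder
already owns a live isolate):

// Hypothetical usage of the new allocation-tracking flag (not part of
// this commit).
#include "v8-profiler.h"

void ProfileWithAllocationTraces(v8::Isolate* isolate) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  profiler->StartTrackingHeapObjects(true);  // also record allocation stacks
  // ... run the JavaScript workload of interest ...
  profiler->StopTrackingHeapObjects();  // stop tracking and drop statistics
}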
diff --git a/chromium/v8/include/v8-testing.h b/chromium/v8/include/v8-testing.h
index 97b467a91b1..ba4fcc44ecd 100644
--- a/chromium/v8/include/v8-testing.h
+++ b/chromium/v8/include/v8-testing.h
@@ -68,8 +68,4 @@ class V8_EXPORT Testing {
} // namespace v8
-
-#undef V8_EXPORT
-
-
#endif // V8_V8_TEST_H_
diff --git a/chromium/v8/include/v8.h b/chromium/v8/include/v8.h
index de2733838ff..9bf96081d5a 100644
--- a/chromium/v8/include/v8.h
+++ b/chromium/v8/include/v8.h
@@ -105,6 +105,7 @@ class NumberObject;
class Object;
class ObjectOperationDescriptor;
class ObjectTemplate;
+class Platform;
class Primitive;
class RawOperationDescriptor;
class Signature;
@@ -114,6 +115,7 @@ class String;
class StringObject;
class Symbol;
class SymbolObject;
+class Private;
class Uint32;
class Utils;
class Value;
@@ -121,8 +123,10 @@ template <class T> class Handle;
template <class T> class Local;
template <class T> class Eternal;
template<class T> class NonCopyablePersistentTraits;
+template<class T> class PersistentBase;
template<class T,
class M = NonCopyablePersistentTraits<T> > class Persistent;
+template<class T> class UniquePersistent;
template<class T, class P> class WeakCallbackObject;
class FunctionTemplate;
class ObjectTemplate;
@@ -135,6 +139,7 @@ class DeclaredAccessorDescriptor;
class ObjectOperationDescriptor;
class RawOperationDescriptor;
class CallHandlerHelper;
+class EscapableHandleScope;
namespace internal {
class Arguments;
@@ -254,17 +259,17 @@ template <class T> class Handle {
* The handles' references are not checked.
*/
template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
}
template <class S> V8_INLINE bool operator==(
- const Persistent<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ const PersistentBase<S>& that) const {
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
@@ -301,7 +306,8 @@ template <class T> class Handle {
V8_INLINE static Handle<T> New(Isolate* isolate, Handle<T> that) {
return New(isolate, that.val_);
}
- V8_INLINE static Handle<T> New(Isolate* isolate, const Persistent<T>& that) {
+ V8_INLINE static Handle<T> New(Isolate* isolate,
+ const PersistentBase<T>& that) {
return New(isolate, that.val_);
}
@@ -317,6 +323,8 @@ template <class T> class Handle {
private:
friend class Utils;
template<class F, class M> friend class Persistent;
+ template<class F> friend class PersistentBase;
+ template<class F> friend class Handle;
template<class F> friend class Local;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
@@ -327,6 +335,8 @@ template <class T> class Handle {
friend Handle<Boolean> False(Isolate* isolate);
friend class Context;
friend class HandleScope;
+ friend class Object;
+ friend class Private;
V8_INLINE static Handle<T> New(Isolate* isolate, T* that);
@@ -377,11 +387,9 @@ template <class T> class Local : public Handle<T> {
* The referee is kept alive by the local handle even when
* the original handle is destroyed/disposed.
*/
- V8_INLINE static Local<T> New(Handle<T> that);
V8_INLINE static Local<T> New(Isolate* isolate, Handle<T> that);
- template<class M>
V8_INLINE static Local<T> New(Isolate* isolate,
- const Persistent<T, M>& that);
+ const PersistentBase<T>& that);
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
@@ -392,8 +400,10 @@ template <class T> class Local : public Handle<T> {
private:
friend class Utils;
template<class F> friend class Eternal;
+ template<class F> friend class PersistentBase;
template<class F, class M> friend class Persistent;
template<class F> friend class Handle;
+ template<class F> friend class Local;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
friend class String;
@@ -401,6 +411,7 @@ template <class T> class Local : public Handle<T> {
friend class Context;
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
+ friend class EscapableHandleScope;
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
};
@@ -457,100 +468,21 @@ class WeakReferenceCallbacks {
/**
- * Default traits for Persistent. This class does not allow
- * use of the copy constructor or assignment operator.
- * At present kResetInDestructor is not set, but that will change in a future
- * version.
- */
-template<class T>
-class NonCopyablePersistentTraits {
- public:
- typedef Persistent<T, NonCopyablePersistentTraits<T> > NonCopyablePersistent;
- static const bool kResetInDestructor = false;
- template<class S, class M>
- V8_INLINE static void Copy(const Persistent<S, M>& source,
- NonCopyablePersistent* dest) {
- Uncompilable<Object>();
- }
- // TODO(dcarney): come up with a good compile error here.
- template<class O> V8_INLINE static void Uncompilable() {
- TYPE_CHECK(O, Primitive);
- }
-};
-
-
-/**
* An object reference that is independent of any handle scope. Where
* a Local handle only lives as long as the HandleScope in which it was
- * allocated, a Persistent handle remains valid until it is explicitly
+ * allocated, a PersistentBase handle remains valid until it is explicitly
* disposed.
*
* A persistent handle contains a reference to a storage cell within
* the v8 engine which holds an object value and which is updated by
* the garbage collector whenever the object is moved. A new storage
- * cell can be created using the constructor or Persistent::Reset and
- * existing handles can be disposed using Persistent::Reset.
+ * cell can be created using the constructor or PersistentBase::Reset and
+ * existing handles can be disposed using PersistentBase::Reset.
*
- * Copy, assignment and destructor bevavior is controlled by the traits
- * class M.
*/
-template <class T, class M> class Persistent {
+template <class T> class PersistentBase {
public:
/**
- * A Persistent with no storage cell.
- */
- V8_INLINE Persistent() : val_(0) { }
- /**
- * Construct a Persistent from a Handle.
- * When the Handle is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S> V8_INLINE Persistent(Isolate* isolate, Handle<S> that)
- : val_(New(isolate, *that)) {
- TYPE_CHECK(T, S);
- }
- /**
- * Construct a Persistent from a Persistent.
- * When the Persistent is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S, class M2>
- V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
- : val_(New(isolate, *that)) {
- TYPE_CHECK(T, S);
- }
- /**
- * The copy constructors and assignment operator create a Persistent
- * exactly as the Persistent constructor, but the Copy function from the
- * traits class is called, allowing the setting of flags based on the
- * copied Persistent.
- */
- V8_INLINE Persistent(const Persistent& that) : val_(0) {
- Copy(that);
- }
- template <class S, class M2>
- V8_INLINE Persistent(const Persistent<S, M2>& that) : val_(0) {
- Copy(that);
- }
- V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT
- Copy(that);
- return *this;
- }
- template <class S, class M2>
- V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) { // NOLINT
- Copy(that);
- return *this;
- }
- /**
- * The destructor will dispose the Persistent based on the
- * kResetInDestructor flags in the traits class. Since not calling dispose
- * can result in a memory leak, it is recommended to always set this flag.
- */
- V8_INLINE ~Persistent() {
- if (M::kResetInDestructor) Reset();
- }
-
- /**
* If non-empty, destroy the underlying storage cell
* IsEmpty() will return true after this call.
*/
@@ -561,53 +493,35 @@ template <class T, class M> class Persistent {
*/
template <class S>
V8_INLINE void Reset(Isolate* isolate, const Handle<S>& other);
+
/**
* If non-empty, destroy the underlying storage cell
* and create a new one with the contents of other if other is non empty
*/
- template <class S, class M2>
- V8_INLINE void Reset(Isolate* isolate, const Persistent<S, M2>& other);
- // TODO(dcarney): deprecate
- V8_INLINE void Dispose() { Reset(); }
- V8_DEPRECATED(V8_INLINE void Dispose(Isolate* isolate)) { Reset(); }
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
V8_INLINE bool IsEmpty() const { return val_ == 0; }
- // TODO(dcarney): this is pretty useless, fix or remove
template <class S>
- V8_INLINE static Persistent<T>& Cast(Persistent<S>& that) { // NOLINT
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (!that.IsEmpty()) T::Cast(*that);
-#endif
- return reinterpret_cast<Persistent<T>&>(that);
- }
-
- // TODO(dcarney): this is pretty useless, fix or remove
- template <class S> V8_INLINE Persistent<S>& As() { // NOLINT
- return Persistent<S>::Cast(*this);
- }
-
- template <class S, class M2>
- V8_INLINE bool operator==(const Persistent<S, M2>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ V8_INLINE bool operator==(const PersistentBase<S>& that) const {
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
}
template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
- internal::Object** a = reinterpret_cast<internal::Object**>(**this);
- internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
+ internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
}
- template <class S, class M2>
- V8_INLINE bool operator!=(const Persistent<S, M2>& that) const {
+ template <class S>
+ V8_INLINE bool operator!=(const PersistentBase<S>& that) const {
return !operator==(that);
}
@@ -625,22 +539,8 @@ template <class T, class M> class Persistent {
P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
- // TODO(dcarney): deprecate
- template<typename S, typename P>
- V8_INLINE void MakeWeak(
- P* parameter,
- typename WeakReferenceCallbacks<S, P>::Revivable callback);
-
- // TODO(dcarney): deprecate
- template<typename P>
- V8_INLINE void MakeWeak(
- P* parameter,
- typename WeakReferenceCallbacks<T, P>::Revivable callback);
-
V8_INLINE void ClearWeak();
- V8_DEPRECATED(V8_INLINE void ClearWeak(Isolate* isolate)) { ClearWeak(); }
-
/**
* Marks the reference to this object independent. Garbage collector is free
* to ignore any object groups containing this object. Weak callback for an
@@ -649,10 +549,6 @@ template <class T, class M> class Persistent {
*/
V8_INLINE void MarkIndependent();
- V8_DEPRECATED(V8_INLINE void MarkIndependent(Isolate* isolate)) {
- MarkIndependent();
- }
-
/**
* Marks the reference to this object partially dependent. Partially dependent
* handles only depend on other partially dependent handles and these
@@ -663,80 +559,279 @@ template <class T, class M> class Persistent {
*/
V8_INLINE void MarkPartiallyDependent();
- V8_DEPRECATED(V8_INLINE void MarkPartiallyDependent(Isolate* isolate)) {
- MarkPartiallyDependent();
- }
-
V8_INLINE bool IsIndependent() const;
- V8_DEPRECATED(V8_INLINE bool IsIndependent(Isolate* isolate) const) {
- return IsIndependent();
- }
-
/** Checks if the handle holds the only reference to an object. */
V8_INLINE bool IsNearDeath() const;
- V8_DEPRECATED(V8_INLINE bool IsNearDeath(Isolate* isolate) const) {
- return IsNearDeath();
- }
-
/** Returns true if the handle's reference is weak. */
V8_INLINE bool IsWeak() const;
- V8_DEPRECATED(V8_INLINE bool IsWeak(Isolate* isolate) const) {
- return IsWeak();
- }
-
/**
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
* description in v8-profiler.h for details.
*/
V8_INLINE void SetWrapperClassId(uint16_t class_id);
- V8_DEPRECATED(
- V8_INLINE void SetWrapperClassId(Isolate * isolate, uint16_t class_id)) {
- SetWrapperClassId(class_id);
- }
-
/**
* Returns the class ID previously assigned to this handle or 0 if no class ID
* was previously assigned.
*/
V8_INLINE uint16_t WrapperClassId() const;
- V8_DEPRECATED(V8_INLINE uint16_t WrapperClassId(Isolate* isolate) const) {
- return WrapperClassId();
+ private:
+ friend class Isolate;
+ friend class Utils;
+ template<class F> friend class Handle;
+ template<class F> friend class Local;
+ template<class F1, class F2> friend class Persistent;
+ template<class F> friend class UniquePersistent;
+ template<class F> friend class PersistentBase;
+ template<class F> friend class ReturnValue;
+
+ explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
+ PersistentBase(PersistentBase& other); // NOLINT
+ void operator=(PersistentBase&);
+ V8_INLINE static T* New(Isolate* isolate, T* that);
+
+ T* val_;
+};
+
+
+/**
+ * Default traits for Persistent. This class does not allow
+ * use of the copy constructor or assignment operator.
+ * At present kResetInDestructor is not set, but that will change in a future
+ * version.
+ */
+template<class T>
+class NonCopyablePersistentTraits {
+ public:
+ typedef Persistent<T, NonCopyablePersistentTraits<T> > NonCopyablePersistent;
+ static const bool kResetInDestructor = false;
+ template<class S, class M>
+ V8_INLINE static void Copy(const Persistent<S, M>& source,
+ NonCopyablePersistent* dest) {
+ Uncompilable<Object>();
+ }
+ // TODO(dcarney): come up with a good compile error here.
+ template<class O> V8_INLINE static void Uncompilable() {
+ TYPE_CHECK(O, Primitive);
+ }
+};
+
+
+/**
+ * Helper class traits to allow copying and assignment of Persistent.
+ * This will clone the contents of the storage cell, but not any of the flags, etc.
+ */
+template<class T>
+struct CopyablePersistentTraits {
+ typedef Persistent<T, CopyablePersistentTraits<T> > CopyablePersistent;
+ static const bool kResetInDestructor = true;
+ template<class S, class M>
+ static V8_INLINE void Copy(const Persistent<S, M>& source,
+ CopyablePersistent* dest) {
+ // do nothing, just allow copy
}
+};
- // TODO(dcarney): remove
+
+/**
+ * A PersistentBase which allows copy and assignment.
+ *
+ * Copy, assignment and destructor behavior is controlled by the traits
+ * class M.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
+ */
+template <class T, class M> class Persistent : public PersistentBase<T> {
+ public:
+ /**
+ * A Persistent with no storage cell.
+ */
+ V8_INLINE Persistent() : PersistentBase<T>(0) { }
+ /**
+ * Construct a Persistent from a Handle.
+ * When the Handle is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S> V8_INLINE Persistent(Isolate* isolate, Handle<S> that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ TYPE_CHECK(T, S);
+ }
+ /**
+ * Construct a Persistent from a Persistent.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S, class M2>
+ V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ TYPE_CHECK(T, S);
+ }
+ /**
+ * The copy constructors and assignment operator create a Persistent
+ * exactly as the Persistent constructor, but the Copy function from the
+ * traits class is called, allowing the setting of flags based on the
+ * copied Persistent.
+ */
+ V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(0) {
+ Copy(that);
+ }
+ template <class S, class M2>
+ V8_INLINE Persistent(const Persistent<S, M2>& that) : PersistentBase<T>(0) {
+ Copy(that);
+ }
+ V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT
+ Copy(that);
+ return *this;
+ }
+ template <class S, class M2>
+ V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) { // NOLINT
+ Copy(that);
+ return *this;
+ }
+ /**
+ * The destructor will dispose the Persistent based on the
+ * kResetInDestructor flags in the traits class. Since not calling dispose
+ * can result in a memory leak, it is recommended to always set this flag.
+ */
+ V8_INLINE ~Persistent() {
+ if (M::kResetInDestructor) this->Reset();
+ }
+
+ V8_DEPRECATED("Use Reset instead",
+ V8_INLINE void Dispose()) { this->Reset(); }
+
+ // TODO(dcarney): this is pretty useless, fix or remove
+ template <class S>
+ V8_INLINE static Persistent<T>& Cast(Persistent<S>& that) { // NOLINT
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
+ if (!that.IsEmpty()) T::Cast(*that);
+#endif
+ return reinterpret_cast<Persistent<T>&>(that);
+ }
+
+ // TODO(dcarney): this is pretty useless, fix or remove
+ template <class S> V8_INLINE Persistent<S>& As() { // NOLINT
+ return Persistent<S>::Cast(*this);
+ }
+
+ template<typename S, typename P>
+ V8_DEPRECATED(
+ "Use SetWeak instead",
+ V8_INLINE void MakeWeak(
+ P* parameter,
+ typename WeakReferenceCallbacks<S, P>::Revivable callback));
+
+ template<typename P>
+ V8_DEPRECATED(
+ "Use SetWeak instead",
+ V8_INLINE void MakeWeak(
+ P* parameter,
+ typename WeakReferenceCallbacks<T, P>::Revivable callback));
+
+ // This will be removed.
V8_INLINE T* ClearAndLeak();
- // TODO(dcarney): remove
- V8_INLINE void Clear() { val_ = 0; }
+ V8_DEPRECATED("This will be removed",
+ V8_INLINE void Clear()) { this->val_ = 0; }
// TODO(dcarney): remove
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
private:
#endif
- template <class S> V8_INLINE Persistent(S* that) : val_(that) { }
+ template <class S> V8_INLINE Persistent(S* that) : PersistentBase<T>(that) { }
- V8_INLINE T* operator*() const { return val_; }
+ V8_INLINE T* operator*() const { return this->val_; }
private:
+ friend class Isolate;
friend class Utils;
template<class F> friend class Handle;
template<class F> friend class Local;
template<class F1, class F2> friend class Persistent;
template<class F> friend class ReturnValue;
- V8_INLINE static T* New(Isolate* isolate, T* that);
template<class S, class M2>
V8_INLINE void Copy(const Persistent<S, M2>& that);
+};
- T* val_;
+
+/**
+ * A PersistentBase which has move semantics.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
+ */
+template<class T>
+class UniquePersistent : public PersistentBase<T> {
+ struct RValue {
+ V8_INLINE explicit RValue(UniquePersistent* object) : object(object) {}
+ UniquePersistent* object;
+ };
+
+ public:
+ /**
+ * A UniquePersistent with no storage cell.
+ */
+ V8_INLINE UniquePersistent() : PersistentBase<T>(0) { }
+ /**
+ * Construct a UniquePersistent from a Handle.
+ * When the Handle is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE UniquePersistent(Isolate* isolate, Handle<S> that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ TYPE_CHECK(T, S);
+ }
+ /**
+ * Construct a UniquePersistent from a PersistentBase.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE UniquePersistent(Isolate* isolate, const PersistentBase<S>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
+ TYPE_CHECK(T, S);
+ }
+ /**
+ * Move constructor.
+ */
+ V8_INLINE UniquePersistent(RValue rvalue)
+ : PersistentBase<T>(rvalue.object->val_) {
+ rvalue.object->val_ = 0;
+ }
+ V8_INLINE ~UniquePersistent() { this->Reset(); }
+ /**
+ * Move via assignment.
+ */
+ template<class S>
+ V8_INLINE UniquePersistent& operator=(UniquePersistent<S> rhs) {
+ TYPE_CHECK(T, S);
+ this->val_ = rhs.val_;
+ rhs.val_ = 0;
+ return *this;
+ }
+ /**
+ * Cast operator for moves.
+ */
+ V8_INLINE operator RValue() { return RValue(this); }
+ /**
+ * Pass allows returning uniques from functions, etc.
+ */
+ V8_INLINE UniquePersistent Pass() { return UniquePersistent(RValue(this)); }
+
+ private:
+ UniquePersistent(UniquePersistent&);
+ void operator=(UniquePersistent&);
};
+
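A rough usage sketch of the move-only semantics built from the RValue/Pass() machinery above (assumes a live isolate with an entered context; MakeOwned is an illustrative name):

    // Sketch: UniquePersistent cannot be copied; Pass() transfers the
    // storage cell out of a function instead.
    v8::UniquePersistent<v8::Object> MakeOwned(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::Local<v8::Object> obj = v8::Object::New(isolate);
      v8::UniquePersistent<v8::Object> owned(isolate, obj);
      return owned.Pass();  // moves the cell; |owned| is left empty
    }

The receiving UniquePersistent disposes its cell in its destructor, so unlike Persistent no traits class or explicit Reset() call is needed.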
/**
* A stack-allocated class that governs a number of local handles.
* After a handle scope has been created, all local handles will be
@@ -757,27 +852,28 @@ class V8_EXPORT HandleScope {
~HandleScope();
- /**
- * Closes the handle scope and returns the value as a handle in the
- * previous scope, which is the new current scope after the call.
- */
- template <class T> Local<T> Close(Handle<T> value);
+ template <class T>
+ V8_DEPRECATED("Use EscapableHandleScope::Escape instead",
+ Local<T> Close(Handle<T> value));
/**
* Counts the number of allocated handles.
*/
static int NumberOfHandles();
+ private:
/**
* Creates a new handle with the given value.
*/
- static internal::Object** CreateHandle(internal::Object* value);
static internal::Object** CreateHandle(internal::Isolate* isolate,
internal::Object* value);
- // Faster version, uses HeapObject to obtain the current Isolate.
- static internal::Object** CreateHandle(internal::HeapObject* value);
+ // Uses HeapObject to obtain the current Isolate.
+ static internal::Object** CreateHandle(internal::HeapObject* heap_object,
+ internal::Object* value);
+
+ V8_INLINE HandleScope() {}
+ void Initialize(Isolate* isolate);
- private:
// Make it hard to create heap-allocated or illegal handle scopes by
// disallowing certain operations.
HandleScope(const HandleScope&);
@@ -798,19 +894,58 @@ class V8_EXPORT HandleScope {
}
};
- void Initialize(Isolate* isolate);
void Leave();
internal::Isolate* isolate_;
internal::Object** prev_next_;
internal::Object** prev_limit_;
+ // TODO(dcarney): remove this field
// Allow for the active closing of HandleScopes, which allows passing a
// handle from the HandleScope being closed to the next topmost HandleScope.
bool is_closed_;
internal::Object** RawClose(internal::Object** value);
friend class ImplementationUtilities;
+ friend class EscapableHandleScope;
+ template<class F> friend class Handle;
+ template<class F> friend class Local;
+ friend class Object;
+ friend class Context;
+};
+
+
+/**
+ * A HandleScope which first allocates a handle in the current scope
+ * which will be later filled with the escape value.
+ */
+class V8_EXPORT EscapableHandleScope : public HandleScope {
+ public:
+ EscapableHandleScope(Isolate* isolate);
+ V8_INLINE ~EscapableHandleScope() {}
+
+ /**
+ * Pushes the value into the previous scope and returns a handle to it.
+ * Cannot be called twice.
+ */
+ template <class T>
+ V8_INLINE Local<T> Escape(Local<T> value) {
+ internal::Object** slot =
+ Escape(reinterpret_cast<internal::Object**>(*value));
+ return Local<T>(reinterpret_cast<T*>(slot));
+ }
+
+ private:
+ internal::Object** Escape(internal::Object** escape_value);
+
+ // Make it hard to create heap-allocated or illegal handle scopes by
+ // disallowing certain operations.
+ EscapableHandleScope(const EscapableHandleScope&);
+ void operator=(const EscapableHandleScope&);
+ void* operator new(size_t size);
+ void operator delete(void*, size_t);
+
+ internal::Object** escape_slot_;
};
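An illustrative replacement for the deprecated HandleScope::Close() pattern (assumes an isolate with an entered context; NewPointList is a made-up name):

    // Sketch: escape a single local handle into the caller's scope.
    v8::Local<v8::Array> NewPointList(v8::Isolate* isolate) {
      v8::EscapableHandleScope scope(isolate);
      v8::Local<v8::Array> points = v8::Array::New(isolate, 3);
      // Escape() writes the value into the slot pre-allocated in the
      // previous scope; it may only be called once per scope.
      return scope.Escape(points);
    }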
@@ -857,7 +992,9 @@ class V8_EXPORT ScriptData { // NOLINT
* \param input Pointer to UTF-8 script source code.
* \param length Length of UTF-8 script source code.
*/
- static ScriptData* PreCompile(const char* input, int length);
+ static ScriptData* PreCompile(Isolate* isolate,
+ const char* input,
+ int length);
/**
* Pre-compiles the specified script (context-independent).
@@ -1009,9 +1146,8 @@ class V8_EXPORT Script {
/**
* Returns the script id value.
- * DEPRECATED: Please use GetId().
*/
- Local<Value> Id();
+ V8_DEPRECATED("Use GetId instead", Local<Value> Id());
/**
* Returns the script id.
@@ -1103,7 +1239,9 @@ class V8_EXPORT Message {
bool IsSharedCrossOrigin() const;
// TODO(1245381): Print to a string instead of on a FILE.
- static void PrintCurrentStackTrace(FILE* out);
+ static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
+ V8_DEPRECATED("Will be removed",
+ static void PrintCurrentStackTrace(FILE* out));
static const int kNoLineNumberInfo = 0;
static const int kNoColumnInfo = 0;
@@ -1158,8 +1296,12 @@ class V8_EXPORT StackTrace {
* StackFrame.
*/
static Local<StackTrace> CurrentStackTrace(
+ Isolate* isolate,
int frame_limit,
StackTraceOptions options = kOverview);
+ V8_DEPRECATED("Will be removed",
+ static Local<StackTrace> CurrentStackTrace(
+ int frame_limit, StackTraceOptions options = kOverview));
};
@@ -1463,6 +1605,7 @@ class V8_EXPORT Value : public Data {
/** JS == */
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
+ bool SameValue(Handle<Value> that) const;
template <class T> V8_INLINE static Value* Cast(T* value);
@@ -1489,7 +1632,9 @@ class V8_EXPORT Primitive : public Value { };
class V8_EXPORT Boolean : public Primitive {
public:
bool Value() const;
- V8_INLINE static Handle<Boolean> New(bool value);
+ V8_INLINE static Handle<Boolean> New(Isolate* isolate, bool value);
+ V8_DEPRECATED("Will be removed",
+ V8_INLINE static Handle<Boolean> New(bool value));
};
@@ -1516,11 +1661,6 @@ class V8_EXPORT String : public Primitive {
int Utf8Length() const;
/**
- * This function is no longer useful.
- */
- V8_DEPRECATED(V8_INLINE bool MayContainNonAscii() const) { return true; }
-
- /**
* Returns whether this string is known to contain only one byte data.
* Does not read the string.
* False negatives are possible.
@@ -1570,11 +1710,6 @@ class V8_EXPORT String : public Primitive {
int start = 0,
int length = -1,
int options = NO_OPTIONS) const;
- // ASCII characters.
- V8_DEPRECATED(int WriteAscii(char* buffer,
- int start = 0,
- int length = -1,
- int options = NO_OPTIONS) const);
// One byte characters.
int WriteOneByte(uint8_t* buffer,
int start = 0,
@@ -1705,24 +1840,29 @@ class V8_EXPORT String : public Primitive {
V8_INLINE static String* Cast(v8::Value* obj);
- // TODO(dcarney): deprecate
/**
* Allocates a new string from either UTF-8 encoded or ASCII data.
* The second parameter 'length' gives the buffer length. If omitted,
* the function calls 'strlen' to determine the buffer length.
*/
- V8_INLINE static Local<String> New(const char* data, int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromUtf8 instead",
+ V8_INLINE static Local<String> New(const char* data, int length = -1));
- // TODO(dcarney): deprecate
/** Allocates a new string from 16-bit character codes.*/
- V8_INLINE static Local<String> New(const uint16_t* data, int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromTwoByte instead",
+ V8_INLINE static Local<String> New(
+ const uint16_t* data, int length = -1));
- // TODO(dcarney): deprecate
/**
* Creates an internalized string (historically called a "symbol",
* not to be confused with ES6 symbols). Returns one if it exists already.
*/
- V8_INLINE static Local<String> NewSymbol(const char* data, int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromUtf8 instead",
+ V8_INLINE static Local<String> NewSymbol(
+ const char* data, int length = -1));
enum NewStringType {
kNormalString, kInternalizedString, kUndetectableString
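A migration sketch for the deprecated String factories (assumes the NewFromUtf8 replacement named in the messages above takes the isolate, the data, an optional NewStringType and an optional length):

    // Sketch: isolate-aware replacements for String::New/NewSymbol.
    void MakeStrings(v8::Isolate* isolate) {
      v8::Local<v8::String> plain =
          v8::String::NewFromUtf8(isolate, "hello");
      // Internalized strings replace the old "symbol" terminology.
      v8::Local<v8::String> interned = v8::String::NewFromUtf8(
          isolate, "hello", v8::String::kInternalizedString);
      (void)plain; (void)interned;
    }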
@@ -1762,7 +1902,10 @@ class V8_EXPORT String : public Primitive {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static Local<String> NewExternal(ExternalStringResource* resource);
+ static Local<String> NewExternal(Isolate* isolate,
+ ExternalStringResource* resource);
+ V8_DEPRECATED("Will be removed", static Local<String> NewExternal(
+ ExternalStringResource* resource));
/**
* Associate an external string resource with this string by transforming it
@@ -1783,7 +1926,10 @@ class V8_EXPORT String : public Primitive {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static Local<String> NewExternal(ExternalAsciiStringResource* resource);
+ static Local<String> NewExternal(Isolate* isolate,
+ ExternalAsciiStringResource* resource);
+ V8_DEPRECATED("Will be removed", static Local<String> NewExternal(
+ ExternalAsciiStringResource* resource));
/**
* Associate an external string resource with this string by transforming it
@@ -1801,15 +1947,17 @@ class V8_EXPORT String : public Primitive {
*/
bool CanMakeExternal();
- // TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
- V8_INLINE static Local<String> NewUndetectable(const char* data,
- int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromUtf8 instead",
+ V8_INLINE static Local<String> NewUndetectable(const char* data,
+ int length = -1));
- // TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied 16-bit character codes.*/
- V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
- int length = -1);
+ V8_DEPRECATED(
+ "Use NewFromTwoByte instead",
+ V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
+ int length = -1));
/**
* Converts an object to a UTF-8-encoded character array. Useful if
@@ -1843,8 +1991,8 @@ class V8_EXPORT String : public Primitive {
*/
class V8_EXPORT AsciiValue {
public:
- // TODO(dcarney): deprecate
- explicit AsciiValue(Handle<v8::Value> obj);
+ V8_DEPRECATED("Use Utf8Value instead",
+ explicit AsciiValue(Handle<v8::Value> obj));
~AsciiValue();
char* operator*() { return str_; }
const char* operator*() const { return str_; }
@@ -1898,11 +2046,9 @@ class V8_EXPORT Symbol : public Primitive {
// Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const;
- // Create a symbol without a print name.
- static Local<Symbol> New(Isolate* isolate);
-
- // Create a symbol with a print name.
- static Local<Symbol> New(Isolate *isolate, const char* data, int length = -1);
+ // Create a symbol. If data is not NULL, it will be used as a print name.
+ static Local<Symbol> New(
+ Isolate *isolate, const char* data = NULL, int length = -1);
V8_INLINE static Symbol* Cast(v8::Value* obj);
private:
@@ -1912,13 +2058,33 @@ class V8_EXPORT Symbol : public Primitive {
/**
+ * A private symbol
+ *
+ * This is an experimental feature. Use at your own risk.
+ */
+class V8_EXPORT Private : public Data {
+ public:
+ // Returns the print name string of the private symbol, or undefined if none.
+ Local<Value> Name() const;
+
+ // Create a private symbol. If data is not NULL, it will be the print name.
+ static Local<Private> New(
+ Isolate *isolate, const char* data = NULL, int length = -1);
+
+ private:
+ Private();
+};
+
+
+/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
class V8_EXPORT Number : public Primitive {
public:
double Value() const;
- static Local<Number> New(double value);
static Local<Number> New(Isolate* isolate, double value);
+ // Will be deprecated soon.
+ static Local<Number> New(double value);
V8_INLINE static Number* Cast(v8::Value* obj);
private:
Number();
@@ -1931,10 +2097,13 @@ class V8_EXPORT Number : public Primitive {
*/
class V8_EXPORT Integer : public Number {
public:
- static Local<Integer> New(int32_t value);
- static Local<Integer> NewFromUnsigned(uint32_t value);
+ static Local<Integer> New(Isolate* isolate, int32_t value);
+ static Local<Integer> NewFromUnsigned(Isolate* isolate, uint32_t value);
+ // Will be deprecated soon.
static Local<Integer> New(int32_t value, Isolate*);
static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
+ static Local<Integer> New(int32_t value);
+ static Local<Integer> NewFromUnsigned(uint32_t value);
int64_t Value() const;
V8_INLINE static Integer* Cast(v8::Value* obj);
private:
@@ -2081,6 +2250,17 @@ class V8_EXPORT Object : public Value {
AccessControl settings = DEFAULT);
/**
+ * Functionality for private properties.
+ * This is an experimental feature, use at your own risk.
+ * Note: Private properties are inherited. Do not rely on this, since it may
+ * change.
+ */
+ bool HasPrivate(Handle<Private> key);
+ bool SetPrivate(Handle<Private> key, Handle<Value> value);
+ bool DeletePrivate(Handle<Private> key);
+ Local<Value> GetPrivate(Handle<Private> key);
+
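An illustrative use of the experimental private-property API declared above (assumes a live isolate with an entered context; the key name is arbitrary):

    // Sketch: embedder-only state keyed by a v8::Private symbol.
    void TagObject(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
      v8::HandleScope scope(isolate);
      v8::Local<v8::Private> key =
          v8::Private::New(isolate, "embedder-tag");
      obj->SetPrivate(key, v8::Integer::New(isolate, 42));
      if (obj->HasPrivate(key)) {
        v8::Local<v8::Value> tag = obj->GetPrivate(key);
        (void)tag;
        obj->DeletePrivate(key);
      }
    }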
+ /**
* Returns an array containing the names of the enumerable properties
* of this object, including properties from prototype objects. The
* array returned by this method contains the same values as would
@@ -2265,7 +2445,7 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
- Local<Value> CallAsFunction(Handle<Object> recv,
+ Local<Value> CallAsFunction(Handle<Value> recv,
int argc,
Handle<Value> argv[]);
@@ -2276,6 +2456,8 @@ class V8_EXPORT Object : public Value {
*/
Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
+ static Local<Object> New(Isolate* isolate);
+ // Will be deprecated soon.
static Local<Object> New();
V8_INLINE static Object* Cast(Value* obj);
@@ -2304,7 +2486,8 @@ class V8_EXPORT Array : public Object {
* Creates a JavaScript array with the given length. If the length
* is negative the returned array will have length 0.
*/
- static Local<Array> New(int length = 0);
+ static Local<Array> New(Isolate* isolate, int length = 0);
+ V8_DEPRECATED("Will be removed", static Local<Array> New(int length = 0));
V8_INLINE static Array* Cast(Value* obj);
private:
@@ -2364,17 +2547,18 @@ class FunctionCallbackInfo {
V8_INLINE Isolate* GetIsolate() const;
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
+ static const int kArgsLength = 7;
protected:
friend class internal::FunctionCallbackArguments;
friend class internal::CustomArguments<FunctionCallbackInfo>;
- static const int kReturnValueIndex = 0;
- static const int kReturnValueDefaultValueIndex = -1;
- static const int kIsolateIndex = -2;
- static const int kDataIndex = -3;
- static const int kCalleeIndex = -4;
- static const int kHolderIndex = -5;
+ static const int kHolderIndex = 0;
+ static const int kIsolateIndex = 1;
+ static const int kReturnValueDefaultValueIndex = 2;
+ static const int kReturnValueIndex = 3;
+ static const int kDataIndex = 4;
+ static const int kCalleeIndex = 5;
+ static const int kContextSaveIndex = 6;
V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values,
@@ -2406,12 +2590,12 @@ class PropertyCallbackInfo {
friend class MacroAssembler;
friend class internal::PropertyCallbackArguments;
friend class internal::CustomArguments<PropertyCallbackInfo>;
- static const int kThisIndex = 0;
- static const int kDataIndex = -1;
- static const int kReturnValueIndex = -2;
- static const int kReturnValueDefaultValueIndex = -3;
- static const int kIsolateIndex = -4;
- static const int kHolderIndex = -5;
+ static const int kHolderIndex = 0;
+ static const int kIsolateIndex = 1;
+ static const int kReturnValueDefaultValueIndex = 2;
+ static const int kReturnValueIndex = 3;
+ static const int kDataIndex = 4;
+ static const int kThisIndex = 5;
V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {}
internal::Object** args_;
@@ -2437,7 +2621,7 @@ class V8_EXPORT Function : public Object {
Local<Object> NewInstance() const;
Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
- Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
+ Local<Value> Call(Handle<Value> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
@@ -2450,6 +2634,12 @@ class V8_EXPORT Function : public Object {
Handle<Value> GetInferredName() const;
/**
+ * User-defined name assigned to the "displayName" property of this function.
+ * Used to facilitate debugging and profiling of JavaScript code.
+ */
+ Handle<Value> GetDisplayName() const;
+
+ /**
* Returns zero based line number of function body and
* kLineOffsetNotFound if no information available.
*/
@@ -2461,10 +2651,14 @@ class V8_EXPORT Function : public Object {
int GetScriptColumnNumber() const;
/**
+ * Tells whether this function is builtin.
+ */
+ bool IsBuiltin() const;
+
+ /**
* Returns scriptId object.
- * DEPRECATED: use ScriptId() instead.
*/
- Handle<Value> GetScriptId() const;
+ V8_DEPRECATED("Use ScriptId instead", Handle<Value> GetScriptId() const);
/**
* Returns scriptId.
@@ -2556,7 +2750,9 @@ class V8_EXPORT ArrayBuffer : public Object {
* will be deallocated when it is garbage-collected,
* unless the object is externalized.
*/
- static Local<ArrayBuffer> New(size_t byte_length);
+ static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
+ V8_DEPRECATED("Will be removed",
+ static Local<ArrayBuffer> New(size_t byte_length));
/**
* Create a new ArrayBuffer over an existing memory block.
@@ -2564,7 +2760,10 @@ class V8_EXPORT ArrayBuffer : public Object {
* The memory block will not be reclaimed when a created ArrayBuffer
* is garbage-collected.
*/
- static Local<ArrayBuffer> New(void* data, size_t byte_length);
+ static Local<ArrayBuffer> New(Isolate* isolate, void* data,
+ size_t byte_length);
+ V8_DEPRECATED("Will be removed",
+ static Local<ArrayBuffer> New(void* data, size_t byte_length));
/**
* Returns true if ArrayBuffer is externalized, that is, does not
@@ -2627,10 +2826,6 @@ class V8_EXPORT ArrayBufferView : public Object {
* Size of a view in bytes.
*/
size_t ByteLength();
- /**
- * Base address of a view.
- */
- void* BaseAddress();
V8_INLINE static ArrayBufferView* Cast(Value* obj);
@@ -2828,11 +3023,12 @@ class V8_EXPORT DataView : public ArrayBufferView {
*/
class V8_EXPORT Date : public Object {
public:
- static Local<Value> New(double time);
+ static Local<Value> New(Isolate* isolate, double time);
+ V8_DEPRECATED("Will be removed", static Local<Value> New(double time));
- // Deprecated, use Date::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- double NumberValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ double NumberValue() const) { return ValueOf(); }
/**
* A specialization of Value::NumberValue that is more efficient
@@ -2854,7 +3050,9 @@ class V8_EXPORT Date : public Object {
* This API should not be called more than needed as it will
* negatively impact the performance of date operations.
*/
- static void DateTimeConfigurationChangeNotification();
+ static void DateTimeConfigurationChangeNotification(Isolate* isolate);
+ V8_DEPRECATED("Will be removed",
+ static void DateTimeConfigurationChangeNotification());
private:
static void CheckCast(v8::Value* obj);
@@ -2866,11 +3064,12 @@ class V8_EXPORT Date : public Object {
*/
class V8_EXPORT NumberObject : public Object {
public:
- static Local<Value> New(double value);
+ static Local<Value> New(Isolate* isolate, double value);
+ V8_DEPRECATED("Will be removed", static Local<Value> New(double value));
- // Deprecated, use NumberObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- double NumberValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ double NumberValue() const) { return ValueOf(); }
/**
* Returns the Number held by the object.
@@ -2891,9 +3090,9 @@ class V8_EXPORT BooleanObject : public Object {
public:
static Local<Value> New(bool value);
- // Deprecated, use BooleanObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- bool BooleanValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ bool BooleanValue() const) { return ValueOf(); }
/**
* Returns the Boolean held by the object.
@@ -2914,9 +3113,9 @@ class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Handle<String> value);
- // Deprecated, use StringObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- Local<String> StringValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ Local<String> StringValue() const) { return ValueOf(); }
/**
* Returns the String held by the object.
@@ -2939,9 +3138,9 @@ class V8_EXPORT SymbolObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Handle<Symbol> value);
- // Deprecated, use SymbolObject::ValueOf() instead.
- // TODO(svenpanne) Actually deprecate when Chrome is adapted.
- Local<Symbol> SymbolValue() const { return ValueOf(); }
+ V8_DEPRECATED(
+ "Use ValueOf instead",
+ Local<Symbol> SymbolValue() const) { return ValueOf(); }
/**
* Returns the Symbol held by the object.
@@ -3007,7 +3206,8 @@ class V8_EXPORT RegExp : public Object {
*/
class V8_EXPORT External : public Value {
public:
- static Local<External> New(void* value);
+ static Local<External> New(Isolate* isolate, void* value);
+ V8_DEPRECATED("Will be removed", static Local<External> New(void *value));
V8_INLINE static External* Cast(Value* obj);
void* Value() const;
private:
@@ -3026,7 +3226,9 @@ class V8_EXPORT Template : public Data {
/** Adds a property to each instance created by this template.*/
void Set(Handle<String> name, Handle<Data> value,
PropertyAttribute attributes = None);
- V8_INLINE void Set(const char* name, Handle<Data> value);
+ V8_INLINE void Set(Isolate* isolate, const char* name, Handle<Data> value);
+ V8_DEPRECATED("Will be removed",
+ V8_INLINE void Set(const char* name, Handle<Data> value));
void SetAccessorProperty(
Local<String> name,
@@ -3309,6 +3511,13 @@ class V8_EXPORT FunctionTemplate : public Template {
public:
/** Creates a function template.*/
static Local<FunctionTemplate> New(
+ Isolate* isolate,
+ FunctionCallback callback = 0,
+ Handle<Value> data = Handle<Value>(),
+ Handle<Signature> signature = Handle<Signature>(),
+ int length = 0);
+ // Will be deprecated soon.
+ static Local<FunctionTemplate> New(
FunctionCallback callback = 0,
Handle<Value> data = Handle<Value>(),
Handle<Signature> signature = Handle<Signature>(),
@@ -3395,6 +3604,8 @@ class V8_EXPORT FunctionTemplate : public Template {
class V8_EXPORT ObjectTemplate : public Template {
public:
/** Creates an ObjectTemplate. */
+ static Local<ObjectTemplate> New(Isolate* isolate);
+ // Will be deprecated soon.
static Local<ObjectTemplate> New();
/** Creates a new instance of this template.*/
@@ -3536,7 +3747,8 @@ class V8_EXPORT ObjectTemplate : public Template {
private:
ObjectTemplate();
- static Local<ObjectTemplate> New(Handle<FunctionTemplate> constructor);
+ static Local<ObjectTemplate> New(internal::Isolate* isolate,
+ Handle<FunctionTemplate> constructor);
friend class FunctionTemplate;
};
@@ -3547,10 +3759,18 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
class V8_EXPORT Signature : public Data {
public:
- static Local<Signature> New(Handle<FunctionTemplate> receiver =
+ static Local<Signature> New(Isolate* isolate,
+ Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>(),
int argc = 0,
Handle<FunctionTemplate> argv[] = 0);
+ V8_DEPRECATED("Will be removed",
+ static Local<Signature> New(Handle<FunctionTemplate> receiver =
+ Handle<FunctionTemplate>(),
+ int argc = 0,
+ Handle<FunctionTemplate> argv[] =
+ 0));
+
private:
Signature();
};
@@ -3562,8 +3782,13 @@ class V8_EXPORT Signature : public Data {
*/
class V8_EXPORT AccessorSignature : public Data {
public:
- static Local<AccessorSignature> New(Handle<FunctionTemplate> receiver =
+ static Local<AccessorSignature> New(Isolate* isolate,
+ Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>());
+ V8_DEPRECATED("Will be removed", static Local<AccessorSignature> New(
+ Handle<FunctionTemplate> receiver =
+ Handle<FunctionTemplate>()));
+
private:
AccessorSignature();
};
@@ -3667,8 +3892,18 @@ class V8_EXPORT Extension { // NOLINT
const char** deps = 0,
int source_length = -1);
virtual ~Extension() { }
- virtual v8::Handle<v8::FunctionTemplate>
- GetNativeFunction(v8::Handle<v8::String> name) {
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Handle<v8::String> name) {
+#if defined(V8_DEPRECATION_WARNINGS)
+ return v8::Handle<v8::FunctionTemplate>();
+#else
+ return GetNativeFunction(name);
+#endif
+ }
+
+ V8_DEPRECATED("Will be removed",
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name)) {
return v8::Handle<v8::FunctionTemplate>();
}
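A sketch of an extension written against the new isolate-aware hook (the extension name, source string and callback are illustrative):

    // Sketch: override GetNativeFunctionTemplate instead of the
    // deprecated GetNativeFunction.
    class HelloExtension : public v8::Extension {
     public:
      HelloExtension()
          : v8::Extension("v8/hello", "native function hello();") {}
      virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
          v8::Isolate* isolate, v8::Handle<v8::String> name) {
        return v8::FunctionTemplate::New(isolate, Hello);
      }
      static void Hello(const v8::FunctionCallbackInfo<v8::Value>& info) {
        info.GetReturnValue().Set(42);
      }
    };
    // Registered as usual, e.g. v8::RegisterExtension(new HelloExtension());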
@@ -3711,17 +3946,16 @@ class V8_EXPORT DeclareExtension {
// --- Statics ---
-
-Handle<Primitive> V8_EXPORT Undefined();
-Handle<Primitive> V8_EXPORT Null();
-Handle<Boolean> V8_EXPORT True();
-Handle<Boolean> V8_EXPORT False();
-
V8_INLINE Handle<Primitive> Undefined(Isolate* isolate);
V8_INLINE Handle<Primitive> Null(Isolate* isolate);
V8_INLINE Handle<Boolean> True(Isolate* isolate);
V8_INLINE Handle<Boolean> False(Isolate* isolate);
+V8_DEPRECATED("Will be removed", Handle<Primitive> V8_EXPORT Undefined());
+V8_DEPRECATED("Will be removed", Handle<Primitive> V8_EXPORT Null());
+V8_DEPRECATED("Will be removed", Handle<Boolean> V8_EXPORT True());
+V8_DEPRECATED("Will be removed", Handle<Boolean> V8_EXPORT False());
+
/**
* A set of constraints that specifies the limits of the runtime's memory use.
@@ -3735,21 +3969,34 @@ V8_INLINE Handle<Boolean> False(Isolate* isolate);
class V8_EXPORT ResourceConstraints {
public:
ResourceConstraints();
+
+ /**
+ * Configures the constraints with reasonable default values based on the
+ * capabilities of the current device the VM is running on.
+ *
+ * \param physical_memory The total amount of physical memory on the current
+ * device, in bytes.
+ * \param number_of_processors The number of CPUs available on the current
+ * device.
+ */
+ void ConfigureDefaults(uint64_t physical_memory,
+ uint32_t number_of_processors);
+ V8_DEPRECATED("Will be removed",
+ void ConfigureDefaults(uint64_t physical_memory));
+
int max_young_space_size() const { return max_young_space_size_; }
void set_max_young_space_size(int value) { max_young_space_size_ = value; }
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
- int max_executable_size() { return max_executable_size_; }
+ int max_executable_size() const { return max_executable_size_; }
void set_max_executable_size(int value) { max_executable_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
- Maybe<bool> is_memory_constrained() const { return is_memory_constrained_; }
- // If set to true, V8 will limit it's memory usage, at the potential cost of
- // lower performance. Note, this option is a tentative addition to the API
- // and may be removed or modified without warning.
- void set_memory_constrained(bool value) {
- is_memory_constrained_ = Maybe<bool>(value);
+ int max_available_threads() const { return max_available_threads_; }
+ // Set the number of threads available to V8, assuming at least 1.
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
}
private:
@@ -3757,11 +4004,15 @@ class V8_EXPORT ResourceConstraints {
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
- Maybe<bool> is_memory_constrained_;
+ int max_available_threads_;
};
-bool V8_EXPORT SetResourceConstraints(ResourceConstraints* constraints);
+/**
+ * Sets the given ResourceConstraints on the given Isolate.
+ */
+bool V8_EXPORT SetResourceConstraints(Isolate* isolate,
+ ResourceConstraints* constraints);
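A configuration sketch combining the new two-argument ConfigureDefaults with the isolate-aware SetResourceConstraints (the 2 GB / 4-core values are illustrative):

    // Sketch: size the heap for the current device, then apply.
    void ApplyConstraints(v8::Isolate* isolate) {
      v8::ResourceConstraints constraints;
      uint64_t physical_memory = 2ull * 1024 * 1024 * 1024;  // bytes
      uint32_t processors = 4;
      constraints.ConfigureDefaults(physical_memory, processors);
      constraints.set_max_available_threads(4);
      v8::SetResourceConstraints(isolate, &constraints);
    }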
// --- Exceptions ---
@@ -3773,13 +4024,9 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
-/**
- * Schedules an exception to be thrown when returning to JavaScript. When an
- * exception has been scheduled it is illegal to invoke any JavaScript
- * operation; the caller must return immediately and only after the exception
- * has been handled does it become legal to invoke JavaScript operations.
- */
-Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception);
+V8_DEPRECATED(
+ "Use Isolate::ThrowException instead",
+ Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception));
/**
* Create new error objects by calling the corresponding error object
@@ -3870,8 +4117,6 @@ enum GCCallbackFlags {
typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
-typedef void (*GCCallback)();
-
/**
* Collection of V8 heap information.
@@ -3975,16 +4220,27 @@ class V8_EXPORT Isolate {
*/
void Dispose();
+ V8_DEPRECATED("Use SetData(0, data) instead.",
+ V8_INLINE void SetData(void* data));
+ V8_DEPRECATED("Use GetData(0) instead.", V8_INLINE void* GetData());
+
/**
- * Associate embedder-specific data with the isolate
+ * Associate embedder-specific data with the isolate. |slot| has to be
+ * between 0 and GetNumberOfDataSlots() - 1.
*/
- V8_INLINE void SetData(void* data);
+ V8_INLINE void SetData(uint32_t slot, void* data);
/**
* Retrieve embedder-specific data from the isolate.
- * Returns NULL if SetData has never been called.
+ * Returns NULL if SetData has never been called for the given |slot|.
+ */
+ V8_INLINE void* GetData(uint32_t slot);
+
+ /**
+ * Returns the maximum number of available embedder data slots. Valid slots
+ * are in the range of 0 - GetNumberOfDataSlots() - 1.
*/
- V8_INLINE void* GetData();
+ V8_INLINE static uint32_t GetNumberOfDataSlots();
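A sketch of the slot-based embedder data API (EmbedderState is an illustrative type; slot 0 mirrors the old single-slot behaviour):

    struct EmbedderState { int generation; };  // illustrative

    void AttachState(v8::Isolate* isolate, EmbedderState* state) {
      uint32_t slot = 0;  // must be < Isolate::GetNumberOfDataSlots()
      isolate->SetData(slot, state);
    }

    EmbedderState* GetState(v8::Isolate* isolate) {
      return static_cast<EmbedderState*>(isolate->GetData(0));
    }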
/**
* Get statistics about the heap memory usage.
@@ -4004,7 +4260,7 @@ class V8_EXPORT Isolate {
* kept alive by JavaScript objects.
* \returns the adjusted value.
*/
- intptr_t AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes);
+ int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
/**
* Returns heap profiler for this isolate. Will return NULL until the isolate
@@ -4019,10 +4275,31 @@ class V8_EXPORT Isolate {
*/
CpuProfiler* GetCpuProfiler();
+ /** Returns true if this isolate has a current context. */
+ bool InContext();
+
/** Returns the context that is on the top of the stack. */
Local<Context> GetCurrentContext();
/**
+ * Returns the context of the calling JavaScript code. That is the
+ * context of the top-most JavaScript frame. If there are no
+ * JavaScript frames an empty handle is returned.
+ */
+ Local<Context> GetCallingContext();
+
+ /** Returns the last entered context. */
+ Local<Context> GetEnteredContext();
+
+ /**
+ * Schedules an exception to be thrown when returning to JavaScript. When an
+ * exception has been scheduled it is illegal to invoke any JavaScript
+ * operation; the caller must return immediately and only after the exception
+ * has been handled does it become legal to invoke JavaScript operations.
+ */
+ Local<Value> ThrowException(Local<Value> exception);
+
+ /**
* Allows the host application to group objects together. If one
* object in the group is alive, all objects in the group are alive.
* After each garbage collection, object groups are removed. It is
@@ -4033,8 +4310,8 @@ class V8_EXPORT Isolate {
* garbage collection types it is sufficient to provide object groups
* for partially dependent handles only.
*/
- void SetObjectGroupId(const Persistent<Value>& object,
- UniqueId id);
+ template<typename T> void SetObjectGroupId(const Persistent<T>& object,
+ UniqueId id);
/**
* Allows the host application to declare implicit references from an object
@@ -4043,8 +4320,8 @@ class V8_EXPORT Isolate {
* are removed. It is intended to be used in the before-garbage-collection
* callback function.
*/
- void SetReferenceFromGroup(UniqueId id,
- const Persistent<Value>& child);
+ template<typename T> void SetReferenceFromGroup(UniqueId id,
+ const Persistent<T>& child);
/**
* Allows the host application to declare implicit references from an object
@@ -4052,8 +4329,53 @@ class V8_EXPORT Isolate {
* too. After each garbage collection, all implicit references are removed. It
* is intended to be used in the before-garbage-collection callback function.
*/
- void SetReference(const Persistent<Object>& parent,
- const Persistent<Value>& child);
+ template<typename T, typename S>
+ void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
+
+ typedef void (*GCPrologueCallback)(Isolate* isolate,
+ GCType type,
+ GCCallbackFlags flags);
+ typedef void (*GCEpilogueCallback)(Isolate* isolate,
+ GCType type,
+ GCCallbackFlags flags);
+
+ /**
+ * Enables the host application to receive a notification before a
+ * garbage collection. Allocations are not allowed in the
+ * callback function; you therefore cannot manipulate objects (set
+ * or delete properties for example) since it is possible such
+ * operations will result in the allocation of objects. It is possible
+ * to specify the GCType filter for your callback. But it is not possible to
+ * register the same callback function two times with different
+ * GCType filters.
+ */
+ void AddGCPrologueCallback(
+ GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+ /**
+ * This function removes callback which was installed by
+ * AddGCPrologueCallback function.
+ */
+ void RemoveGCPrologueCallback(GCPrologueCallback callback);
+
+ /**
+ * Enables the host application to receive a notification after a
+ * garbage collection. Allocations are not allowed in the
+ * callback function; you therefore cannot manipulate objects (set
+ * or delete properties for example) since it is possible such
+ * operations will result in the allocation of objects. It is possible
+ * to specify the GCType filter for your callback. But it is not possible to
+ * register the same callback function two times with different
+ * GCType filters.
+ */
+ void AddGCEpilogueCallback(
+ GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+ /**
+ * This function removes callback which was installed by
+ * AddGCEpilogueCallback function.
+ */
+ void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
private:
Isolate();
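A usage sketch of the per-isolate GC callbacks declared above (OnGCPrologue is an illustrative name; per the comment, no allocation may happen inside the callback):

    static void OnGCPrologue(v8::Isolate* isolate,
                             v8::GCType type,
                             v8::GCCallbackFlags flags) {
      // Observe only: creating handles or JS objects here is forbidden.
    }

    void InstallCallbacks(v8::Isolate* isolate) {
      isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeAll);
      // ... later ...
      isolate->RemoveGCPrologueCallback(OnGCPrologue);
    }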
@@ -4062,8 +4384,11 @@ class V8_EXPORT Isolate {
Isolate& operator=(const Isolate&);
void* operator new(size_t size);
void operator delete(void*, size_t);
-};
+ void SetObjectGroupId(internal::Object** object, UniqueId id);
+ void SetReferenceFromGroup(UniqueId id, internal::Object** object);
+ void SetReference(internal::Object** parent, internal::Object** child);
+};
class V8_EXPORT StartupData {
public:
@@ -4412,16 +4737,6 @@ class V8_EXPORT V8 {
static void RemoveGCPrologueCallback(GCPrologueCallback callback);
/**
- * The function is deprecated. Please use AddGCPrologueCallback instead.
- * Enables the host application to receive a notification before a
- * garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects.
- */
- V8_DEPRECATED(static void SetGlobalGCPrologueCallback(GCCallback));
-
- /**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
@@ -4441,16 +4756,6 @@ class V8_EXPORT V8 {
static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
/**
- * The function is deprecated. Please use AddGCEpilogueCallback instead.
- * Enables the host application to receive a notification after a
- * major garbage collection. Allocations are not allowed in the
- * callback function, you therefore cannot manipulate objects (set
- * or delete properties for example) since it is possible such
- * operations will result in the allocation of objects.
- */
- V8_DEPRECATED(static void SetGlobalGCEpilogueCallback(GCCallback));
-
- /**
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
@@ -4498,11 +4803,6 @@ class V8_EXPORT V8 {
ReturnAddressLocationResolver return_address_resolver);
/**
- * Deprecated, use the variant with the Isolate parameter below instead.
- */
- V8_DEPRECATED(static bool SetFunctionEntryHook(FunctionEntryHook entry_hook));
-
- /**
* Allows the host application to provide the address of a function that's
* invoked on entry to every V8-generated function.
* Note that \p entry_hook is invoked at the very start of each
@@ -4541,10 +4841,10 @@ class V8_EXPORT V8 {
static void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
- // TODO(svenpanne) Really deprecate me when Chrome is fixed.
- /** Deprecated. Use Isolate::AdjustAmountOfExternalAllocatedMemory instead. */
- static intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes);
+ V8_DEPRECATED(
+ "Use Isolate::AdjustAmountOfExternalAllocatedMemory instead",
+ static int64_t AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes));
/**
* Forcefully terminate the current thread of JavaScript execution
@@ -4599,9 +4899,6 @@ class V8_EXPORT V8 {
*/
static bool Dispose();
- /** Deprecated. Use Isolate::GetHeapStatistics instead. */
- V8_DEPRECATED(static void GetHeapStatistics(HeapStatistics* heap_statistics));
-
/**
* Iterates through all external resources referenced from current isolate
* heap. GC is not invoked prior to iterating, therefore there is no
@@ -4659,6 +4956,18 @@ class V8_EXPORT V8 {
*/
static bool InitializeICU();
+ /**
+ * Sets the v8::Platform to use. This should be invoked before V8 is
+ * initialized.
+ */
+ static void InitializePlatform(Platform* platform);
+
+ /**
+ * Clears all references to the v8::Platform. This should be invoked after
+ * V8 has been disposed.
+ */
+ static void ShutdownPlatform();
+
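
A sketch of the intended call order for the platform hooks above (MyPlatform is a hypothetical embedder-provided v8::Platform implementation; this API revision does not ship a default platform):

    v8::Platform* platform = new MyPlatform();
    v8::V8::InitializePlatform(platform);  // before V8::Initialize()
    v8::V8::Initialize();
    // ... create isolates and run scripts ...
    v8::V8::Dispose();
    v8::V8::ShutdownPlatform();            // only after V8::Dispose()
    delete platform;
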
private:
V8();
@@ -4681,6 +4990,7 @@ class V8_EXPORT V8 {
template <class T> friend class Handle;
template <class T> friend class Local;
template <class T> friend class Eternal;
+ template <class T> friend class PersistentBase;
template <class T, class M> friend class Persistent;
friend class Context;
};
@@ -4840,20 +5150,16 @@ class V8_EXPORT ExtensionConfiguration {
class V8_EXPORT Context {
public:
/**
- * Returns the global proxy object or global object itself for
- * detached contexts.
+ * Returns the global proxy object.
*
- * Global proxy object is a thin wrapper whose prototype points to
- * actual context's global object with the properties like Object, etc.
- * This is done that way for security reasons (for more details see
+ * The global proxy object is a thin wrapper whose prototype points to the
+ * actual context's global object, which carries properties like Object, etc.
+ * This is done for security reasons (for more details see
* https://wiki.mozilla.org/Gecko:SplitWindow).
*
 * Please note that changes to the global proxy object's prototype most likely
- * would break VM---v8 expects only global object as a prototype of
- * global proxy object.
- *
- * If DetachGlobal() has been invoked, Global() would return actual global
- * object until global is reattached with ReattachGlobal().
+ * would break the VM; v8 expects only the global object as the prototype
+ * of the global proxy object.
*/
Local<Object> Global();
@@ -4864,18 +5170,6 @@ class V8_EXPORT Context {
void DetachGlobal();
/**
- * Reattaches a global object to a context. This can be used to
- * restore the connection between a global object and a context
- * after DetachGlobal has been called.
- *
- * \param global_object The global object to reattach to the
- * context. For this to work, the global object must be the global
- * object that was associated with this context before a call to
- * DetachGlobal.
- */
- void ReattachGlobal(Handle<Object> global_object);
-
- /**
* Creates a new context and returns a handle to the newly allocated
* context.
*
@@ -4899,25 +5193,14 @@ class V8_EXPORT Context {
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
Handle<Value> global_object = Handle<Value>());
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(static Persistent<Context> New(
- ExtensionConfiguration* extensions = NULL,
- Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
- Handle<Value> global_object = Handle<Value>()));
+ V8_DEPRECATED("Use Isolate::GetEnteredContext instead",
+ static Local<Context> GetEntered());
- /** Returns the last entered context. */
- static Local<Context> GetEntered();
+ V8_DEPRECATED("Use Isolate::GetCurrentContext instead",
+ static Local<Context> GetCurrent());
- // TODO(svenpanne) Actually deprecate this.
- /** Deprecated. Use Isolate::GetCurrentContext instead. */
- static Local<Context> GetCurrent();
-
- /**
- * Returns the context of the calling JavaScript code. That is the
- * context of the top-most JavaScript frame. If there are no
- * JavaScript frames an empty handle is returned.
- */
- static Local<Context> GetCalling();
+ V8_DEPRECATED("Use Isolate::GetCallingContext instead",
+ static Local<Context> GetCalling());
/**
* Sets the security token for the context. To access an object in
@@ -4948,8 +5231,8 @@ class V8_EXPORT Context {
/** Returns true if the context has experienced an out of memory situation. */
bool HasOutOfMemoryException();
- /** Returns true if V8 has a current context. */
- static bool InContext();
+ V8_DEPRECATED("Use Isolate::InContext instead",
+ static bool InContext());
/** Returns an isolate associated with a current context. */
v8::Isolate* GetIsolate();
@@ -5020,8 +5303,9 @@ class V8_EXPORT Context {
explicit V8_INLINE Scope(Handle<Context> context) : context_(context) {
context_->Enter();
}
- // TODO(dcarney): deprecate
- V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context) // NOLINT
+ V8_DEPRECATED(
+ "Use Handle version instead",
+ V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
: context_(Handle<Context>::New(isolate, context)) {
context_->Enter();
}
@@ -5125,9 +5409,6 @@ class V8_EXPORT Unlocker {
*/
V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(Unlocker());
-
~Unlocker();
private:
void Initialize(Isolate* isolate);
@@ -5143,26 +5424,9 @@ class V8_EXPORT Locker {
*/
V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(Locker());
-
~Locker();
/**
- * Start preemption.
- *
- * When preemption is started, a timer is fired every n milliseconds
- * that will switch between multiple threads that are in contention
- * for the V8 lock.
- */
- static void StartPreemption(int every_n_ms);
-
- /**
- * Stop preemption.
- */
- static void StopPreemption();
-
- /**
 * Returns whether or not the locker for a given isolate is locked by the
* current thread.
*/
@@ -5353,13 +5617,13 @@ class Internals {
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalAsciiRepresentationTag = 0x06;
- static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
- static const int kIsolateRootsOffset = 3 * kApiPointerSize;
+ static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
+ static const int kIsolateRootsOffset = 5 * kApiPointerSize;
static const int kUndefinedValueRootIndex = 5;
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 131;
+ static const int kEmptyStringRootIndex = 134;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5370,7 +5634,7 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xb1;
+ static const int kJSObjectType = 0xb2;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -5378,7 +5642,9 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
- static void CheckInitializedImpl(v8::Isolate* isolate);
+ static const uint32_t kNumIsolateDataSlots = 4;
+
+ V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
CheckInitializedImpl(isolate);
@@ -5441,15 +5707,17 @@ class Internals {
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
- V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, void* data) {
- uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset;
+ V8_INLINE static void SetEmbedderData(v8::Isolate *isolate,
+ uint32_t slot,
+ void *data) {
+ uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
- V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate) {
+ V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate, uint32_t slot) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
- kIsolateEmbedderDataOffset;
+ kIsolateEmbedderDataOffset + slot * kApiPointerSize;
return *reinterpret_cast<void**>(addr);
}
@@ -5494,26 +5762,12 @@ Local<T>::Local() : Handle<T>() { }
template <class T>
-Local<T> Local<T>::New(Handle<T> that) {
- if (that.IsEmpty()) return Local<T>();
- T* that_ptr = *that;
- internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
- if (internal::Internals::CanCastToHeapObject(that_ptr)) {
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
- reinterpret_cast<internal::HeapObject*>(*p))));
- }
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
-}
-
-
-template <class T>
Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
return New(isolate, that.val_);
}
template <class T>
-template <class M>
-Local<T> Local<T>::New(Isolate* isolate, const Persistent<T, M>& that) {
+Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
return New(isolate, that.val_);
}
@@ -5551,8 +5805,8 @@ Local<T> Eternal<T>::Get(Isolate* isolate) {
}
-template <class T, class M>
-T* Persistent<T, M>::New(Isolate* isolate, T* that) {
+template <class T>
+T* PersistentBase<T>::New(Isolate* isolate, T* that) {
if (that == NULL) return NULL;
internal::Object** p = reinterpret_cast<internal::Object**>(that);
return reinterpret_cast<T*>(
@@ -5565,7 +5819,7 @@ template <class T, class M>
template <class S, class M2>
void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
TYPE_CHECK(T, S);
- Reset();
+ this->Reset();
if (that.IsEmpty()) return;
internal::Object** p = reinterpret_cast<internal::Object**>(that.val_);
this->val_ = reinterpret_cast<T*>(V8::CopyPersistent(p));
@@ -5573,8 +5827,8 @@ void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
}
-template <class T, class M>
-bool Persistent<T, M>::IsIndependent() const {
+template <class T>
+bool PersistentBase<T>::IsIndependent() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5582,8 +5836,8 @@ bool Persistent<T, M>::IsIndependent() const {
}
-template <class T, class M>
-bool Persistent<T, M>::IsNearDeath() const {
+template <class T>
+bool PersistentBase<T>::IsNearDeath() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
uint8_t node_state =
@@ -5593,8 +5847,8 @@ bool Persistent<T, M>::IsNearDeath() const {
}
-template <class T, class M>
-bool Persistent<T, M>::IsWeak() const {
+template <class T>
+bool PersistentBase<T>::IsWeak() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
@@ -5602,17 +5856,17 @@ bool Persistent<T, M>::IsWeak() const {
}
-template <class T, class M>
-void Persistent<T, M>::Reset() {
+template <class T>
+void PersistentBase<T>::Reset() {
if (this->IsEmpty()) return;
V8::DisposeGlobal(reinterpret_cast<internal::Object**>(this->val_));
val_ = 0;
}
-template <class T, class M>
+template <class T>
template <class S>
-void Persistent<T, M>::Reset(Isolate* isolate, const Handle<S>& other) {
+void PersistentBase<T>::Reset(Isolate* isolate, const Handle<S>& other) {
TYPE_CHECK(T, S);
Reset();
if (other.IsEmpty()) return;
@@ -5620,10 +5874,10 @@ void Persistent<T, M>::Reset(Isolate* isolate, const Handle<S>& other) {
}
-template <class T, class M>
-template <class S, class M2>
-void Persistent<T, M>::Reset(Isolate* isolate,
- const Persistent<S, M2>& other) {
+template <class T>
+template <class S>
+void PersistentBase<T>::Reset(Isolate* isolate,
+ const PersistentBase<S>& other) {
TYPE_CHECK(T, S);
Reset();
if (other.IsEmpty()) return;
@@ -5631,9 +5885,9 @@ void Persistent<T, M>::Reset(Isolate* isolate,
}
-template <class T, class M>
+template <class T>
template <typename S, typename P>
-void Persistent<T, M>::SetWeak(
+void PersistentBase<T>::SetWeak(
P* parameter,
typename WeakCallbackData<S, P>::Callback callback) {
TYPE_CHECK(S, T);
@@ -5645,9 +5899,9 @@ void Persistent<T, M>::SetWeak(
}
-template <class T, class M>
+template <class T>
template <typename P>
-void Persistent<T, M>::SetWeak(
+void PersistentBase<T>::SetWeak(
P* parameter,
typename WeakCallbackData<T, P>::Callback callback) {
SetWeak<T, P>(parameter, callback);
@@ -5677,14 +5931,14 @@ void Persistent<T, M>::MakeWeak(
}
-template <class T, class M>
-void Persistent<T, M>::ClearWeak() {
+template <class T>
+void PersistentBase<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_));
}
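
A sketch of the weak-handle pattern these PersistentBase templates support (NativeState and the callback name are hypothetical; the signature follows the WeakCallbackData-based SetWeak declaration above):

    struct NativeState { int id; };

    static void OnWrapperWeak(
        const v8::WeakCallbackData<v8::Object, NativeState>& data) {
      // The wrapper is about to be collected; free the native side.
      delete data.GetParameter();
    }

    void TrackWrapper(v8::Persistent<v8::Object>& wrapper, NativeState* state) {
      wrapper.SetWeak(state, OnWrapperWeak);
      // wrapper.ClearWeak() would make the handle strong again.
    }
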
-template <class T, class M>
-void Persistent<T, M>::MarkIndependent() {
+template <class T>
+void PersistentBase<T>::MarkIndependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5693,8 +5947,8 @@ void Persistent<T, M>::MarkIndependent() {
}
-template <class T, class M>
-void Persistent<T, M>::MarkPartiallyDependent() {
+template <class T>
+void PersistentBase<T>::MarkPartiallyDependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5706,14 +5960,14 @@ void Persistent<T, M>::MarkPartiallyDependent() {
template <class T, class M>
T* Persistent<T, M>::ClearAndLeak() {
T* old;
- old = val_;
- val_ = NULL;
+ old = this->val_;
+ this->val_ = NULL;
return old;
}
-template <class T, class M>
-void Persistent<T, M>::SetWrapperClassId(uint16_t class_id) {
+template <class T>
+void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
@@ -5722,8 +5976,8 @@ void Persistent<T, M>::SetWrapperClassId(uint16_t class_id) {
}
-template <class T, class M>
-uint16_t Persistent<T, M>::WrapperClassId() const {
+template <class T>
+uint16_t PersistentBase<T>::WrapperClassId() const {
typedef internal::Internals I;
if (this->IsEmpty()) return 0;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
@@ -5777,7 +6031,6 @@ void ReturnValue<T>::Set(int32_t i) {
template<typename T>
void ReturnValue<T>::Set(uint32_t i) {
TYPE_CHECK(T, Integer);
- typedef internal::Internals I;
// Can't simply use INT32_MAX here for whatever reason.
bool fits_into_int32_t = (i & (1U << 31)) == 0;
if (V8_LIKELY(fits_into_int32_t)) {
@@ -5847,7 +6100,7 @@ FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Object** implicit_args,
template<typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
- if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
+ if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
return Local<Value>(reinterpret_cast<Value*>(values_ - i));
}
@@ -5928,19 +6181,30 @@ Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
}
+Handle<Boolean> Boolean::New(Isolate* isolate, bool value) {
+ return value ? True(isolate) : False(isolate);
+}
+
+
Handle<Boolean> Boolean::New(bool value) {
- return value ? True() : False();
+ return Boolean::New(Isolate::GetCurrent(), value);
+}
+
+
+void Template::Set(Isolate* isolate, const char* name, v8::Handle<Data> value) {
+ Set(v8::String::NewFromUtf8(isolate, name), value);
}
void Template::Set(const char* name, v8::Handle<Data> value) {
- Set(v8::String::New(name), value);
+ Set(Isolate::GetCurrent(), name, value);
}
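
A sketch combining the isolate-explicit Template::Set and Boolean::New overloads defined above (the property names are illustrative):

    void AddShellProperties(v8::Isolate* isolate,
                            v8::Handle<v8::ObjectTemplate> tmpl) {
      tmpl->Set(isolate, "version", v8::String::NewFromUtf8(isolate, "1.0"));
      tmpl->Set(isolate, "debug", v8::Boolean::New(isolate, false));
    }
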
Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
+ typedef internal::HeapObject HO;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
@@ -5948,7 +6212,7 @@ Local<Value> Object::GetInternalField(int index) {
if (I::GetInstanceType(obj) == I::kJSObjectType) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
- O** result = HandleScope::CreateHandle(value);
+ O** result = HandleScope::CreateHandle(reinterpret_cast<HO*>(obj), value);
return Local<Value>(reinterpret_cast<Value*>(result));
}
#endif
@@ -6387,21 +6651,69 @@ Handle<Boolean> False(Isolate* isolate) {
void Isolate::SetData(void* data) {
typedef internal::Internals I;
- I::SetEmbedderData(this, data);
+ I::SetEmbedderData(this, 0, data);
}
void* Isolate::GetData() {
typedef internal::Internals I;
- return I::GetEmbedderData(this);
+ return I::GetEmbedderData(this, 0);
+}
+
+
+void Isolate::SetData(uint32_t slot, void* data) {
+ typedef internal::Internals I;
+ I::SetEmbedderData(this, slot, data);
+}
+
+
+void* Isolate::GetData(uint32_t slot) {
+ typedef internal::Internals I;
+ return I::GetEmbedderData(this, slot);
+}
+
+
+uint32_t Isolate::GetNumberOfDataSlots() {
+ typedef internal::Internals I;
+ return I::kNumIsolateDataSlots;
+}
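
A sketch of the multi-slot embedder-data API (EmbedderState and kStateSlot are hypothetical; note that slot 0 aliases the legacy single-slot SetData/GetData shown above):

    struct EmbedderState { int scripts_run; };
    static const uint32_t kStateSlot = 1;  // must be < GetNumberOfDataSlots()

    void AttachState(v8::Isolate* isolate) {
      isolate->SetData(kStateSlot, new EmbedderState());
    }

    EmbedderState* StateOf(v8::Isolate* isolate) {
      return static_cast<EmbedderState*>(isolate->GetData(kStateSlot));
    }
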
+
+
+template<typename T>
+void Isolate::SetObjectGroupId(const Persistent<T>& object,
+ UniqueId id) {
+ TYPE_CHECK(Value, T);
+ SetObjectGroupId(reinterpret_cast<v8::internal::Object**>(object.val_), id);
+}
+
+
+template<typename T>
+void Isolate::SetReferenceFromGroup(UniqueId id,
+ const Persistent<T>& object) {
+ TYPE_CHECK(Value, T);
+ SetReferenceFromGroup(id,
+ reinterpret_cast<v8::internal::Object**>(object.val_));
+}
+
+
+template<typename T, typename S>
+void Isolate::SetReference(const Persistent<T>& parent,
+ const Persistent<S>& child) {
+ TYPE_CHECK(Object, T);
+ TYPE_CHECK(Value, S);
+ SetReference(reinterpret_cast<v8::internal::Object**>(parent.val_),
+ reinterpret_cast<v8::internal::Object**>(child.val_));
}
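
A sketch of the object-group plumbing above, as an embedder might call it before a GC to describe edges between its wrapper handles (all names are hypothetical):

    void DescribeWrapperGroup(v8::Isolate* isolate,
                              const v8::Persistent<v8::Object>& wrapper_a,
                              const v8::Persistent<v8::Object>& wrapper_b,
                              const v8::Persistent<v8::Value>& shared_child,
                              intptr_t native_id) {
      v8::UniqueId group(native_id);
      // Handles in one group live or die together as far as the GC is
      // concerned.
      isolate->SetObjectGroupId(wrapper_a, group);
      isolate->SetObjectGroupId(wrapper_b, group);
      // The group keeps shared_child alive while any member is reachable.
      isolate->SetReferenceFromGroup(group, shared_child);
    }
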
Local<Value> Context::GetEmbedderData(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
+ typedef internal::HeapObject HO;
typedef internal::Internals I;
- O** result = HandleScope::CreateHandle(I::ReadEmbedderData<O*>(this, index));
+ HO* context = *reinterpret_cast<HO**>(this);
+ O** result =
+ HandleScope::CreateHandle(context, I::ReadEmbedderData<O*>(this, index));
return Local<Value>(reinterpret_cast<Value*>(result));
#else
return SlowGetEmbedderData(index);
diff --git a/chromium/v8/include/v8config.h b/chromium/v8/include/v8config.h
index 6fe5c5aabc7..631ad0d0479 100644
--- a/chromium/v8/include/v8config.h
+++ b/chromium/v8/include/v8config.h
@@ -187,6 +187,7 @@
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
+// V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported
@@ -216,6 +217,7 @@
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
+# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
@@ -245,7 +247,9 @@
// older compilers.
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
@@ -320,12 +324,24 @@
// A macro to mark classes or functions as deprecated.
-#if !V8_DISABLE_DEPRECATIONS && V8_HAS_ATTRIBUTE_DEPRECATED
-# define V8_DEPRECATED(declarator) declarator __attribute__((deprecated))
-#elif !V8_DISABLE_DEPRECATIONS && V8_HAS_DECLSPEC_DEPRECATED
-# define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
+#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
+# define V8_DEPRECATED(message, declarator) \
+declarator __attribute__((deprecated(message)))
+#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
+# define V8_DEPRECATED(message, declarator) \
+declarator __attribute__((deprecated))
+#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
+# define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#else
-# define V8_DEPRECATED(declarator) declarator
+# define V8_DEPRECATED(message, declarator) declarator
+#endif
+
+
+// A macro to mark variables or types as unused, avoiding compiler warnings.
+#if V8_HAS_ATTRIBUTE_UNUSED
+# define V8_UNUSED __attribute__((unused))
+#else
+# define V8_UNUSED
#endif
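
A sketch of both macros in use (Widget, OldName and NewName are hypothetical; the V8_DEPRECATED form matches its two-argument usage elsewhere in this patch):

    class V8_EXPORT Widget {
     public:
      V8_DEPRECATED("Use NewName instead",
                    void OldName());
      void NewName();
    };

    // Suppresses unused-variable warnings when the constant is compiled out.
    static const int kTraceLevel V8_UNUSED = 0;
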
diff --git a/chromium/v8/preparser/preparser-process.cc b/chromium/v8/preparser/preparser-process.cc
deleted file mode 100644
index b8167443039..00000000000
--- a/chromium/v8/preparser/preparser-process.cc
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "../include/v8.h"
-#include "../include/v8stdint.h"
-#include "../include/v8-preparser.h"
-
-#include "../src/preparse-data-format.h"
-
-namespace i = v8::internal;
-
-// This file is only used for testing the preparser.
-// The first argument must be the path of a JavaScript source file, or
-// the flags "-e" and the next argument is then the source of a JavaScript
-// program.
-// Optionally this can be followed by the word "throws" (case sensitive),
-// which signals that the parsing is expected to throw - the default is
-// to expect the parsing to not throw.
-// The command line can further be followed by a message text (the
-// *type* of the exception to throw), and even more optionally, the
-// start and end position reported with the exception.
-//
-// This source file is preparsed and tested against the expectations, and if
-// successful, the resulting preparser data is written to stdout.
-// Diagnostic output is output on stderr.
-// The source file must contain only ASCII characters (UTF-8 isn't supported).
-// The file is read into memory, so it should have a reasonable size.
-
-
-// Adapts an ASCII string to the UnicodeInputStream interface.
-class AsciiInputStream : public v8::UnicodeInputStream {
- public:
- AsciiInputStream(const uint8_t* buffer, size_t length)
- : buffer_(buffer),
- end_offset_(static_cast<int>(length)),
- offset_(0) { }
-
- virtual ~AsciiInputStream() { }
-
- virtual void PushBack(int32_t ch) {
- offset_--;
-#ifdef DEBUG
- if (offset_ < 0 ||
- (ch != ((offset_ >= end_offset_) ? -1 : buffer_[offset_]))) {
- fprintf(stderr, "Invalid pushback: '%c' at offset %d.", ch, offset_);
- exit(1);
- }
-#endif
- }
-
- virtual int32_t Next() {
- if (offset_ >= end_offset_) {
- offset_++; // Increment anyway to allow symmetric pushbacks.
- return -1;
- }
- uint8_t next_char = buffer_[offset_];
-#ifdef DEBUG
- if (next_char > 0x7fu) {
- fprintf(stderr, "Non-ASCII character in input: '%c'.", next_char);
- exit(1);
- }
-#endif
- offset_++;
- return static_cast<int32_t>(next_char);
- }
-
- private:
- const uint8_t* buffer_;
- const int end_offset_;
- int offset_;
-};
-
-
-bool ReadBuffer(FILE* source, void* buffer, size_t length) {
- size_t actually_read = fread(buffer, 1, length, source);
- return (actually_read == length);
-}
-
-
-bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
- size_t actually_written = fwrite(buffer, 1, length, dest);
- return (actually_written == length);
-}
-
-
-class PreparseDataInterpreter {
- public:
- PreparseDataInterpreter(const uint8_t* data, int length)
- : data_(data), length_(length), message_(NULL) { }
-
- ~PreparseDataInterpreter() {
- if (message_ != NULL) delete[] message_;
- }
-
- bool valid() {
- int header_length =
- i::PreparseDataConstants::kHeaderSize * sizeof(int); // NOLINT
- return length_ >= header_length;
- }
-
- bool throws() {
- return valid() &&
- word(i::PreparseDataConstants::kHasErrorOffset) != 0;
- }
-
- const char* message() {
- if (message_ != NULL) return message_;
- if (!throws()) return NULL;
- int text_pos = i::PreparseDataConstants::kHeaderSize +
- i::PreparseDataConstants::kMessageTextPos;
- int length = word(text_pos);
- char* buffer = new char[length + 1];
- for (int i = 1; i <= length; i++) {
- int character = word(text_pos + i);
- buffer[i - 1] = character;
- }
- buffer[length] = '\0';
- message_ = buffer;
- return buffer;
- }
-
- int beg_pos() {
- if (!throws()) return -1;
- return word(i::PreparseDataConstants::kHeaderSize +
- i::PreparseDataConstants::kMessageStartPos);
- }
-
- int end_pos() {
- if (!throws()) return -1;
- return word(i::PreparseDataConstants::kHeaderSize +
- i::PreparseDataConstants::kMessageEndPos);
- }
-
- private:
- int word(int offset) {
- const int* word_data = reinterpret_cast<const int*>(data_);
- if (word_data + offset < reinterpret_cast<const int*>(data_ + length_)) {
- return word_data[offset];
- }
- return -1;
- }
-
- const uint8_t* const data_;
- const int length_;
- const char* message_;
-};
-
-
-template <typename T>
-class ScopedPointer {
- public:
- explicit ScopedPointer() : pointer_(NULL) {}
- explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
- ~ScopedPointer() { if (pointer_ != NULL) delete[] pointer_; }
- T& operator[](int index) { return pointer_[index]; }
- T* operator*() { return pointer_ ;}
- T* operator=(T* new_value) {
- if (pointer_ != NULL) delete[] pointer_;
- pointer_ = new_value;
- return new_value;
- }
- private:
- T* pointer_;
-};
-
-
-
-void fail(v8::PreParserData* data, const char* message, ...) {
- va_list args;
- va_start(args, message);
- vfprintf(stderr, message, args);
- va_end(args);
- fflush(stderr);
- if (data != NULL) {
- // Print preparser data to stdout.
- uint32_t size = static_cast<uint32_t>(data->size());
- fprintf(stderr, "LOG: data size: %u\n", size);
- if (!WriteBuffer(stdout, data->data(), size)) {
- perror("ERROR: Writing data");
- fflush(stderr);
- }
- }
- exit(EXIT_FAILURE);
-}
-
-
-bool IsFlag(const char* arg) {
- // Anything starting with '-' is considered a flag.
- // It's summarily ignored for now.
- return arg[0] == '-';
-}
-
-
-struct ExceptionExpectation {
- ExceptionExpectation()
- : throws(false), type(NULL), beg_pos(-1), end_pos(-1) { }
- bool throws;
- const char* type;
- int beg_pos;
- int end_pos;
-};
-
-
-void CheckException(v8::PreParserData* data,
- ExceptionExpectation* expects) {
- PreparseDataInterpreter reader(data->data(), static_cast<int>(data->size()));
- if (expects->throws) {
- if (!reader.throws()) {
- if (expects->type == NULL) {
- fail(data, "Didn't throw as expected\n");
- } else {
- fail(data, "Didn't throw \"%s\" as expected\n", expects->type);
- }
- }
- if (expects->type != NULL) {
- const char* actual_message = reader.message();
- if (strcmp(expects->type, actual_message)) {
- fail(data, "Wrong error message. Expected <%s>, found <%s> at %d..%d\n",
- expects->type, actual_message, reader.beg_pos(), reader.end_pos());
- }
- }
- if (expects->beg_pos >= 0) {
- if (expects->beg_pos != reader.beg_pos()) {
- fail(data, "Wrong error start position: Expected %i, found %i\n",
- expects->beg_pos, reader.beg_pos());
- }
- }
- if (expects->end_pos >= 0) {
- if (expects->end_pos != reader.end_pos()) {
- fail(data, "Wrong error end position: Expected %i, found %i\n",
- expects->end_pos, reader.end_pos());
- }
- }
- } else if (reader.throws()) {
- const char* message = reader.message();
- fail(data, "Throws unexpectedly with message: %s at location %d-%d\n",
- message, reader.beg_pos(), reader.end_pos());
- }
-}
-
-
-ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
- // Parse ["throws" [<exn-type> [<start> [<end>]]]].
- ExceptionExpectation expects;
- int arg_index = 0;
- while (argc > arg_index && strncmp("throws", argv[arg_index], 7)) {
- arg_index++;
- }
- if (argc > arg_index) {
- expects.throws = true;
- arg_index++;
- if (argc > arg_index && !IsFlag(argv[arg_index])) {
- expects.type = argv[arg_index];
- arg_index++;
- if (argc > arg_index && !IsFlag(argv[arg_index])) {
- expects.beg_pos = atoi(argv[arg_index]); // NOLINT
- arg_index++;
- if (argc > arg_index && !IsFlag(argv[arg_index])) {
- expects.end_pos = atoi(argv[arg_index]); // NOLINT
- }
- }
- }
- }
- return expects;
-}
-
-
-int main(int argc, const char* argv[]) {
- // Parse command line.
- // Format: preparser (<scriptfile> | -e "<source>")
- // ["throws" [<exn-type> [<start> [<end>]]]]
- // Any flags (except an initial -e) are ignored.
- // Flags must not separate "throws" and its arguments.
-
- // Check for mandatory filename argument.
- int arg_index = 1;
- if (argc <= arg_index) {
- fail(NULL, "ERROR: No filename on command line.\n");
- }
- const uint8_t* source = NULL;
- const char* filename = argv[arg_index];
- if (!strcmp(filename, "-e")) {
- arg_index++;
- if (argc <= arg_index) {
- fail(NULL, "ERROR: No source after -e on command line.\n");
- }
- source = reinterpret_cast<const uint8_t*>(argv[arg_index]);
- }
- // Check remainder of command line for exception expectations.
- arg_index++;
- ExceptionExpectation expects =
- ParseExpectation(argc - arg_index, argv + arg_index);
-
- v8::V8::Initialize();
-
- ScopedPointer<uint8_t> buffer;
- size_t length;
-
- if (source == NULL) {
- // Open JS file.
- FILE* input = fopen(filename, "rb");
- if (input == NULL) {
- perror("ERROR: Error opening file");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- // Find length of JS file.
- if (fseek(input, 0, SEEK_END) != 0) {
- perror("ERROR: Error during seek");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- length = static_cast<size_t>(ftell(input));
- rewind(input);
- // Read JS file into memory buffer.
- buffer = new uint8_t[length];
- if (!ReadBuffer(input, *buffer, length)) {
- perror("ERROR: Reading file");
- fflush(stderr);
- return EXIT_FAILURE;
- }
- fclose(input);
- source = *buffer;
- } else {
- length = strlen(reinterpret_cast<const char*>(source));
- }
-
- // Preparse input file.
- AsciiInputStream input_buffer(source, length);
- size_t kMaxStackSize = 64 * 1024 * sizeof(void*); // NOLINT
- v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
-
- // Fail if stack overflow.
- if (data.stack_overflow()) {
- fail(&data, "ERROR: Stack overflow\n");
- }
-
- // Check that the expected exception is thrown, if an exception is
- // expected.
- CheckException(&data, &expects);
-
- return EXIT_SUCCESS;
-}
diff --git a/chromium/v8/samples/lineprocessor.cc b/chromium/v8/samples/lineprocessor.cc
index 42048202fdd..925e148a0cc 100644
--- a/chromium/v8/samples/lineprocessor.cc
+++ b/chromium/v8/samples/lineprocessor.cc
@@ -99,7 +99,7 @@ enum MainCycleType {
const char* ToCString(const v8::String::Utf8Value& value);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
-v8::Handle<v8::String> ReadFile(const char* name);
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
v8::Handle<v8::String> ReadLine();
void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -174,14 +174,14 @@ int RunMain(int argc, char* argv[]) {
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
- script_source = v8::String::New(argv[i + 1]);
- script_name = v8::String::New("unnamed");
+ script_source = v8::String::NewFromUtf8(isolate, argv[i + 1]);
+ script_name = v8::String::NewFromUtf8(isolate, "unnamed");
i++;
script_param_counter++;
} else {
// Use argument as a name of file to load.
- script_source = ReadFile(str);
- script_name = v8::String::New(str);
+ script_source = ReadFile(isolate, str);
+ script_name = v8::String::NewFromUtf8(isolate, str);
if (script_source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
@@ -203,11 +203,12 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
- global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+ global->Set(v8::String::NewFromUtf8(isolate, "print"),
+ v8::FunctionTemplate::New(Print));
if (cycle_type == CycleInJs) {
   // Bind the global 'read_line' function to the C++ ReadLine callback.
- global->Set(v8::String::New("read_line"),
+ global->Set(v8::String::NewFromUtf8(isolate, "read_line"),
v8::FunctionTemplate::New(ReadLine));
}
@@ -259,7 +260,7 @@ int RunMain(int argc, char* argv[]) {
if (cycle_type == CycleInCpp) {
bool res = RunCppCycle(script,
- v8::Context::GetCurrent(),
+ isolate->GetCurrentContext(),
report_exceptions);
return !res;
} else {
@@ -277,7 +278,8 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Locker lock(isolate);
#endif // ENABLE_DEBUGGER_SUPPORT
- v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
+ v8::Handle<v8::String> fun_name =
+ v8::String::NewFromUtf8(isolate, "ProcessLine");
v8::Handle<v8::Value> process_val = context->Global()->Get(fun_name);
// If there is no Process function, or if it is not a function,
@@ -296,7 +298,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> input_line = ReadLine();
- if (input_line == v8::Undefined()) {
+ if (input_line == v8::Undefined(isolate)) {
continue;
}
@@ -306,7 +308,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Handle<v8::Value> result;
{
v8::TryCatch try_catch;
- result = process_fun->Call(v8::Context::GetCurrent()->Global(),
+ result = process_fun->Call(isolate->GetCurrentContext()->Global(),
argc, argv);
if (try_catch.HasCaught()) {
if (report_exceptions)
@@ -338,7 +340,7 @@ const char* ToCString(const v8::String::Utf8Value& value) {
// Reads a file into a v8 string.
-v8::Handle<v8::String> ReadFile(const char* name) {
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
@@ -353,7 +355,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
i += read;
}
fclose(file);
- v8::Handle<v8::String> result = v8::String::New(chars, size);
+ v8::Handle<v8::String> result =
+ v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
delete[] chars;
return result;
}
@@ -417,7 +420,8 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// function is called. Reads a string from standard input and returns.
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0) {
- v8::ThrowException(v8::String::New("Unexpected arguments"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Unexpected arguments"));
return;
}
args.GetReturnValue().Set(ReadLine());
@@ -435,8 +439,9 @@ v8::Handle<v8::String> ReadLine() {
#endif // ENABLE_DEBUGGER_SUPPORT
res = fgets(buffer, kBufferSize, stdin);
}
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
if (res == NULL) {
- v8::Handle<v8::Primitive> t = v8::Undefined();
+ v8::Handle<v8::Primitive> t = v8::Undefined(isolate);
return v8::Handle<v8::String>::Cast(t);
}
// Remove newline char
@@ -446,5 +451,5 @@ v8::Handle<v8::String> ReadLine() {
break;
}
}
- return v8::String::New(buffer);
+ return v8::String::NewFromUtf8(isolate, buffer);
}
diff --git a/chromium/v8/samples/process.cc b/chromium/v8/samples/process.cc
index e6f2ee3addd..b18a3ff875b 100644
--- a/chromium/v8/samples/process.cc
+++ b/chromium/v8/samples/process.cc
@@ -161,7 +161,8 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Create a template for the global object where we set the
// built-in global functions.
Handle<ObjectTemplate> global = ObjectTemplate::New();
- global->Set(String::New("log"), FunctionTemplate::New(LogCallback));
+ global->Set(String::NewFromUtf8(GetIsolate(), "log"),
+ FunctionTemplate::New(LogCallback));
// Each processor gets its own context so different processors don't
// affect each other. Context::New returns a persistent handle which
@@ -185,7 +186,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// The script compiled and ran correctly. Now we fetch out the
// Process function from the global object.
- Handle<String> process_name = String::New("Process");
+ Handle<String> process_name = String::NewFromUtf8(GetIsolate(), "Process");
Handle<Value> process_val = context->Global()->Get(process_name);
// If there is no Process function, or if it is not a function,
@@ -244,10 +245,12 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
v8::Local<v8::Context>::New(GetIsolate(), context_);
// Set the options object as a property on the global object.
- context->Global()->Set(String::New("options"), opts_obj);
+ context->Global()->Set(String::NewFromUtf8(GetIsolate(), "options"),
+ opts_obj);
Handle<Object> output_obj = WrapMap(output);
- context->Global()->Set(String::New("output"), output_obj);
+ context->Global()->Set(String::NewFromUtf8(GetIsolate(), "output"),
+ output_obj);
return true;
}
@@ -291,8 +294,8 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
   // Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles they will be
// automatically reclaimed.
- context_.Dispose();
- process_.Dispose();
+ context_.Reset();
+ process_.Reset();
}
@@ -324,7 +327,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
- Handle<External> map_ptr = External::New(obj);
+ Handle<External> map_ptr = External::New(GetIsolate(), obj);
// Store the map pointer in the JavaScript wrapper.
result->SetInternalField(0, map_ptr);
@@ -370,8 +373,9 @@ void JsHttpRequestProcessor::MapGet(Local<String> name,
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
- info.GetReturnValue().Set(
- String::New(value.c_str(), static_cast<int>(value.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), value.c_str(), String::kNormalString,
+ static_cast<int>(value.length())));
}
@@ -432,7 +436,7 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
- Handle<External> request_ptr = External::New(request);
+ Handle<External> request_ptr = External::New(GetIsolate(), request);
// Store the request pointer in the JavaScript wrapper.
result->SetInternalField(0, request_ptr);
@@ -465,8 +469,9 @@ void JsHttpRequestProcessor::GetPath(Local<String> name,
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
@@ -475,8 +480,9 @@ void JsHttpRequestProcessor::GetReferrer(
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
@@ -484,8 +490,9 @@ void JsHttpRequestProcessor::GetHost(Local<String> name,
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
@@ -494,8 +501,9 @@ void JsHttpRequestProcessor::GetUserAgent(
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
- info.GetReturnValue().Set(
- String::New(path.c_str(), static_cast<int>(path.length())));
+ info.GetReturnValue().Set(String::NewFromUtf8(
+ info.GetIsolate(), path.c_str(), String::kNormalString,
+ static_cast<int>(path.length())));
}
@@ -507,10 +515,18 @@ Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
result->SetInternalFieldCount(1);
// Add accessors for each of the fields of the request.
- result->SetAccessor(String::NewSymbol("path"), GetPath);
- result->SetAccessor(String::NewSymbol("referrer"), GetReferrer);
- result->SetAccessor(String::NewSymbol("host"), GetHost);
- result->SetAccessor(String::NewSymbol("userAgent"), GetUserAgent);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "path", String::kInternalizedString),
+ GetPath);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "referrer", String::kInternalizedString),
+ GetReferrer);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "host", String::kInternalizedString),
+ GetHost);
+ result->SetAccessor(
+ String::NewFromUtf8(isolate, "userAgent", String::kInternalizedString),
+ GetUserAgent);
// Again, return the result through the current handle scope.
return handle_scope.Close(result);
@@ -575,7 +591,7 @@ void ParseOptions(int argc,
// Reads a file into a v8 string.
-Handle<String> ReadFile(const string& name) {
+Handle<String> ReadFile(Isolate* isolate, const string& name) {
FILE* file = fopen(name.c_str(), "rb");
if (file == NULL) return Handle<String>();
@@ -590,7 +606,8 @@ Handle<String> ReadFile(const string& name) {
i += read;
}
fclose(file);
- Handle<String> result = String::New(chars, size);
+ Handle<String> result =
+ String::NewFromUtf8(isolate, chars, String::kNormalString, size);
delete[] chars;
return result;
}
@@ -636,7 +653,7 @@ int main(int argc, char* argv[]) {
}
Isolate* isolate = Isolate::GetCurrent();
HandleScope scope(isolate);
- Handle<String> source = ReadFile(file);
+ Handle<String> source = ReadFile(isolate, file);
if (source.IsEmpty()) {
fprintf(stderr, "Error reading '%s'.\n", file.c_str());
return 1;
diff --git a/chromium/v8/samples/samples.gyp b/chromium/v8/samples/samples.gyp
index be7b9ea696c..dfc7410070b 100644
--- a/chromium/v8/samples/samples.gyp
+++ b/chromium/v8/samples/samples.gyp
@@ -28,7 +28,7 @@
{
'variables': {
'v8_code': 1,
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'target_defaults': {
@@ -42,13 +42,13 @@
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ '<(icu_gyp_path):icudata',
],
}],
],
diff --git a/chromium/v8/samples/shell.cc b/chromium/v8/samples/shell.cc
index 710547c3419..f65185084b4 100644
--- a/chromium/v8/samples/shell.cc
+++ b/chromium/v8/samples/shell.cc
@@ -58,7 +58,7 @@ void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
-v8::Handle<v8::String> ReadFile(const char* name);
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
@@ -100,15 +100,20 @@ v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
- global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+ global->Set(v8::String::NewFromUtf8(isolate, "print"),
+ v8::FunctionTemplate::New(Print));
// Bind the global 'read' function to the C++ Read callback.
- global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
+ global->Set(v8::String::NewFromUtf8(isolate, "read"),
+ v8::FunctionTemplate::New(Read));
// Bind the global 'load' function to the C++ Load callback.
- global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
+ global->Set(v8::String::NewFromUtf8(isolate, "load"),
+ v8::FunctionTemplate::New(Load));
// Bind the 'quit' function
- global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
+ global->Set(v8::String::NewFromUtf8(isolate, "quit"),
+ v8::FunctionTemplate::New(Quit));
// Bind the 'version' function
- global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
+ global->Set(v8::String::NewFromUtf8(isolate, "version"),
+ v8::FunctionTemplate::New(Version));
return v8::Context::New(isolate, NULL, global);
}
@@ -140,17 +145,20 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// the argument into a JavaScript string.
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- v8::ThrowException(v8::String::New("Bad parameters"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Bad parameters"));
return;
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
- v8::Handle<v8::String> source = ReadFile(*file);
+ v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
args.GetReturnValue().Set(source);
@@ -165,20 +173,23 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
- v8::Handle<v8::String> source = ReadFile(*file);
+ v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- v8::ThrowException(v8::String::New("Error loading file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
return;
}
if (!ExecuteString(args.GetIsolate(),
source,
- v8::String::New(*file),
+ v8::String::NewFromUtf8(args.GetIsolate(), *file),
false,
false)) {
- v8::ThrowException(v8::String::New("Error executing file"));
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(args.GetIsolate(), "Error executing file"));
return;
}
}
@@ -198,12 +209,13 @@ void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(v8::String::New(v8::V8::GetVersion()));
+ args.GetReturnValue().Set(
+ v8::String::NewFromUtf8(args.GetIsolate(), v8::V8::GetVersion()));
}
// Reads a file into a v8 string.
-v8::Handle<v8::String> ReadFile(const char* name) {
+v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
@@ -218,7 +230,8 @@ v8::Handle<v8::String> ReadFile(const char* name) {
i += read;
}
fclose(file);
- v8::Handle<v8::String> result = v8::String::New(chars, size);
+ v8::Handle<v8::String> result =
+ v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
delete[] chars;
return result;
}
@@ -239,13 +252,15 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
"Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
- v8::Handle<v8::String> file_name = v8::String::New("unnamed");
- v8::Handle<v8::String> source = v8::String::New(argv[++i]);
+ v8::Handle<v8::String> file_name =
+ v8::String::NewFromUtf8(isolate, "unnamed");
+ v8::Handle<v8::String> source =
+ v8::String::NewFromUtf8(isolate, argv[++i]);
if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
} else {
// Use all other arguments as names of files to load and run.
- v8::Handle<v8::String> file_name = v8::String::New(str);
- v8::Handle<v8::String> source = ReadFile(str);
+ v8::Handle<v8::String> file_name = v8::String::NewFromUtf8(isolate, str);
+ v8::Handle<v8::String> source = ReadFile(isolate, str);
if (source.IsEmpty()) {
fprintf(stderr, "Error reading '%s'\n", str);
continue;
@@ -263,7 +278,8 @@ void RunShell(v8::Handle<v8::Context> context) {
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
- v8::Local<v8::String> name(v8::String::New("(shell)"));
+ v8::Local<v8::String> name(
+ v8::String::NewFromUtf8(context->GetIsolate(), "(shell)"));
while (true) {
char buffer[kBufferSize];
fprintf(stderr, "> ");
@@ -271,7 +287,7 @@ void RunShell(v8::Handle<v8::Context> context) {
if (str == NULL) break;
v8::HandleScope handle_scope(context->GetIsolate());
ExecuteString(context->GetIsolate(),
- v8::String::New(str),
+ v8::String::NewFromUtf8(context->GetIsolate(), str),
name,
true,
true);
diff --git a/chromium/v8/src/OWNERS b/chromium/v8/src/OWNERS
new file mode 100644
index 00000000000..f38fecad4ea
--- /dev/null
+++ b/chromium/v8/src/OWNERS
@@ -0,0 +1,2 @@
+per-file i18n.*=cira@chromium.org
+per-file i18n.*=mnita@google.com
diff --git a/chromium/v8/src/accessors.cc b/chromium/v8/src/accessors.cc
index 669c02baf36..4da9dd44ffe 100644
--- a/chromium/v8/src/accessors.cc
+++ b/chromium/v8/src/accessors.cc
@@ -78,6 +78,61 @@ MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
}
+static V8_INLINE bool CheckForName(Handle<String> name,
+ String* property_name,
+ int offset,
+ int* object_offset) {
+ if (name->Equals(property_name)) {
+ *object_offset = offset;
+ return true;
+ }
+ return false;
+}
+
+
+bool Accessors::IsJSObjectFieldAccessor(
+ Handle<Map> map, Handle<String> name,
+ int* object_offset) {
+ Isolate* isolate = map->GetIsolate();
+ switch (map->instance_type()) {
+ case JS_ARRAY_TYPE:
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ JSArray::kLengthOffset, object_offset);
+ case JS_TYPED_ARRAY_TYPE:
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ JSTypedArray::kLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSTypedArray::kByteLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_offset_string(),
+ JSTypedArray::kByteOffsetOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->buffer_string(),
+ JSTypedArray::kBufferOffset, object_offset);
+ case JS_ARRAY_BUFFER_TYPE:
+ return
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSArrayBuffer::kByteLengthOffset, object_offset);
+ case JS_DATA_VIEW_TYPE:
+ return
+ CheckForName(name, isolate->heap()->byte_length_string(),
+ JSDataView::kByteLengthOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->byte_offset_string(),
+ JSDataView::kByteOffsetOffset, object_offset) ||
+ CheckForName(name, isolate->heap()->buffer_string(),
+ JSDataView::kBufferOffset, object_offset);
+ default: {
+ if (map->instance_type() < FIRST_NONSTRING_TYPE) {
+ return
+ CheckForName(name, isolate->heap()->length_string(),
+ String::kLengthOffset, object_offset);
+ }
+ return false;
+ }
+ }
+}
+
+
//
// Accessors::ArrayLength
//
@@ -93,45 +148,49 @@ MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
// The helper function will 'flatten' Number objects.
-Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
+Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
+ Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
- JSValue* wrapper = JSValue::cast(value);
+ Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
- Map* number_map = isolate->context()->native_context()->
- number_function()->initial_map();
- if (wrapper->map() == number_map) return wrapper->value();
+ if (wrapper->map() ==
+ isolate->context()->native_context()->number_function()->initial_map()) {
+ return handle(wrapper->value(), isolate);
+ }
+
return value;
}
MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
- JSObject* object,
- Object* value,
+ JSObject* object_raw,
+ Object* value_raw,
void*) {
+ HandleScope scope(isolate);
+ Handle<JSObject> object(object_raw, isolate);
+ Handle<Object> value(value_raw, isolate);
+
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
- return object->SetLocalPropertyIgnoreAttributesTrampoline(
- isolate->heap()->length_string(), value, NONE);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
+ isolate->factory()->length_string(), value, NONE);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
value = FlattenNumber(isolate, value);
- // Need to call methods that may trigger GC.
- HandleScope scope(isolate);
-
- // Protect raw pointers.
- Handle<JSArray> array_handle(JSArray::cast(object), isolate);
- Handle<Object> value_handle(value, isolate);
+ Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
bool has_exception;
Handle<Object> uint32_v =
- Execution::ToUint32(isolate, value_handle, &has_exception);
+ Execution::ToUint32(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
Handle<Object> number_v =
- Execution::ToNumber(isolate, value_handle, &has_exception);
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
@@ -523,26 +582,28 @@ MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
- JSObject* object,
+ JSObject* object_raw,
Object* value_raw,
void*) {
- Heap* heap = isolate->heap();
- JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
- if (function_raw == NULL) return heap->undefined_value();
- if (!function_raw->should_have_prototype()) {
- // Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributesTrampoline(
- heap->prototype_string(), value_raw, NONE);
- }
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object_raw);
+ if (function_raw == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSFunction> function(function_raw, isolate);
+ Handle<JSObject> object(object_raw, isolate);
Handle<Object> value(value_raw, isolate);
+ if (!function->should_have_prototype()) {
+ // Since we hit this accessor, object will have no prototype property.
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(object,
+ isolate->factory()->prototype_string(), value, NONE);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
+ }
Handle<Object> old_value;
bool is_observed =
FLAG_harmony_observation &&
- *function == object &&
+ *function == *object &&
function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
@@ -556,7 +617,7 @@ MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
- function, "updated", isolate->factory()->prototype_string(), old_value);
+ function, "update", isolate->factory()->prototype_string(), old_value);
}
return *function;
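
The handlification in this hunk follows the pattern used throughout the patch:
raw Object* pointers are invalidated when a moving GC relocates the object,
while Handle<T> slots are updated by the collector, so every raw argument is
wrapped before any allocating call. A minimal sketch of the safe shape
(AllocatingCall is hypothetical):

    MaybeObject* SafeShape(Isolate* isolate, JSObject* object_raw) {
      HandleScope scope(isolate);
      Handle<JSObject> object(object_raw, isolate);  // protect first
      AllocatingCall(isolate);  // may trigger a moving GC
      return *object;           // the handle still points at the object
    }
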
diff --git a/chromium/v8/src/accessors.h b/chromium/v8/src/accessors.h
index d9a2130f618..723abd253a2 100644
--- a/chromium/v8/src/accessors.h
+++ b/chromium/v8/src/accessors.h
@@ -86,6 +86,13 @@ class Accessors : public AllStatic {
static Handle<AccessorInfo> MakeModuleExport(
Handle<String> name, int index, PropertyAttributes attributes);
+ // Returns true for properties that are accessors to object fields.
+  // If true, *object_offset contains the offset of the object field.
+ static bool IsJSObjectFieldAccessor(
+ Handle<Map> map, Handle<String> name,
+ int* object_offset);
+
+
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionSetPrototype(Isolate* isolate,
@@ -142,7 +149,7 @@ class Accessors : public AllStatic {
void*);
// Helper functions.
- static Object* FlattenNumber(Isolate* isolate, Object* value);
+ static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
static MaybeObject* IllegalSetter(Isolate* isolate,
JSObject*,
Object*,
diff --git a/chromium/v8/src/allocation-site-scopes.cc b/chromium/v8/src/allocation-site-scopes.cc
new file mode 100644
index 00000000000..bbfb39b122c
--- /dev/null
+++ b/chromium/v8/src/allocation-site-scopes.cc
@@ -0,0 +1,102 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "allocation-site-scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
+ Handle<AllocationSite> scope_site;
+ if (top().is_null()) {
+ // We are creating the top level AllocationSite as opposed to a nested
+ // AllocationSite.
+ InitializeTraversal(isolate()->factory()->NewAllocationSite());
+ scope_site = Handle<AllocationSite>(*top(), isolate());
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating top level AllocationSite %p\n",
+ static_cast<void*>(*scope_site));
+ }
+ } else {
+ ASSERT(!current().is_null());
+ scope_site = isolate()->factory()->NewAllocationSite();
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
+ static_cast<void*>(*top()),
+ static_cast<void*>(*current()),
+ static_cast<void*>(*scope_site));
+ }
+ current()->set_nested_site(*scope_site);
+ update_current_site(*scope_site);
+ }
+ ASSERT(!scope_site.is_null());
+ return scope_site;
+}
+
+
+void AllocationSiteCreationContext::ExitScope(
+ Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ if (!object.is_null() && !object->IsFailure()) {
+ bool top_level = !scope_site.is_null() &&
+ top().is_identical_to(scope_site);
+
+ scope_site->set_transition_info(*object);
+ if (FLAG_trace_creation_allocation_sites) {
+ if (top_level) {
+ PrintF("*** Setting AllocationSite %p transition_info %p\n",
+ static_cast<void*>(*scope_site),
+ static_cast<void*>(*object));
+ } else {
+ PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
+ static_cast<void*>(*top()),
+ static_cast<void*>(*scope_site),
+ static_cast<void*>(*object));
+ }
+ }
+ }
+}
+
+
+bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
+ if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::GetMode(object->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ static_cast<void*>(*object));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+} } // namespace v8::internal
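
A hedged sketch of how a boilerplate walker is expected to drive the creation
context: one EnterNewScope/ExitScope pair per (possibly nested) literal, so
the nested_site links mirror the literal's nesting; CopyBoilerplate is
hypothetical:

    static void WalkLiteral(AllocationSiteCreationContext* context,
                            Handle<JSObject> boilerplate) {
      Handle<AllocationSite> site = context->EnterNewScope();
      // May recurse into nested literals, creating nested sites.
      Handle<JSObject> copy = CopyBoilerplate(boilerplate);
      context->ExitScope(site, copy);  // records copy as transition_info
    }
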
diff --git a/chromium/v8/src/allocation-site-scopes.h b/chromium/v8/src/allocation-site-scopes.h
new file mode 100644
index 00000000000..a195b27d85a
--- /dev/null
+++ b/chromium/v8/src/allocation-site-scopes.h
@@ -0,0 +1,124 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_SITE_SCOPES_H_
+#define V8_ALLOCATION_SITE_SCOPES_H_
+
+#include "ast.h"
+#include "handles.h"
+#include "objects.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+// AllocationSiteContext is the base class for walking and copying a nested
+// boilerplate with AllocationSite and AllocationMemento support.
+class AllocationSiteContext {
+ public:
+ explicit AllocationSiteContext(Isolate* isolate) {
+ isolate_ = isolate;
+  }
+
+ Handle<AllocationSite> top() { return top_; }
+ Handle<AllocationSite> current() { return current_; }
+
+ bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
+
+ Isolate* isolate() { return isolate_; }
+
+ protected:
+ void update_current_site(AllocationSite* site) {
+ *(current_.location()) = site;
+ }
+
+ void InitializeTraversal(Handle<AllocationSite> site) {
+ top_ = site;
+ current_ = Handle<AllocationSite>(*top_, isolate());
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<AllocationSite> top_;
+ Handle<AllocationSite> current_;
+};
+
+
+// AllocationSiteCreationContext aids in the creation of AllocationSites to
+// accompany object literals.
+class AllocationSiteCreationContext : public AllocationSiteContext {
+ public:
+ explicit AllocationSiteCreationContext(Isolate* isolate)
+ : AllocationSiteContext(isolate) { }
+
+ Handle<AllocationSite> EnterNewScope();
+ void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
+};
+
+
+// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
+// behind some/all components of a copied object literal.
+class AllocationSiteUsageContext : public AllocationSiteContext {
+ public:
+ AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
+ bool activated)
+ : AllocationSiteContext(isolate),
+ top_site_(site),
+ activated_(activated) { }
+
+ inline Handle<AllocationSite> EnterNewScope() {
+ if (top().is_null()) {
+ InitializeTraversal(top_site_);
+ } else {
+      // Advance to the next site in the nested_site chain.
+ Object* nested_site = current()->nested_site();
+ // Something is wrong if we advance to the end of the list here.
+ ASSERT(nested_site->IsAllocationSite());
+ update_current_site(AllocationSite::cast(nested_site));
+ }
+ return Handle<AllocationSite>(*current(), isolate());
+ }
+
+ inline void ExitScope(Handle<AllocationSite> scope_site,
+ Handle<JSObject> object) {
+ // This assert ensures that we are pointing at the right sub-object in a
+ // recursive walk of a nested literal.
+ ASSERT(object.is_null() || *object == scope_site->transition_info());
+ }
+
+ bool ShouldCreateMemento(Handle<JSObject> object);
+
+ private:
+ Handle<AllocationSite> top_site_;
+ bool activated_;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_SITE_SCOPES_H_
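
The usage context replays the chain recorded by the creation context, so a
later walk over the same literal must Enter/Exit in exactly the same order;
the ASSERT in ExitScope checks this. A hedged sketch (CopyWithMemento is
hypothetical):

    static void ReplayLiteral(AllocationSiteUsageContext* context,
                              Handle<JSObject> boilerplate) {
      Handle<AllocationSite> site = context->EnterNewScope();
      if (context->ShouldCreateMemento(boilerplate)) {
        // Tag the new copy with an AllocationMemento for pretenuring.
        CopyWithMemento(boilerplate, site);
      }
      context->ExitScope(site, boilerplate);
    }
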
diff --git a/chromium/v8/src/allocation-tracker.cc b/chromium/v8/src/allocation-tracker.cc
new file mode 100644
index 00000000000..8044cef3c81
--- /dev/null
+++ b/chromium/v8/src/allocation-tracker.cc
@@ -0,0 +1,280 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "allocation-tracker.h"
+
+#include "heap-snapshot-generator.h"
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+AllocationTraceNode::AllocationTraceNode(
+ AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id)
+ : tree_(tree),
+ function_id_(shared_function_info_id),
+ total_size_(0),
+ allocation_count_(0),
+ id_(tree->next_node_id()) {
+}
+
+
+AllocationTraceNode::~AllocationTraceNode() {
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) {
+ for (int i = 0; i < children_.length(); i++) {
+ AllocationTraceNode* node = children_[i];
+ if (node->function_id() == id) return node;
+ }
+ return NULL;
+}
+
+
+AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) {
+ AllocationTraceNode* child = FindChild(id);
+ if (child == NULL) {
+ child = new AllocationTraceNode(tree_, id);
+ children_.Add(child);
+ }
+ return child;
+}
+
+
+void AllocationTraceNode::AddAllocation(unsigned size) {
+ total_size_ += size;
+ ++allocation_count_;
+}
+
+
+void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
+ OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
+ if (tracker != NULL) {
+ const char* name = "<unknown function>";
+ if (function_id_ != 0) {
+ AllocationTracker::FunctionInfo* info =
+ tracker->GetFunctionInfo(function_id_);
+ if (info != NULL) {
+ name = info->name;
+ }
+ }
+ OS::Print("%s #%u", name, id_);
+ } else {
+ OS::Print("%u #%u", function_id_, id_);
+ }
+ OS::Print("\n");
+ indent += 2;
+ for (int i = 0; i < children_.length(); i++) {
+ children_[i]->Print(indent, tracker);
+ }
+}
+
+
+AllocationTraceTree::AllocationTraceTree()
+ : next_node_id_(1),
+ root_(this, 0) {
+}
+
+
+AllocationTraceTree::~AllocationTraceTree() {
+}
+
+
+AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
+ const Vector<SnapshotObjectId>& path) {
+ AllocationTraceNode* node = root();
+ for (SnapshotObjectId* entry = path.start() + path.length() - 1;
+ entry != path.start() - 1;
+ --entry) {
+ node = node->FindOrAddChild(*entry);
+ }
+ return node;
+}
+
+
+void AllocationTraceTree::Print(AllocationTracker* tracker) {
+ OS::Print("[AllocationTraceTree:]\n");
+ OS::Print("Total size | Allocation count | Function id | id\n");
+ root()->Print(0, tracker);
+}
+
+void AllocationTracker::DeleteUnresolvedLocation(
+ UnresolvedLocation** location) {
+ delete *location;
+}
+
+
+AllocationTracker::FunctionInfo::FunctionInfo()
+ : name(""),
+ script_name(""),
+ script_id(0),
+ line(-1),
+ column(-1) {
+}
+
+
+static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
+AllocationTracker::AllocationTracker(
+ HeapObjectsMap* ids, StringsStorage* names)
+ : ids_(ids),
+ names_(names),
+ id_to_function_info_(AddressesMatch) {
+}
+
+
+AllocationTracker::~AllocationTracker() {
+ unresolved_locations_.Iterate(DeleteUnresolvedLocation);
+}
+
+
+void AllocationTracker::PrepareForSerialization() {
+ List<UnresolvedLocation*> copy(unresolved_locations_.length());
+ copy.AddAll(unresolved_locations_);
+ unresolved_locations_.Clear();
+ for (int i = 0; i < copy.length(); i++) {
+ copy[i]->Resolve();
+ delete copy[i];
+ }
+}
+
+
+void AllocationTracker::AllocationEvent(Address addr, int size) {
+ DisallowHeapAllocation no_allocation;
+ Heap* heap = ids_->heap();
+
+ // Mark the new block as FreeSpace to make sure the heap is iterable
+  // while we are capturing the stack trace.
+ FreeListNode::FromAddress(addr)->set_size(heap, size);
+ ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
+ ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+
+ Isolate* isolate = heap->isolate();
+ int length = 0;
+ StackTraceFrameIterator it(isolate);
+ while (!it.done() && length < kMaxAllocationTraceLength) {
+ JavaScriptFrame* frame = it.frame();
+ SharedFunctionInfo* shared = frame->function()->shared();
+ SnapshotObjectId id = ids_->FindOrAddEntry(
+ shared->address(), shared->Size(), false);
+ allocation_trace_buffer_[length++] = id;
+ AddFunctionInfo(shared, id);
+ it.Advance();
+ }
+ AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd(
+ Vector<SnapshotObjectId>(allocation_trace_buffer_, length));
+ top_node->AddAllocation(size);
+}
+
+
+static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
+ return ComputeIntegerHash(static_cast<uint32_t>(id),
+ v8::internal::kZeroHashSeed);
+}
+
+
+AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo(
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_.Lookup(
+ reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false);
+ if (entry == NULL) {
+ return NULL;
+ }
+ return reinterpret_cast<FunctionInfo*>(entry->value);
+}
+
+
+void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
+ SnapshotObjectId id) {
+ HashMap::Entry* entry = id_to_function_info_.Lookup(
+ reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
+ if (entry->value == NULL) {
+ FunctionInfo* info = new FunctionInfo();
+ info->name = names_->GetFunctionName(shared->DebugName());
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ if (script->name()->IsName()) {
+ Name* name = Name::cast(script->name());
+ info->script_name = names_->GetName(name);
+ }
+ info->script_id = script->id()->value();
+      // Converting the start offset into line and column may cause heap
+      // allocations, so we postpone them until snapshot serialization.
+ unresolved_locations_.Add(new UnresolvedLocation(
+ script,
+ shared->start_position(),
+ info));
+ }
+ entry->value = info;
+ }
+}
+
+
+AllocationTracker::UnresolvedLocation::UnresolvedLocation(
+ Script* script, int start, FunctionInfo* info)
+ : start_position_(start),
+ info_(info) {
+ script_ = Handle<Script>::cast(
+ script->GetIsolate()->global_handles()->Create(script));
+ GlobalHandles::MakeWeak(
+ reinterpret_cast<Object**>(script_.location()),
+ this, &HandleWeakScript);
+}
+
+
+AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
+ if (!script_.is_null()) {
+ script_->GetIsolate()->global_handles()->Destroy(
+ reinterpret_cast<Object**>(script_.location()));
+ }
+}
+
+
+void AllocationTracker::UnresolvedLocation::Resolve() {
+ if (script_.is_null()) return;
+ info_->line = GetScriptLineNumber(script_, start_position_);
+ info_->column = GetScriptColumnNumber(script_, start_position_);
+}
+
+
+void AllocationTracker::UnresolvedLocation::HandleWeakScript(
+ v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data) {
+ UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data);
+ location->script_ = Handle<Script>::null();
+ obj->Reset();
+}
+
+
+} } // namespace v8::internal
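
Note the path ordering contract: AllocationEvent stores the innermost frame at
index 0, and AddPathFromEnd walks the ids from last to first, so the tree is
rooted at the outermost caller. A minimal sketch (ids are arbitrary):

    AllocationTraceTree tree;
    SnapshotObjectId path[] = { 3 /* inner */, 2, 1 /* outer */ };
    AllocationTraceNode* node =
        tree.AddPathFromEnd(Vector<SnapshotObjectId>(path, 3));
    node->AddAllocation(64);  // 64 bytes attributed to the 1 -> 2 -> 3 chain
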
diff --git a/chromium/v8/src/allocation-tracker.h b/chromium/v8/src/allocation-tracker.h
new file mode 100644
index 00000000000..6844716a148
--- /dev/null
+++ b/chromium/v8/src/allocation-tracker.h
@@ -0,0 +1,137 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ALLOCATION_TRACKER_H_
+#define V8_ALLOCATION_TRACKER_H_
+
+namespace v8 {
+namespace internal {
+
+class HeapObjectsMap;
+
+class AllocationTraceTree;
+
+class AllocationTraceNode {
+ public:
+ AllocationTraceNode(AllocationTraceTree* tree,
+ SnapshotObjectId shared_function_info_id);
+ ~AllocationTraceNode();
+ AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id);
+ AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id);
+ void AddAllocation(unsigned size);
+
+ SnapshotObjectId function_id() const { return function_id_; }
+ unsigned allocation_size() const { return total_size_; }
+ unsigned allocation_count() const { return allocation_count_; }
+ unsigned id() const { return id_; }
+ Vector<AllocationTraceNode*> children() const { return children_.ToVector(); }
+
+ void Print(int indent, AllocationTracker* tracker);
+
+ private:
+ AllocationTraceTree* tree_;
+ SnapshotObjectId function_id_;
+ unsigned total_size_;
+ unsigned allocation_count_;
+ unsigned id_;
+ List<AllocationTraceNode*> children_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTraceNode);
+};
+
+
+class AllocationTraceTree {
+ public:
+ AllocationTraceTree();
+ ~AllocationTraceTree();
+ AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path);
+ AllocationTraceNode* root() { return &root_; }
+ unsigned next_node_id() { return next_node_id_++; }
+ void Print(AllocationTracker* tracker);
+
+ private:
+ unsigned next_node_id_;
+ AllocationTraceNode root_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTraceTree);
+};
+
+
+class AllocationTracker {
+ public:
+ struct FunctionInfo {
+ FunctionInfo();
+ const char* name;
+ const char* script_name;
+ int script_id;
+ int line;
+ int column;
+ };
+
+ AllocationTracker(HeapObjectsMap* ids, StringsStorage* names);
+ ~AllocationTracker();
+
+ void PrepareForSerialization();
+ void AllocationEvent(Address addr, int size);
+
+ AllocationTraceTree* trace_tree() { return &trace_tree_; }
+ HashMap* id_to_function_info() { return &id_to_function_info_; }
+ FunctionInfo* GetFunctionInfo(SnapshotObjectId id);
+
+ private:
+ void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id);
+
+ class UnresolvedLocation {
+ public:
+ UnresolvedLocation(Script* script, int start, FunctionInfo* info);
+ ~UnresolvedLocation();
+ void Resolve();
+
+ private:
+ static void HandleWeakScript(v8::Isolate* isolate,
+ v8::Persistent<v8::Value>* obj,
+ void* data);
+ Handle<Script> script_;
+ int start_position_;
+ FunctionInfo* info_;
+ };
+ static void DeleteUnresolvedLocation(UnresolvedLocation** location);
+
+ static const int kMaxAllocationTraceLength = 64;
+ HeapObjectsMap* ids_;
+ StringsStorage* names_;
+ AllocationTraceTree trace_tree_;
+ SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength];
+ HashMap id_to_function_info_;
+ List<UnresolvedLocation*> unresolved_locations_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ALLOCATION_TRACKER_H_
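
A hedged sketch of the intended call sequence (ids and names would be the
HeapObjectsMap and StringsStorage owned by the heap profiler; address and size
come from the allocation hook):

    AllocationTracker tracker(ids, names);
    // Called by the heap on each tracked allocation; captures the JS stack:
    tracker.AllocationEvent(address, size);
    // Line/column resolution is deferred until just before serialization:
    tracker.PrepareForSerialization();
    tracker.trace_tree()->Print(&tracker);  // debug dump of the trace tree
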
diff --git a/chromium/v8/src/allocation.cc b/chromium/v8/src/allocation.cc
index 94aaad3fd42..69edf6906cf 100644
--- a/chromium/v8/src/allocation.cc
+++ b/chromium/v8/src/allocation.cc
@@ -100,24 +100,4 @@ char* StrNDup(const char* str, int n) {
return result;
}
-
-void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
- next_ = other->next_;
- other->next_->previous_ = this;
- previous_ = other;
- other->next_ = this;
-}
-
-
-void PreallocatedStorage::Unlink() {
- next_->previous_ = previous_;
- previous_->next_ = next_;
-}
-
-
-PreallocatedStorage::PreallocatedStorage(size_t size)
- : size_(size) {
- previous_ = next_ = this;
-}
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/allocation.h b/chromium/v8/src/allocation.h
index 45bde4c4cb0..03cc8f5e73f 100644
--- a/chromium/v8/src/allocation.h
+++ b/chromium/v8/src/allocation.h
@@ -109,34 +109,6 @@ class FreeStoreAllocationPolicy {
};
-// Allocation policy for allocating in preallocated space.
-// Used as an allocation policy for ScopeInfo when generating
-// stack traces.
-class PreallocatedStorage {
- public:
- explicit PreallocatedStorage(size_t size);
- size_t size() { return size_; }
-
- private:
- size_t size_;
- PreallocatedStorage* previous_;
- PreallocatedStorage* next_;
-
- void LinkTo(PreallocatedStorage* other);
- void Unlink();
-
- friend class Isolate;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
-};
-
-
-struct PreallocatedStorageAllocationPolicy {
- INLINE(void* New(size_t size));
- INLINE(static void Delete(void* ptr));
-};
-
-
} } // namespace v8::internal
#endif // V8_ALLOCATION_H_
diff --git a/chromium/v8/src/api.cc b/chromium/v8/src/api.cc
index 71a8f4a6cf7..d7c76d5a13d 100644
--- a/chromium/v8/src/api.cc
+++ b/chromium/v8/src/api.cc
@@ -77,8 +77,7 @@
namespace v8 {
#define ON_BAILOUT(isolate, location, code) \
- if (IsDeadCheck(isolate, location) || \
- IsExecutionTerminatingCheck(isolate)) { \
+ if (IsExecutionTerminatingCheck(isolate)) { \
code; \
UNREACHABLE(); \
}
@@ -253,13 +252,6 @@ static inline bool ApiCheck(bool condition,
}
-static bool ReportV8Dead(const char* location) {
- FatalErrorCallback callback = GetFatalErrorHandler();
- callback(location, "V8 is no longer usable");
- return true;
-}
-
-
static bool ReportEmptyHandle(const char* location) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, "Reading from empty handle");
@@ -267,24 +259,6 @@ static bool ReportEmptyHandle(const char* location) {
}
-/**
- * IsDeadCheck checks that the vm is usable. If, for instance, the vm has been
- * out of memory at some point this check will fail. It should be called on
- * entry to all methods that touch anything in the heap, except destructors
- * which you sometimes can't avoid calling after the vm has crashed. Functions
- * that call EnsureInitialized or ON_BAILOUT don't have to also call
- * IsDeadCheck. ON_BAILOUT has the advantage over EnsureInitialized that you
- * can arrange to return if the VM is dead. This is needed to ensure that no VM
- * heap allocations are attempted on a dead VM. EnsureInitialized has the
- * advantage over ON_BAILOUT that it actually initializes the VM if this has not
- * yet been done.
- */
-static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
- return !isolate->IsInitialized()
- && isolate->IsDead() ? ReportV8Dead(location) : false;
-}
-
-
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (!isolate->IsInitialized()) return false;
if (isolate->has_scheduled_exception()) {
@@ -321,7 +295,6 @@ static bool InitializeHelper(i::Isolate* isolate) {
static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
const char* location) {
- if (IsDeadCheck(isolate, location)) return false;
if (isolate != NULL) {
if (isolate->IsInitialized()) return true;
}
@@ -500,19 +473,7 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ThrowException()")) {
- return v8::Handle<Value>();
- }
- ENTER_V8(isolate);
- // If we're passed an empty handle, we throw an undefined exception
- // to deal more gracefully with out of memory situations.
- if (value.IsEmpty()) {
- isolate->ScheduleThrow(isolate->heap()->undefined_value());
- } else {
- isolate->ScheduleThrow(*Utils::OpenHandle(*value));
- }
- return v8::Undefined();
+ return v8::Isolate::GetCurrent()->ThrowException(value);
}
@@ -603,12 +564,55 @@ ResourceConstraints::ResourceConstraints()
max_old_space_size_(0),
max_executable_size_(0),
stack_limit_(NULL),
- is_memory_constrained_() { }
+ max_available_threads_(0) { }
+
+void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
+ uint32_t number_of_processors) {
+ const int lump_of_memory = (i::kPointerSize / 4) * i::MB;
+#if V8_OS_ANDROID
+ // Android has higher physical memory requirements before raising the maximum
+ // heap size limits since it has no swap space.
+ const uint64_t low_limit = 512ul * i::MB;
+ const uint64_t medium_limit = 1ul * i::GB;
+ const uint64_t high_limit = 2ul * i::GB;
+#else
+ const uint64_t low_limit = 512ul * i::MB;
+ const uint64_t medium_limit = 768ul * i::MB;
+ const uint64_t high_limit = 1ul * i::GB;
+#endif
+ // The young_space_size should be a power of 2 and old_generation_size should
+ // be a multiple of Page::kPageSize.
+ if (physical_memory <= low_limit) {
+ set_max_young_space_size(2 * lump_of_memory);
+ set_max_old_space_size(128 * lump_of_memory);
+ set_max_executable_size(96 * lump_of_memory);
+ } else if (physical_memory <= medium_limit) {
+ set_max_young_space_size(8 * lump_of_memory);
+ set_max_old_space_size(256 * lump_of_memory);
+ set_max_executable_size(192 * lump_of_memory);
+ } else if (physical_memory <= high_limit) {
+ set_max_young_space_size(16 * lump_of_memory);
+ set_max_old_space_size(512 * lump_of_memory);
+ set_max_executable_size(256 * lump_of_memory);
+ } else {
+ set_max_young_space_size(16 * lump_of_memory);
+ set_max_old_space_size(700 * lump_of_memory);
+ set_max_executable_size(256 * lump_of_memory);
+ }
-bool SetResourceConstraints(ResourceConstraints* constraints) {
- i::Isolate* isolate = EnterIsolateIfNeeded();
+ set_max_available_threads(i::Max(i::Min(number_of_processors, 4u), 1u));
+}
+
+
+void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory) {
+ ConfigureDefaults(physical_memory, i::CPU::NumberOfProcessorsOnline());
+}
+
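
A worked example of the tiering above: on a 64-bit build kPointerSize is 8, so
lump_of_memory is 2 MB, and a machine with 4 GB of physical memory lands in
the top tier:

    //   max_young_space_size = 16  * 2 MB =   32 MB
    //   max_old_space_size   = 700 * 2 MB = 1400 MB
    //   max_executable_size  = 256 * 2 MB =  512 MB
    // On a 32-bit build (kPointerSize == 4) every figure is halved.
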
+bool SetResourceConstraints(Isolate* v8_isolate,
+ ResourceConstraints* constraints) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
int young_space_size = constraints->max_young_space_size();
int old_gen_size = constraints->max_old_space_size();
int max_executable_size = constraints->max_executable_size();
@@ -624,16 +628,13 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
- if (constraints->is_memory_constrained().has_value) {
- isolate->set_is_memory_constrained(
- constraints->is_memory_constrained().value);
- }
+
+ isolate->set_max_available_threads(constraints->max_available_threads());
return true;
}
i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
- if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
LOG_API(isolate, "Persistent::New");
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef DEBUG
@@ -728,50 +729,58 @@ int HandleScope::NumberOfHandles() {
}
-i::Object** HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(i::Isolate::Current(), value);
+i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
+ return i::HandleScope::CreateHandle(isolate, value);
}
-i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
- ASSERT(isolate == i::Isolate::Current());
- return i::HandleScope::CreateHandle(isolate, value);
+i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object,
+ i::Object* value) {
+ ASSERT(heap_object->IsHeapObject());
+ return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value);
}
-i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
- ASSERT(value->IsHeapObject());
- return reinterpret_cast<i::Object**>(
- i::HandleScope::CreateHandle(value->GetIsolate(), value));
+EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ escape_slot_ = CreateHandle(isolate, isolate->heap()->the_hole_value());
+ Initialize(v8_isolate);
+}
+
+
+i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
+ ApiCheck(*escape_slot_ == isolate_->heap()->the_hole_value(),
+ "EscapeableHandleScope::Escape",
+ "Escape value set twice");
+ if (escape_value == NULL) {
+ *escape_slot_ = isolate_->heap()->undefined_value();
+ return NULL;
+ }
+ *escape_slot_ = *escape_value;
+ return escape_slot_;
}
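
EscapableHandleScope replaces the HandleScope::Close() idiom: the escape slot
is reserved in the enclosing scope up front, and Escape() may be called at
most once, which the ApiCheck above enforces. A minimal usage sketch against
the public API:

    Local<String> MakeGreeting(Isolate* isolate) {
      EscapableHandleScope scope(isolate);
      Local<String> result = String::NewFromUtf8(isolate, "hello");
      return scope.Escape(result);  // copy into the pre-reserved outer slot
    }
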
void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
ENTER_V8(isolate);
-
isolate->handle_scope_implementer()->EnterContext(env);
-
isolate->handle_scope_implementer()->SaveContext(isolate->context());
isolate->set_context(*env);
}
void Context::Exit() {
- // Exit is essentially a static function and doesn't use the
- // receiver, so we have to get the current isolate from the thread
- // local.
+ // TODO(dcarney): fix this once chrome is fixed.
i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
-
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
+ i::Handle<i::Context> context = i::Handle<i::Context>::null();
+ ENTER_V8(isolate);
+ if (!ApiCheck(isolate->handle_scope_implementer()->LeaveContext(context),
"v8::Context::Exit()",
"Cannot exit non-entered context")) {
return;
}
-
// Content of 'last_context' could be NULL.
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
@@ -797,7 +806,7 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
bool can_grow,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
- bool ok = !IsDeadCheck(env->GetIsolate(), location) &&
+ bool ok =
ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
ApiCheck(index >= 0, location, "Negative index");
if (!ok) return i::Handle<i::FixedArray>();
@@ -974,7 +983,6 @@ void Template::Set(v8::Handle<String> name,
v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
const int kSize = 3;
@@ -993,7 +1001,6 @@ void Template::SetAccessorProperty(
v8::PropertyAttribute attribute,
v8::AccessControl access_control) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Template::SetAccessor()")) return;
ENTER_V8(isolate);
ASSERT(!name.IsEmpty());
ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
@@ -1019,9 +1026,6 @@ static void InitializeFunctionTemplate(
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) {
- return Local<ObjectTemplate>();
- }
ENTER_V8(isolate);
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
isolate);
@@ -1035,7 +1039,6 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value));
}
@@ -1061,7 +1064,9 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
@@ -1073,32 +1078,42 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
Local<FunctionTemplate> FunctionTemplate::New(
+ Isolate* isolate,
FunctionCallback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
- LOG_API(isolate, "FunctionTemplate::New");
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::FunctionTemplate::New()");
+ LOG_API(i_isolate, "FunctionTemplate::New");
+ ENTER_V8(i_isolate);
return FunctionTemplateNew(
- isolate, callback, data, signature, length, false);
+ i_isolate, callback, data, signature, length, false);
}
-Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
- int argc, Handle<FunctionTemplate> argv[]) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Signature::New()");
- LOG_API(isolate, "Signature::New");
- ENTER_V8(isolate);
+Local<FunctionTemplate> FunctionTemplate::New(
+ FunctionCallback callback,
+ v8::Handle<Value> data,
+ v8::Handle<Signature> signature,
+ int length) {
+ return New(Isolate::GetCurrent(), callback, data, signature, length);
+}
+
+Local<Signature> Signature::New(Isolate* isolate,
+ Handle<FunctionTemplate> receiver, int argc,
+ Handle<FunctionTemplate> argv[]) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Signature::New()");
+ LOG_API(i_isolate, "Signature::New");
+ ENTER_V8(i_isolate);
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
+ i_isolate->factory()->NewStruct(i::SIGNATURE_INFO_TYPE);
i::Handle<i::SignatureInfo> obj =
i::Handle<i::SignatureInfo>::cast(struct_obj);
if (!receiver.IsEmpty()) obj->set_receiver(*Utils::OpenHandle(*receiver));
if (argc > 0) {
- i::Handle<i::FixedArray> args = isolate->factory()->NewFixedArray(argc);
+ i::Handle<i::FixedArray> args = i_isolate->factory()->NewFixedArray(argc);
for (int i = 0; i < argc; i++) {
if (!argv[i].IsEmpty())
args->set(i, *Utils::OpenHandle(*argv[i]));
@@ -1109,6 +1124,20 @@ Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
}
+Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
+ int argc, Handle<FunctionTemplate> argv[]) {
+ return New(Isolate::GetCurrent(), receiver, argc, argv);
+}
+
+
+Local<AccessorSignature> AccessorSignature::New(
+ Isolate* isolate,
+ Handle<FunctionTemplate> receiver) {
+ return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
+}
+
+
+// While this is just a cast, it's lame not to use an Isolate parameter.
Local<AccessorSignature> AccessorSignature::New(
Handle<FunctionTemplate> receiver) {
return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
@@ -1262,7 +1291,7 @@ int TypeSwitch::match(v8::Handle<Value> value) {
i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
i::FixedArray* types = i::FixedArray::cast(info->types());
for (int i = 0; i < types->length(); i++) {
- if (obj->IsInstanceOf(i::FunctionTemplateInfo::cast(types->get(i))))
+ if (i::FunctionTemplateInfo::cast(types->get(i))->IsTemplateFor(*obj))
return i + 1;
}
return 0;
@@ -1278,7 +1307,6 @@ int TypeSwitch::match(v8::Handle<Value> value) {
void FunctionTemplate::SetCallHandler(FunctionCallback callback,
v8::Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
@@ -1286,7 +1314,9 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_call_code(*obj);
}
@@ -1324,7 +1354,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
isolate->factory()->NewExecutableAccessorInfo();
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
}
@@ -1349,14 +1381,13 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
- || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
+ if (EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
return Local<ObjectTemplate>();
ENTER_V8(isolate);
i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this);
if (handle->instance_template()->IsUndefined()) {
Local<ObjectTemplate> templ =
- ObjectTemplate::New(ToApiHandle<FunctionTemplate>(handle));
+ ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
handle->set_instance_template(*Utils::OpenHandle(*templ));
}
i::Handle<i::ObjectTemplateInfo> result(
@@ -1367,7 +1398,6 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
void FunctionTemplate::SetLength(int length) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetLength()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_length(length);
}
@@ -1375,7 +1405,6 @@ void FunctionTemplate::SetLength(int length) {
void FunctionTemplate::SetClassName(Handle<String> name) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name));
}
@@ -1383,9 +1412,6 @@ void FunctionTemplate::SetClassName(Handle<String> name) {
void FunctionTemplate::SetHiddenPrototype(bool value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_hidden_prototype(value);
}
@@ -1393,9 +1419,6 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
void FunctionTemplate::ReadOnlyPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_read_only_prototype(true);
}
@@ -1403,9 +1426,6 @@ void FunctionTemplate::ReadOnlyPrototype() {
void FunctionTemplate::RemovePrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::RemovePrototype()")) {
- return;
- }
ENTER_V8(isolate);
Utils::OpenHandle(this)->set_remove_prototype(true);
}
@@ -1414,17 +1434,19 @@ void FunctionTemplate::RemovePrototype() {
// --- O b j e c t T e m p l a t e ---
+Local<ObjectTemplate> ObjectTemplate::New(Isolate* isolate) {
+ return New(reinterpret_cast<i::Isolate*>(isolate), Local<FunctionTemplate>());
+}
+
+
Local<ObjectTemplate> ObjectTemplate::New() {
- return New(Local<FunctionTemplate>());
+ return New(i::Isolate::Current(), Local<FunctionTemplate>());
}
Local<ObjectTemplate> ObjectTemplate::New(
+ i::Isolate* isolate,
v8::Handle<FunctionTemplate> constructor) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) {
- return Local<ObjectTemplate>();
- }
EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8(isolate);
@@ -1495,7 +1517,6 @@ static bool TemplateSetAccessor(
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(template_obj)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
@@ -1551,9 +1572,6 @@ void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1571,7 +1589,9 @@ void ObjectTemplate::SetNamedPropertyHandler(
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
cons->set_named_property_handler(*obj);
}
@@ -1579,7 +1599,6 @@ void ObjectTemplate::SetNamedPropertyHandler(
void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1596,9 +1615,6 @@ void ObjectTemplate::SetAccessCheckCallbacks(
Handle<Value> data,
bool turned_on_by_default) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1611,7 +1627,9 @@ void ObjectTemplate::SetAccessCheckCallbacks(
SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
info->set_data(*Utils::OpenHandle(*data));
i::FunctionTemplateInfo* constructor =
@@ -1630,9 +1648,6 @@ void ObjectTemplate::SetIndexedPropertyHandler(
IndexedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1650,7 +1665,9 @@ void ObjectTemplate::SetIndexedPropertyHandler(
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
cons->set_indexed_property_handler(*obj);
}
@@ -1659,10 +1676,6 @@ void ObjectTemplate::SetIndexedPropertyHandler(
void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
- return;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(this);
@@ -1674,26 +1687,21 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
obj->set_data(*Utils::OpenHandle(*data));
cons->set_instance_call_handler(*obj);
}
int ObjectTemplate::InternalFieldCount() {
- if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
- "v8::ObjectTemplate::InternalFieldCount()")) {
- return 0;
- }
return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
}
void ObjectTemplate::SetInternalFieldCount(int value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) {
- return;
- }
if (!ApiCheck(i::Smi::IsValid(value),
"v8::ObjectTemplate::SetInternalFieldCount()",
"Invalid internal field count")) {
@@ -1713,10 +1721,13 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
// --- S c r i p t D a t a ---
-ScriptData* ScriptData::PreCompile(const char* input, int length) {
+ScriptData* ScriptData::PreCompile(v8::Isolate* isolate,
+ const char* input,
+ int length) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::PreParserApi::PreParse(i::Isolate::Current(), &stream);
+ return i::PreParserApi::PreParse(
+ reinterpret_cast<i::Isolate*>(isolate), &stream);
}
@@ -1763,13 +1774,13 @@ Local<Script> Script::New(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
LOG_API(isolate, "Script::New");
ENTER_V8(isolate);
i::SharedFunctionInfo* raw_result = NULL;
{ i::HandleScope scope(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::Object> name_obj;
int line_offset = 0;
int column_offset = 0;
@@ -1786,8 +1797,9 @@ Local<Script> Script::New(v8::Handle<String> source,
static_cast<int>(origin->ResourceColumnOffset()->Value());
}
if (!origin->ResourceIsSharedCrossOrigin().IsEmpty()) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
is_shared_cross_origin =
- origin->ResourceIsSharedCrossOrigin() == v8::True();
+ origin->ResourceIsSharedCrossOrigin() == v8::True(v8_isolate);
}
}
EXCEPTION_PREAMBLE(isolate);
@@ -1831,7 +1843,8 @@ Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin,
v8::ScriptData* pre_data,
v8::Handle<String> script_data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
LOG_API(isolate, "Script::Compile");
ENTER_V8(isolate);
@@ -1858,7 +1871,11 @@ Local<Script> Script::Compile(v8::Handle<String> source,
Local<Value> Script::Run() {
- i::Isolate* isolate = i::Isolate::Current();
+  // If execution is terminating, Compile() may have returned an empty
+  // handle, so Compile(script)->Run() can reach this point with a NULL
+  // receiver; bail out instead of dereferencing it.
+ if (this == NULL) return Local<Value>();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
@@ -1867,7 +1884,6 @@ Local<Value> Script::Run() {
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSFunction> fun;
if (obj->IsSharedFunctionInfo()) {
i::Handle<i::SharedFunctionInfo>
@@ -1905,7 +1921,9 @@ static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
Local<Value> Script::Id() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>());
LOG_API(isolate, "Script::Id");
i::Object* raw_id = NULL;
@@ -1922,7 +1940,9 @@ Local<Value> Script::Id() {
int Script::GetId() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::Id()", return -1);
LOG_API(isolate, "Script::Id");
{
@@ -1935,10 +1955,11 @@ int Script::GetId() {
int Script::GetLineNumber(int code_pos) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
LOG_API(isolate, "Script::GetLineNumber");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsScript()) {
i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
return i::GetScriptLineNumber(script, code_pos);
@@ -1949,10 +1970,11 @@ int Script::GetLineNumber(int code_pos) {
Handle<Value> Script::GetScriptName() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
LOG_API(isolate, "Script::GetName");
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsScript()) {
i::Object* name = i::Script::cast(*obj)->name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
@@ -1963,7 +1985,9 @@ Handle<Value> Script::GetScriptName() {
void Script::SetData(v8::Handle<String> data) {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::HeapObject> obj =
+ i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
+ i::Isolate* isolate = obj->GetIsolate();
ON_BAILOUT(isolate, "v8::Script::SetData()", return);
LOG_API(isolate, "Script::SetData");
{
@@ -1995,8 +2019,9 @@ v8::TryCatch::TryCatch()
v8::TryCatch::~TryCatch() {
ASSERT(isolate_ == i::Isolate::Current());
if (rethrow_) {
- v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate_));
- v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
+ v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(isolate, Exception());
if (HasCaught() && capture_message_) {
// If an exception was caught and rethrow_ is indicated, the saved
// message, script, and location need to be restored to Isolate TLS
@@ -2006,7 +2031,7 @@ v8::TryCatch::~TryCatch() {
isolate_->RestorePendingMessageFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::ThrowException(exc);
+ reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
ASSERT(!isolate_->thread_local_top()->rethrowing_message_);
} else {
isolate_->UnregisterTryCatchHandler(this);
@@ -2032,7 +2057,7 @@ bool v8::TryCatch::HasTerminated() const {
v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
if (!HasCaught()) return v8::Local<v8::Value>();
rethrow_ = true;
- return v8::Undefined();
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate_));
}
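
The rethrow contract: ReThrow() only marks the TryCatch, and the actual throw
happens in the destructor, which now routes through Isolate::ThrowException
instead of the deprecated global v8::ThrowException. A hedged usage sketch:

    Handle<Value> CallAndPropagate(Handle<Function> fn, Handle<Object> recv) {
      TryCatch try_catch;
      Handle<Value> result = fn->Call(recv, 0, NULL);
      if (try_catch.HasCaught()) {
        return try_catch.ReThrow();  // real throw happens in ~TryCatch
      }
      return result;
    }
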
@@ -2056,7 +2081,7 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
i::HandleScope scope(isolate_);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate_->factory()->stack_string();
- if (!obj->HasProperty(*name)) return v8::Local<Value>();
+ if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>();
i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name);
if (value.is_null()) return v8::Local<Value>();
return v8::Utils::ToLocal(scope.CloseAndEscape(value));
@@ -2106,21 +2131,18 @@ Local<String> Message::Get() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj);
Local<String> result = Utils::ToLocal(raw_result);
- return scope.Close(result);
+ return scope.Escape(result);
}
v8::Handle<Value> Message::GetScriptResourceName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.name.
@@ -2129,17 +2151,14 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
isolate));
i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(),
isolate);
- return scope.Close(Utils::ToLocal(resource_name));
+ return scope.Escape(Utils::ToLocal(resource_name));
}
v8::Handle<Value> Message::GetScriptData() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) {
- return Local<Value>();
- }
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.data.
@@ -2147,24 +2166,21 @@ v8::Handle<Value> Message::GetScriptData() const {
i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
isolate));
i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
- return scope.Close(Utils::ToLocal(data));
+ return scope.Escape(Utils::ToLocal(data));
}
v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) {
- return Local<v8::StackTrace>();
- }
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
i::Handle<i::JSArray> stackTrace =
i::Handle<i::JSArray>::cast(stackFramesObj);
- return scope.Close(Utils::StackTraceToLocal(stackTrace));
+ return scope.Escape(Utils::StackTraceToLocal(stackTrace));
}
@@ -2215,7 +2231,6 @@ int Message::GetLineNumber() const {
int Message::GetStartPosition() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -2226,7 +2241,6 @@ int Message::GetStartPosition() const {
int Message::GetEndPosition() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -2237,9 +2251,6 @@ int Message::GetEndPosition() const {
int Message::GetStartColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) {
- return kNoColumnInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -2255,7 +2266,6 @@ int Message::GetStartColumn() const {
int Message::GetEndColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return kNoColumnInfo;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -2275,7 +2285,6 @@ int Message::GetEndColumn() const {
bool Message::IsSharedCrossOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Message::IsSharedCrossOrigin()")) return 0;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSMessageObject> message =
@@ -2291,25 +2300,29 @@ Local<String> Message::GetSourceLine() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine",
Utils::OpenHandle(this),
&has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
if (result->IsString()) {
- return scope.Close(Utils::ToLocal(i::Handle<i::String>::cast(result)));
+ return scope.Escape(Utils::ToLocal(i::Handle<i::String>::cast(result)));
} else {
return Local<String>();
}
}
+void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i_isolate->PrintCurrentStackTrace(out);
+}
+
+
void Message::PrintCurrentStackTrace(FILE* out) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
- ENTER_V8(isolate);
- isolate->PrintCurrentStackTrace(out);
+ PrintCurrentStackTrace(Isolate::GetCurrent(), out);
}
@@ -2317,21 +2330,17 @@ void Message::PrintCurrentStackTrace(FILE* out) {
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
- return Local<StackFrame>();
- }
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
- return scope.Close(Utils::StackFrameToLocal(obj));
+ return scope.Escape(Utils::StackFrameToLocal(obj));
}
int StackTrace::GetFrameCount() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
ENTER_V8(isolate);
return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}
@@ -2339,32 +2348,33 @@ int StackTrace::GetFrameCount() const {
Local<Array> StackTrace::AsArray() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
ENTER_V8(isolate);
return Utils::ToLocal(Utils::OpenHandle(this));
}
-Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
+Local<StackTrace> StackTrace::CurrentStackTrace(
+ Isolate* isolate,
+ int frame_limit,
StackTraceOptions options) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
- Local<StackTrace>();
- }
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
i::Handle<i::JSArray> stackTrace =
- isolate->CaptureCurrentStackTrace(frame_limit, options);
+ i_isolate->CaptureCurrentStackTrace(frame_limit, options);
return Utils::StackTraceToLocal(stackTrace);
}
+Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
+ StackTraceOptions options) {
+ return CurrentStackTrace(Isolate::GetCurrent(), frame_limit, options);
+}
+
+
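// Sketch of the isolate-first capture API added above; the isolate-less
// overload merely forwards Isolate::GetCurrent(). Hypothetical embedder
// code, assuming <cstdio> for printf:
static void DumpTopFrames(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::StackTrace> trace = v8::StackTrace::CurrentStackTrace(
      isolate, 10, v8::StackTrace::kDetailed);
  for (int i = 0; i < trace->GetFrameCount(); i++) {
    v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
    printf("%d:%d\n", frame->GetLineNumber(), frame->GetColumn());
  }
}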
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
- return Message::kNoLineNumberInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2378,9 +2388,6 @@ int StackFrame::GetLineNumber() const {
int StackFrame::GetColumn() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
- return Message::kNoColumnInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2394,9 +2401,6 @@ int StackFrame::GetColumn() const {
int StackFrame::GetScriptId() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptId()")) {
- return Message::kNoScriptIdInfo;
- }
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2410,55 +2414,45 @@ int StackFrame::GetScriptId() const {
Local<String> StackFrame::GetScriptName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptName");
if (!name->IsString()) {
return Local<String>();
}
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
+ return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
}
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL");
if (!name->IsString()) {
return Local<String>();
}
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
+ return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
}
Local<String> StackFrame::GetFunctionName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
- return Local<String>();
- }
ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> name = GetProperty(self, "functionName");
if (!name->IsString()) {
return Local<String>();
}
- return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
+ return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
}
bool StackFrame::IsEval() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2469,7 +2463,6 @@ bool StackFrame::IsEval() const {
bool StackFrame::IsConstructor() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2504,9 +2497,6 @@ Local<Value> JSON::Parse(Local<String> json_string) {
// --- D a t a ---
bool Value::FullIsUndefined() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
- return false;
- }
bool result = Utils::OpenHandle(this)->IsUndefined();
ASSERT_EQ(result, QuickIsUndefined());
return result;
@@ -2514,7 +2504,6 @@ bool Value::FullIsUndefined() const {
bool Value::FullIsNull() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
bool result = Utils::OpenHandle(this)->IsNull();
ASSERT_EQ(result, QuickIsNull());
return result;
@@ -2522,27 +2511,21 @@ bool Value::FullIsNull() const {
bool Value::IsTrue() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
return Utils::OpenHandle(this)->IsTrue();
}
bool Value::IsFalse() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
return Utils::OpenHandle(this)->IsFalse();
}
bool Value::IsFunction() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsJSFunction();
}
bool Value::FullIsString() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
bool result = Utils::OpenHandle(this)->IsString();
ASSERT_EQ(result, QuickIsString());
return result;
@@ -2550,20 +2533,16 @@ bool Value::FullIsString() const {
bool Value::IsSymbol() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsSymbol()")) return false;
return Utils::OpenHandle(this)->IsSymbol();
}
bool Value::IsArray() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
return Utils::OpenHandle(this)->IsJSArray();
}
bool Value::IsArrayBuffer() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
- return false;
return Utils::OpenHandle(this)->IsJSArrayBuffer();
}
@@ -2574,8 +2553,6 @@ bool Value::IsArrayBufferView() const {
bool Value::IsTypedArray() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
- return false;
return Utils::OpenHandle(this)->IsJSTypedArray();
}
@@ -2594,8 +2571,6 @@ F(Uint8ClampedArray, kExternalPixelArray)
#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
bool Value::Is##TypedArray() const { \
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::Is" #TypedArray "()")) \
- return false; \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
if (!obj->IsJSTypedArray()) return false; \
return i::JSTypedArray::cast(*obj)->type() == type_const; \
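// After this change the generated predicates reduce to a pure type check;
// e.g. VALUE_IS_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray) expands
// to (illustrative):
//
//   bool Value::IsUint8Array() const {
//     i::Handle<i::Object> obj = Utils::OpenHandle(this);
//     if (!obj->IsJSTypedArray()) return false;
//     return i::JSTypedArray::cast(*obj)->type() == kExternalUnsignedByteArray;
//   }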
@@ -2612,35 +2587,26 @@ bool Value::IsDataView() const {
bool Value::IsObject() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
}
bool Value::IsNumber() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
return Utils::OpenHandle(this)->IsNumber();
}
bool Value::IsBoolean() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsBoolean();
}
bool Value::IsExternal() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
- return false;
- }
return Utils::OpenHandle(this)->IsExternal();
}
bool Value::IsInt32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
@@ -2657,7 +2623,6 @@ bool Value::IsInt32() const {
bool Value::IsUint32() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
@@ -2675,7 +2640,6 @@ bool Value::IsUint32() const {
bool Value::IsDate() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Date_string());
}
@@ -2683,7 +2647,6 @@ bool Value::IsDate() const {
bool Value::IsStringObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->String_string());
}
@@ -2693,7 +2656,6 @@ bool Value::IsSymbolObject() const {
// TODO(svenpanne): these and other test functions should be written such
// that they do not use Isolate::Current().
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsSymbolObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
}
@@ -2701,7 +2663,6 @@ bool Value::IsSymbolObject() const {
bool Value::IsNumberObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Number_string());
}
@@ -2729,7 +2690,6 @@ static bool CheckConstructor(i::Isolate* isolate,
bool Value::IsNativeError() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsNativeError()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsJSObject()) {
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
@@ -2748,14 +2708,12 @@ bool Value::IsNativeError() const {
bool Value::IsBooleanObject() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
}
bool Value::IsRegExp() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsJSRegExp();
}
@@ -2768,9 +2726,6 @@ Local<String> Value::ToString() const {
str = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToString()")) {
- return Local<String>();
- }
LOG_API(isolate, "ToString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2788,9 +2743,6 @@ Local<String> Value::ToDetailString() const {
str = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
- return Local<String>();
- }
LOG_API(isolate, "ToDetailString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2808,9 +2760,6 @@ Local<v8::Object> Value::ToObject() const {
val = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToObject()")) {
- return Local<v8::Object>();
- }
LOG_API(isolate, "ToObject");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2827,9 +2776,6 @@ Local<Boolean> Value::ToBoolean() const {
return ToApiHandle<Boolean>(obj);
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
- return Local<Boolean>();
- }
LOG_API(isolate, "ToBoolean");
ENTER_V8(isolate);
i::Handle<i::Object> val =
@@ -2846,9 +2792,6 @@ Local<Number> Value::ToNumber() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) {
- return Local<Number>();
- }
LOG_API(isolate, "ToNumber");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2866,7 +2809,6 @@ Local<Integer> Value::ToInteger() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
LOG_API(isolate, "ToInteger");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -2886,7 +2828,6 @@ void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
void External::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
ApiCheck(Utils::OpenHandle(that)->IsExternal(),
"v8::External::Cast()",
"Could not convert to external");
@@ -2894,7 +2835,6 @@ void External::CheckCast(v8::Value* that) {
void v8::Object::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSObject(),
"v8::Object::Cast()",
@@ -2903,7 +2843,6 @@ void v8::Object::CheckCast(Value* that) {
void v8::Function::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSFunction(),
"v8::Function::Cast()",
@@ -2912,7 +2851,6 @@ void v8::Function::CheckCast(Value* that) {
void v8::String::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsString(),
"v8::String::Cast()",
@@ -2921,7 +2859,6 @@ void v8::String::CheckCast(v8::Value* that) {
void v8::Symbol::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsSymbol(),
"v8::Symbol::Cast()",
@@ -2930,7 +2867,6 @@ void v8::Symbol::CheckCast(v8::Value* that) {
void v8::Number::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Number::Cast()",
@@ -2939,7 +2875,6 @@ void v8::Number::CheckCast(v8::Value* that) {
void v8::Integer::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Integer::Cast()",
@@ -2948,7 +2883,6 @@ void v8::Integer::CheckCast(v8::Value* that) {
void v8::Array::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArray(),
"v8::Array::Cast()",
@@ -2957,7 +2891,6 @@ void v8::Array::CheckCast(Value* that) {
void v8::ArrayBuffer::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::ArrayBuffer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArrayBuffer(),
"v8::ArrayBuffer::Cast()",
@@ -2974,7 +2907,6 @@ void v8::ArrayBufferView::CheckCast(Value* that) {
void v8::TypedArray::CheckCast(Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSTypedArray(),
"v8::TypedArray::Cast()",
@@ -2984,8 +2916,6 @@ void v8::TypedArray::CheckCast(Value* that) {
#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
void v8::ApiClass::CheckCast(Value* that) { \
- if (IsDeadCheck(i::Isolate::Current(), "v8::" #ApiClass "::Cast()")) \
- return; \
i::Handle<i::Object> obj = Utils::OpenHandle(that); \
ApiCheck(obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == typeConst, \
@@ -3009,7 +2939,6 @@ void v8::DataView::CheckCast(Value* that) {
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
"v8::Date::Cast()",
@@ -3019,7 +2948,6 @@ void v8::Date::CheckCast(v8::Value* that) {
void v8::StringObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
"v8::StringObject::Cast()",
@@ -3029,7 +2957,6 @@ void v8::StringObject::CheckCast(v8::Value* that) {
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::SymbolObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
"v8::SymbolObject::Cast()",
@@ -3039,7 +2966,6 @@ void v8::SymbolObject::CheckCast(v8::Value* that) {
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
"v8::NumberObject::Cast()",
@@ -3049,7 +2975,6 @@ void v8::NumberObject::CheckCast(v8::Value* that) {
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
"v8::BooleanObject::Cast()",
@@ -3058,7 +2983,6 @@ void v8::BooleanObject::CheckCast(v8::Value* that) {
void v8::RegExp::CheckCast(v8::Value* that) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSRegExp(),
"v8::RegExp::Cast()",
@@ -3078,9 +3002,6 @@ double Value::NumberValue() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
- return i::OS::nan_value();
- }
LOG_API(isolate, "NumberValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3098,7 +3019,6 @@ int64_t Value::IntegerValue() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
LOG_API(isolate, "IntegerValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3120,7 +3040,6 @@ Local<Int32> Value::ToInt32() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
LOG_API(isolate, "ToInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3138,7 +3057,6 @@ Local<Uint32> Value::ToUint32() const {
num = obj;
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
LOG_API(isolate, "ToUInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3156,7 +3074,6 @@ Local<Uint32> Value::ToArrayIndex() const {
return Local<Uint32>();
}
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
LOG_API(isolate, "ToArrayIndex");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3184,7 +3101,6 @@ int32_t Value::Int32Value() const {
return i::Smi::cast(*obj)->value();
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
LOG_API(isolate, "Int32Value (slow)");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3202,9 +3118,8 @@ int32_t Value::Int32Value() const {
bool Value::Equals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Equals()")
- || EmptyCheck("v8::Value::Equals()", this)
- || EmptyCheck("v8::Value::Equals()", that)) {
+ if (EmptyCheck("v8::Value::Equals()", this) ||
+ EmptyCheck("v8::Value::Equals()", that)) {
return false;
}
LOG_API(isolate, "Equals");
@@ -3229,9 +3144,8 @@ bool Value::Equals(Handle<Value> that) const {
bool Value::StrictEquals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
- || EmptyCheck("v8::Value::StrictEquals()", this)
- || EmptyCheck("v8::Value::StrictEquals()", that)) {
+ if (EmptyCheck("v8::Value::StrictEquals()", this) ||
+ EmptyCheck("v8::Value::StrictEquals()", that)) {
return false;
}
LOG_API(isolate, "StrictEquals");
@@ -3259,13 +3173,25 @@ bool Value::StrictEquals(Handle<Value> that) const {
}
+bool Value::SameValue(Handle<Value> that) const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (EmptyCheck("v8::Value::SameValue()", this) ||
+ EmptyCheck("v8::Value::SameValue()", that)) {
+ return false;
+ }
+ LOG_API(isolate, "SameValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ return obj->SameValue(*other);
+}
+
+
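// SameValue() implements the ES5 9.12 algorithm, so it diverges from
// StrictEquals() exactly on NaN and signed zero. A hedged sketch, assuming
// <limits> and an entered isolate:
static void SameValueExample(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  double nan = std::numeric_limits<double>::quiet_NaN();
  v8::Local<v8::Value> a = v8::Number::New(nan);
  v8::Local<v8::Value> b = v8::Number::New(nan);
  bool strict = a->StrictEquals(b);  // false: NaN !== NaN under ===
  bool same = a->SameValue(b);       // true: SameValue(NaN, NaN)
  (void)strict; (void)same;          // (and SameValue(+0, -0) is false)
}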
uint32_t Value::Uint32Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
LOG_API(isolate, "Uint32Value");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
@@ -3291,7 +3217,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetProperty(
+ i::Handle<i::Object> obj = i::Runtime::SetObjectProperty(
isolate,
self,
key_obj,
@@ -3346,6 +3272,12 @@ bool v8::Object::ForceSet(v8::Handle<Value> key,
}
+bool v8::Object::SetPrivate(v8::Handle<Private> key, v8::Handle<Value> value) {
+ return Set(v8::Handle<Value>(reinterpret_cast<Value*>(*key)),
+ value, DontEnum);
+}
+
+
bool v8::Object::ForceDelete(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::ForceDelete()", return false);
@@ -3397,6 +3329,11 @@ Local<Value> v8::Object::Get(uint32_t index) {
}
+Local<Value> v8::Object::GetPrivate(v8::Handle<Private> key) {
+ return Get(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::GetPropertyAttribute()",
@@ -3454,7 +3391,7 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
ENTER_V8(isolate);
i::JSObject* object = *Utils::OpenHandle(this);
i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
- while (!object->IsInstanceOf(tmpl_info)) {
+ while (!tmpl_info->IsTemplateFor(object)) {
i::Object* prototype = object->GetPrototype();
if (!prototype->IsJSObject()) return Local<Object>();
object = i::JSObject::cast(prototype);
@@ -3506,13 +3443,14 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
Local<String> v8::Object::ObjectProtoToString() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ObjectProtoToString()",
+ i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
+ ON_BAILOUT(i_isolate, "v8::Object::ObjectProtoToString()",
return Local<v8::String>());
- ENTER_V8(isolate);
+ ENTER_V8(i_isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name(self->class_name(), isolate);
+ i::Handle<i::Object> name(self->class_name(), i_isolate);
// Native implementation of Object.prototype.toString (v8natives.js):
// var c = %_ClassOf(this);
@@ -3520,13 +3458,11 @@ Local<String> v8::Object::ObjectProtoToString() {
// return "[object " + c + "]";
if (!name->IsString()) {
- return v8::String::New("[object ]");
-
+ return v8::String::NewFromUtf8(isolate, "[object ]");
} else {
i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) {
- return v8::String::New("[object Object]");
-
+ return v8::String::NewFromUtf8(isolate, "[object Object]");
} else {
const char* prefix = "[object ";
Local<String> str = Utils::ToLocal(class_name);
@@ -3552,7 +3488,8 @@ Local<String> v8::Object::ObjectProtoToString() {
i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
- Local<String> result = v8::String::New(buf.start(), buf_len);
+ Local<String> result = v8::String::NewFromUtf8(
+ isolate, buf.start(), String::kNormalString, buf_len);
return result;
}
}
@@ -3596,6 +3533,11 @@ bool v8::Object::Delete(v8::Handle<Value> key) {
}
+bool v8::Object::DeletePrivate(v8::Handle<Private> key) {
+ return Delete(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
bool v8::Object::Has(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Has()", return false);
@@ -3610,6 +3552,11 @@ bool v8::Object::Has(v8::Handle<Value> key) {
}
+bool v8::Object::HasPrivate(v8::Handle<Private> key) {
+ return Has(v8::Handle<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
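// The four *Private accessors above wrap the ordinary property API with the
// Private key reinterpreted as a Value, plus DontEnum semantics on Set. A
// sketch of the intended usage, assuming v8::Private::New(isolate, name) as
// declared in this revision's v8.h:
static void PrivatePropertyExample(v8::Isolate* isolate,
                                   v8::Handle<v8::Object> obj) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Private> key = v8::Private::New(
      isolate, v8::String::NewFromUtf8(isolate, "embedder-slot"));
  obj->SetPrivate(key, v8::Integer::New(42));
  if (obj->HasPrivate(key)) {
    v8::Local<v8::Value> value = obj->GetPrivate(key);
    (void)value;
    obj->DeletePrivate(key);
  }
}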
bool v8::Object::Delete(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::DeleteProperty()",
@@ -3625,7 +3572,7 @@ bool v8::Object::Has(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasElement(index);
+ return i::JSReceiver::HasElement(self, index);
}
@@ -3679,8 +3626,8 @@ bool v8::Object::HasOwnProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
return false);
- return Utils::OpenHandle(this)->HasLocalProperty(
- *Utils::OpenHandle(*key));
+ return i::JSReceiver::HasLocalProperty(
+ Utils::OpenHandle(this), Utils::OpenHandle(*key));
}
@@ -3688,9 +3635,8 @@ bool v8::Object::HasRealNamedProperty(Handle<String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
return false);
- return Utils::OpenHandle(this)->HasRealNamedProperty(
- isolate,
- *Utils::OpenHandle(*key));
+ return i::JSObject::HasRealNamedProperty(Utils::OpenHandle(this),
+ Utils::OpenHandle(*key));
}
@@ -3698,7 +3644,7 @@ bool v8::Object::HasRealIndexedProperty(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
return false);
- return Utils::OpenHandle(this)->HasRealElementProperty(isolate, index);
+ return i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
}
@@ -3708,9 +3654,8 @@ bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
"v8::Object::HasRealNamedCallbackProperty()",
return false);
ENTER_V8(isolate);
- return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
- isolate,
- *Utils::OpenHandle(*key));
+ return i::JSObject::HasRealNamedCallbackProperty(Utils::OpenHandle(this),
+ Utils::OpenHandle(*key));
}
@@ -3813,7 +3758,7 @@ Local<v8::Object> v8::Object::Clone() {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = i::Copy(self);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(self);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
@@ -3852,7 +3797,8 @@ int v8::Object::GetIdentityHash() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::GetIdentityHash(self);
+ return i::Handle<i::Smi>::cast(
+ i::JSReceiver::GetOrCreateIdentityHash(self))->value();
}
@@ -4108,12 +4054,11 @@ bool v8::Object::IsCallable() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (obj->IsJSFunction()) return true;
- return i::Execution::GetFunctionDelegate(isolate, obj)->IsJSFunction();
+ return obj->IsCallable();
}
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv,
int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -4141,7 +4086,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
}
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned = i::Execution::Call(
- isolate, fun, recv_obj, argc, args, &has_pending_exception);
+ isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -4213,7 +4158,7 @@ Local<v8::Object> Function::NewInstance(int argc,
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4221,11 +4166,11 @@ Local<v8::Object> Function::NewInstance(int argc,
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
- return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
+ return scope.Escape(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
}
-Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
+Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
@@ -4242,7 +4187,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned = i::Execution::Call(
- isolate, fun, recv_obj, argc, args, &has_pending_exception);
+ isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
raw_result = *returned;
}
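// Call() and CallAsFunction() now accept an arbitrary Value as receiver;
// the new trailing `true` passed to i::Execution::Call appears to request
// receiver conversion, so primitives get wrapped for sloppy-mode callees.
// Hypothetical embedder sketch with a primitive receiver:
static void PrimitiveReceiverExample(v8::Isolate* isolate,
                                     v8::Handle<v8::Function> fn) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Value> recv = v8::Number::New(1);  // need not be an Object
  v8::Handle<v8::Value> argv[] = { v8::Integer::New(2) };
  v8::Local<v8::Value> result = fn->Call(recv, 1, argv);
  (void)result;
}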
@@ -4274,6 +4219,29 @@ Handle<Value> Function::GetInferredName() const {
}
+Handle<Value> Function::GetDisplayName() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
+ return ToApiHandle<Primitive>(
+ isolate->factory()->undefined_value()));
+ ENTER_V8(isolate);
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ i::Handle<i::String> property_name =
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("displayName"));
+ i::LookupResult lookup(isolate);
+ func->LookupRealNamedProperty(*property_name, &lookup);
+ if (lookup.IsFound()) {
+ i::Object* value = lookup.GetLazyValue();
+ if (value && value->IsString()) {
+ i::String* name = i::String::cast(value);
+ if (name->length() > 0) return Utils::ToLocal(i::Handle<i::String>(name));
+ }
+ }
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
+}
+
+
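// GetDisplayName() surfaces the devtools "displayName" convention: a
// non-empty string-valued own property wins, anything else yields
// undefined. Illustrative pairing of script and embedder code:
//
//   // script: function f() {}; f.displayName = "pretty f";
static void DisplayNameExample(v8::Isolate* isolate,
                               v8::Handle<v8::Function> fn) {
  v8::HandleScope scope(isolate);
  v8::Handle<v8::Value> name = fn->GetDisplayName();
  if (name->IsString()) {
    // e.g. "pretty f"; IsUndefined() when absent, empty, or non-string.
  }
}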
ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
@@ -4312,12 +4280,20 @@ int Function::GetScriptColumnNumber() const {
}
+bool Function::IsBuiltin() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ return func->IsBuiltin();
+}
+
+
Handle<Value> Function::GetScriptId() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->script()->IsScript())
- return v8::Undefined();
+ i::Isolate* isolate = func->GetIsolate();
+ if (!func->shared()->script()->IsScript()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate()));
+ return Utils::ToLocal(i::Handle<i::Object>(script->id(), isolate));
}
@@ -4331,16 +4307,12 @@ int Function::ScriptId() const {
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
return str->length();
}
bool String::IsOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
- return false;
- }
return str->HasOnlyOneByteChars();
}
@@ -4456,10 +4428,6 @@ class ContainsOnlyOneByteHelper {
bool String::ContainsOnlyOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(),
- "v8::String::ContainsOnlyOneByte()")) {
- return false;
- }
if (str->HasOnlyOneByteChars()) return true;
ContainsOnlyOneByteHelper helper;
return helper.Check(*str);
@@ -4663,7 +4631,6 @@ static int Utf8Length(i::String* str, i::Isolate* isolate) {
int String::Utf8Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
i::Isolate* isolate = str->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Utf8Length()")) return 0;
return v8::Utf8Length(*str, isolate);
}
@@ -4849,7 +4816,6 @@ int String::WriteUtf8(char* buffer,
int* nchars_ref,
int options) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
@@ -4894,40 +4860,6 @@ int String::WriteUtf8(char* buffer,
}
-int String::WriteAscii(char* buffer,
- int start,
- int length,
- int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
- LOG_API(isolate, "String::WriteAscii");
- ENTER_V8(isolate);
- ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(this);
- isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- FlattenString(str); // Flatten the string for efficiency.
- }
-
- int end = length;
- if ((length == -1) || (length > str->length() - start)) {
- end = str->length() - start;
- }
- if (end < 0) return 0;
- i::StringCharacterStream write_stream(*str, isolate->write_iterator(), start);
- int i;
- for (i = 0; i < end; i++) {
- char c = static_cast<char>(write_stream.GetNext());
- if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
- buffer[i] = c;
- }
- if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
- buffer[i] = '\0';
- }
- return i;
-}
-
-
template<typename CharType>
static inline int WriteHelper(const String* string,
CharType* buffer,
@@ -4935,7 +4867,6 @@ static inline int WriteHelper(const String* string,
int length,
int options) {
i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
LOG_API(isolate, "String::Write");
ENTER_V8(isolate);
ASSERT(start >= 0 && length >= -1);
@@ -4977,9 +4908,6 @@ int String::Write(uint16_t* buffer,
bool v8::String::IsExternal() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
- return false;
- }
EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
return i::StringShape(*str).IsExternalTwoByte();
}
@@ -4987,9 +4915,6 @@ bool v8::String::IsExternal() const {
bool v8::String::IsExternalAscii() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternalAscii()")) {
- return false;
- }
return i::StringShape(*str).IsExternalAscii();
}
@@ -5035,10 +4960,6 @@ void v8::String::VerifyExternalStringResourceBase(
const v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(),
- "v8::String::GetExternalAsciiStringResource()")) {
- return NULL;
- }
if (i::StringShape(*str).IsExternalAscii()) {
const void* resource =
i::Handle<i::ExternalAsciiString>::cast(str)->resource();
@@ -5050,30 +4971,30 @@ const v8::String::ExternalAsciiStringResource*
Local<Value> Symbol::Name() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Name()"))
- return Local<Value>();
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
i::Handle<i::Object> name(sym->name(), sym->GetIsolate());
return Utils::ToLocal(name);
}
+Local<Value> Private::Name() const {
+ return reinterpret_cast<const Symbol*>(this)->Name();
+}
+
+
double Number::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
}
bool Boolean::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsTrue();
}
int64_t Integer::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -5084,7 +5005,6 @@ int64_t Integer::Value() const {
int32_t Int32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -5095,7 +5015,6 @@ int32_t Int32::Value() const {
uint32_t Uint32::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
@@ -5107,9 +5026,6 @@ uint32_t Uint32::Value() const {
int v8::Object::InternalFieldCount() {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::InternalFieldCount()")) {
- return 0;
- }
return obj->GetInternalFieldCount();
}
@@ -5117,10 +5033,9 @@ int v8::Object::InternalFieldCount() {
static bool InternalFieldOK(i::Handle<i::JSObject> obj,
int index,
const char* location) {
- return !IsDeadCheck(obj->GetIsolate(), location) &&
- ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
+ return ApiCheck(index < obj->GetInternalFieldCount(),
+ location,
+ "Internal field out of bounds");
}
@@ -5171,6 +5086,24 @@ static void* ExternalValue(i::Object* obj) {
// --- E n v i r o n m e n t ---
+void v8::V8::InitializePlatform(Platform* platform) {
+#ifdef V8_USE_DEFAULT_PLATFORM
+ FATAL("Can't override v8::Platform when using default implementation");
+#else
+ i::V8::InitializePlatform(platform);
+#endif
+}
+
+
+void v8::V8::ShutdownPlatform() {
+#ifdef V8_USE_DEFAULT_PLATFORM
+ FATAL("Can't override v8::Platform when using default implementation");
+#else
+ i::V8::ShutdownPlatform();
+#endif
+}
+
+
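// With these entry points the embedder-visible lifecycle becomes the
// following (sketch; MyPlatform is a hypothetical v8::Platform
// implementation, and both calls FATAL() when V8_USE_DEFAULT_PLATFORM is
// defined):
//
//   MyPlatform platform;
//   v8::V8::InitializePlatform(&platform);
//   v8::V8::Initialize();
//   // ... create isolates, compile and run scripts ...
//   v8::V8::Dispose();
//   v8::V8::ShutdownPlatform();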
bool v8::V8::Initialize() {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
if (isolate != NULL && isolate->IsInitialized()) {
@@ -5191,11 +5124,6 @@ void v8::V8::SetReturnAddressLocationResolver(
}
-bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
- return SetFunctionEntryHook(Isolate::GetCurrent(), entry_hook);
-}
-
-
bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
FunctionEntryHook entry_hook) {
ASSERT(ext_isolate != NULL);
@@ -5256,25 +5184,8 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
heap_size_limit_(0) { }
-void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized()) {
-    // Isolate is uninitialized thus heap is not configured yet.
- heap_statistics->total_heap_size_ = 0;
- heap_statistics->total_heap_size_executable_ = 0;
- heap_statistics->total_physical_size_ = 0;
- heap_statistics->used_heap_size_ = 0;
- heap_statistics->heap_size_limit_ = 0;
- return;
- }
- Isolate* ext_isolate = reinterpret_cast<Isolate*>(isolate);
- return ext_isolate->GetHeapStatistics(heap_statistics);
-}
-
-
void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
isolate->heap()->VisitExternalResources(visitor);
}
@@ -5298,8 +5209,6 @@ class VisitorAdapter : public i::ObjectVisitor {
void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
-
i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
@@ -5311,8 +5220,6 @@ void v8::V8::VisitHandlesForPartialDependence(
Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
ASSERT(isolate == i::Isolate::Current());
- IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
-
i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
@@ -5423,7 +5330,6 @@ Local<Context> v8::Context::New(
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
EnsureInitializedForIsolate(isolate, "v8::Context::New()");
LOG_API(isolate, "Context::New");
@@ -5438,9 +5344,6 @@ Local<Context> v8::Context::New(
void v8::Context::SetSecurityToken(Handle<Value> token) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::SetSecurityToken()")) {
- return;
- }
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
@@ -5450,10 +5353,6 @@ void v8::Context::SetSecurityToken(Handle<Value> token) {
void v8::Context::UseDefaultSecurityToken() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::UseDefaultSecurityToken()")) {
- return;
- }
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
env->set_security_token(env->global_object());
@@ -5462,9 +5361,6 @@ void v8::Context::UseDefaultSecurityToken() {
Handle<Value> v8::Context::GetSecurityToken() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetSecurityToken()")) {
- return Handle<Value>();
- }
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Object* security_token = env->security_token();
i::Handle<i::Object> token_handle(security_token, isolate);
@@ -5494,112 +5390,62 @@ v8::Local<v8::Context> Context::GetEntered() {
if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
return Local<Context>();
}
- i::Handle<i::Object> last =
- isolate->handle_scope_implementer()->LastEnteredContext();
- if (last.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetEnteredContext();
}
v8::Local<v8::Context> Context::GetCurrent() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
- return Local<Context>();
- }
return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
}
v8::Local<v8::Context> Context::GetCalling() {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
- return Local<Context>();
- }
- i::Handle<i::Object> calling =
- isolate->GetCallingNativeContext();
- if (calling.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetCallingContext();
}
v8::Local<v8::Object> Context::Global() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::Global()")) {
- return Local<v8::Object>();
- }
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
i::Handle<i::Object> global(context->global_proxy(), isolate);
+ // TODO(dcarney): This should always return the global proxy
+  // but can't presently as calls to GetPrototype will return the wrong result.
+ if (i::Handle<i::JSGlobalProxy>::cast(
+ global)->IsDetachedFrom(context->global_object())) {
+ global = i::Handle<i::Object>(context->global_object(), isolate);
+ }
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
void Context::DetachGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
isolate->bootstrapper()->DetachGlobal(context);
}
-void Context::ReattachGlobal(Handle<Object> global_object) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::JSGlobalProxy> global_proxy =
- i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
- isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
-}
-
-
void Context::AllowCodeGenerationFromStrings(bool allow) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
- return;
- }
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
context->set_allow_code_gen_from_strings(
allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
}
bool Context::IsCodeGenerationFromStringsAllowed() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
- return false;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
return !context->allow_code_gen_from_strings()->IsFalse();
}
void Context::SetErrorMessageForCodeGenerationFromStrings(
Handle<String> error) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
@@ -5639,23 +5485,27 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
return false);
i::Object* obj = *Utils::OpenHandle(*value);
- return obj->IsInstanceOf(*Utils::OpenHandle(this));
+ return Utils::OpenHandle(this)->IsTemplateFor(obj);
}
-Local<External> v8::External::New(void* value) {
+Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::External::New()");
- LOG_API(isolate, "External::New");
- ENTER_V8(isolate);
- i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::External::New()");
+ LOG_API(i_isolate, "External::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
return Utils::ExternalToLocal(external);
}
+Local<External> v8::External::New(void* value) {
+ return v8::External::New(Isolate::GetCurrent(), value);
+}
+
+
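// Sketch of the isolate-threaded form added above; the legacy overload only
// forwards Isolate::GetCurrent():
static void ExternalExample(v8::Isolate* isolate) {
  static int payload = 42;
  v8::HandleScope scope(isolate);
  v8::Local<v8::External> ext = v8::External::New(isolate, &payload);
  int* raw = static_cast<int*>(ext->Value());
  (void)raw;  // raw == &payload
}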
void* External::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
return ExternalValue(*Utils::OpenHandle(this));
}
@@ -5833,22 +5683,28 @@ bool RedirectToExternalString(i::Isolate* isolate,
Local<String> v8::String::NewExternal(
+ Isolate* isolate,
v8::String::ExternalStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
+ LOG_API(i_isolate, "String::NewExternal");
+ ENTER_V8(i_isolate);
CHECK(resource && resource->data());
- i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
+ i::Handle<i::String> result = NewExternalStringHandle(i_isolate, resource);
+ i_isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
+Local<String> v8::String::NewExternal(
+ v8::String::ExternalStringResource* resource) {
+ return NewExternal(Isolate::GetCurrent(), resource);
+}
+
+
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (i::StringShape(*obj).IsExternalTwoByte()) {
return false; // Already an external string.
}
@@ -5883,23 +5739,30 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
Local<String> v8::String::NewExternal(
+ Isolate* isolate,
v8::String::ExternalAsciiStringResource* resource) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
- LOG_API(isolate, "String::NewExternal");
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
+ LOG_API(i_isolate, "String::NewExternal");
+ ENTER_V8(i_isolate);
CHECK(resource && resource->data());
- i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
- isolate->heap()->external_string_table()->AddString(*result);
+ i::Handle<i::String> result =
+ NewExternalAsciiStringHandle(i_isolate, resource);
+ i_isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
}
+Local<String> v8::String::NewExternal(
+ v8::String::ExternalAsciiStringResource* resource) {
+ return NewExternal(Isolate::GetCurrent(), resource);
+}
+
+
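// Sketch of the new calling convention (assumed embedder code; the resource
// class is hypothetical and needs <cstring> plus v8.h). V8 only requires
// that data() stay valid for the lifetime of the external string.
class CStringResource : public v8::String::ExternalAsciiStringResource {
 public:
  explicit CStringResource(const char* data)
      : data_(data), length_(strlen(data)) {}
  virtual const char* data() const { return data_; }
  virtual size_t length() const { return length_; }
 private:
  const char* data_;
  size_t length_;
};
// Typical call: v8::String::NewExternal(isolate, new CStringResource("hi"));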
bool v8::String::MakeExternal(
v8::String::ExternalAsciiStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
if (i::StringShape(*obj).IsExternalTwoByte()) {
return false; // Already an external string.
}
@@ -5937,7 +5800,12 @@ bool v8::String::CanMakeExternal() {
if (!internal::FLAG_clever_optimizations) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
+
+ // TODO(yangguo): Externalizing sliced/cons strings allocates.
+ // This rule can be removed when all code that can
+ // trigger an access check is handlified and therefore GC safe.
+ if (isolate->heap()->old_pointer_space()->Contains(*obj)) return false;
+
if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kShortSize) return false;
@@ -5946,31 +5814,40 @@ bool v8::String::CanMakeExternal() {
}
-Local<v8::Object> v8::Object::New() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Object::New()");
- LOG_API(isolate, "Object::New");
- ENTER_V8(isolate);
+Local<v8::Object> v8::Object::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Object::New()");
+ LOG_API(i_isolate, "Object::New");
+ ENTER_V8(i_isolate);
i::Handle<i::JSObject> obj =
- isolate->factory()->NewJSObject(isolate->object_function());
+ i_isolate->factory()->NewJSObject(i_isolate->object_function());
return Utils::ToLocal(obj);
}
-Local<v8::Value> v8::NumberObject::New(double value) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::NumberObject::New()");
- LOG_API(isolate, "NumberObject::New");
- ENTER_V8(isolate);
- i::Handle<i::Object> number = isolate->factory()->NewNumber(value);
- i::Handle<i::Object> obj = isolate->factory()->ToObject(number);
+Local<v8::Object> v8::Object::New() {
+ return New(Isolate::GetCurrent());
+}
+
+
+Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::NumberObject::New()");
+ LOG_API(i_isolate, "NumberObject::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
+ i::Handle<i::Object> obj = i_isolate->factory()->ToObject(number);
return Utils::ToLocal(obj);
}
+Local<v8::Value> v8::NumberObject::New(double value) {
+ return New(Isolate::GetCurrent(), value);
+}
+
+
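// Minimal sketch of the isolate-first pattern added above, assuming an
// entered isolate (helper name is hypothetical):
void MakeBoxes(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Object> obj = v8::Object::New(isolate);
  v8::Local<v8::Value> boxed = v8::NumberObject::New(isolate, 3.14);
  (void) obj; (void) boxed;
}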
double v8::NumberObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::NumberObject::NumberValue()")) return 0;
LOG_API(isolate, "NumberObject::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -5994,7 +5871,6 @@ Local<v8::Value> v8::BooleanObject::New(bool value) {
bool v8::BooleanObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::BooleanObject::BooleanValue()")) return 0;
LOG_API(isolate, "BooleanObject::BooleanValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6015,9 +5891,6 @@ Local<v8::Value> v8::StringObject::New(Handle<String> value) {
Local<v8::String> v8::StringObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::StringObject::StringValue()")) {
- return Local<v8::String>();
- }
LOG_API(isolate, "StringObject::StringValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6039,8 +5912,6 @@ Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::SymbolObject::SymbolValue()"))
- return Local<v8::Symbol>();
LOG_API(isolate, "SymbolObject::SymbolValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6049,26 +5920,30 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
}
-Local<v8::Value> v8::Date::New(double time) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Date::New()");
- LOG_API(isolate, "Date::New");
+Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Date::New()");
+ LOG_API(i_isolate, "Date::New");
if (std::isnan(time)) {
// Introduce only the canonical NaN value into the VM, to avoid signaling NaNs.
time = i::OS::nan_value();
}
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
+ ENTER_V8(i_isolate);
+ EXCEPTION_PREAMBLE(i_isolate);
i::Handle<i::Object> obj =
- i::Execution::NewDate(isolate, time, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
+ i::Execution::NewDate(i_isolate, time, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(i_isolate, Local<v8::Value>());
return Utils::ToLocal(obj);
}
+Local<v8::Value> v8::Date::New(double time) {
+ return New(Isolate::GetCurrent(), time);
+}
+
+
double v8::Date::ValueOf() const {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
LOG_API(isolate, "Date::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
@@ -6076,22 +5951,22 @@ double v8::Date::ValueOf() const {
}
-void v8::Date::DateTimeConfigurationChangeNotification() {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
+void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ON_BAILOUT(i_isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
return);
- LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
- ENTER_V8(isolate);
+ LOG_API(i_isolate, "Date::DateTimeConfigurationChangeNotification");
+ ENTER_V8(i_isolate);
- isolate->date_cache()->ResetDateCache();
+ i_isolate->date_cache()->ResetDateCache();
- i::HandleScope scope(isolate);
+ i::HandleScope scope(i_isolate);
// Get the function ResetDateCache (defined in date.js).
i::Handle<i::String> func_name_str =
- isolate->factory()->InternalizeOneByteString(
+ i_isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("ResetDateCache"));
i::MaybeObject* result =
- isolate->js_builtins_object()->GetProperty(*func_name_str);
+ i_isolate->js_builtins_object()->GetProperty(*func_name_str);
i::Object* object_func;
if (!result->ToObject(&object_func)) {
return;
@@ -6104,7 +5979,7 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
// Call ResetDateCache() but expect no exceptions:
bool caught_exception = false;
i::Execution::TryCall(func,
- isolate->js_builtins_object(),
+ i_isolate->js_builtins_object(),
0,
NULL,
&caught_exception);
@@ -6112,6 +5987,11 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
}
+void v8::Date::DateTimeConfigurationChangeNotification() {
+ DateTimeConfigurationChangeNotification(Isolate::GetCurrent());
+}
+
+
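// Hedged usage sketch: when the host's time zone or DST rules change, the
// embedder notifies the isolate so cached local-time offsets are dropped.
// Date objects keep their UTC time value; only local-time conversion changes.
void OnHostTimeZoneChange(v8::Isolate* isolate) {  // hypothetical hook
  v8::Date::DateTimeConfigurationChangeNotification(isolate);
}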
static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
i::Isolate* isolate = i::Isolate::Current();
uint8_t flags_buf[3];
@@ -6142,10 +6022,6 @@ Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
Local<v8::String> v8::RegExp::GetSource() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
- return Local<v8::String>();
- }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
}
@@ -6162,31 +6038,31 @@ REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
- return v8::RegExp::kNone;
- }
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return static_cast<RegExp::Flags>(obj->GetFlags().value());
}
-Local<v8::Array> v8::Array::New(int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Array::New()");
- LOG_API(isolate, "Array::New");
- ENTER_V8(isolate);
+Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Array::New()");
+ LOG_API(i_isolate, "Array::New");
+ ENTER_V8(i_isolate);
int real_length = length > 0 ? length : 0;
- i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
+ i::Handle<i::JSArray> obj = i_isolate->factory()->NewJSArray(real_length);
i::Handle<i::Object> length_obj =
- isolate->factory()->NewNumberFromInt(real_length);
+ i_isolate->factory()->NewNumberFromInt(real_length);
obj->set_length(*length_obj);
return Utils::ToLocal(obj);
}
+Local<v8::Array> v8::Array::New(int length) {
+ return New(Isolate::GetCurrent(), length);
+}
+
+
uint32_t v8::Array::Length() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object* length = obj->length();
if (length->IsSmi()) {
@@ -6212,7 +6088,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
EXCEPTION_PREAMBLE(isolate);
ENTER_V8(isolate);
- i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
@@ -6264,37 +6140,46 @@ void v8::ArrayBuffer::Neuter() {
size_t v8::ArrayBuffer::ByteLength() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
}
-Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(size_t)");
- LOG_API(isolate, "v8::ArrayBuffer::New(size_t)");
- ENTER_V8(isolate);
+Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(size_t)");
+ LOG_API(i_isolate, "v8::ArrayBuffer::New(size_t)");
+ ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
- isolate->factory()->NewJSArrayBuffer();
- i::Runtime::SetupArrayBufferAllocatingData(isolate, obj, byte_length);
+ i_isolate->factory()->NewJSArrayBuffer();
+ i::Runtime::SetupArrayBufferAllocatingData(i_isolate, obj, byte_length);
return Utils::ToLocal(obj);
}
-Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(void*, size_t)");
- LOG_API(isolate, "v8::ArrayBuffer::New(void*, size_t)");
- ENTER_V8(isolate);
+Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) {
+ return New(Isolate::GetCurrent(), byte_length);
+}
+
+
+Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
+ size_t byte_length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
+ LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
+ ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
- isolate->factory()->NewJSArrayBuffer();
- i::Runtime::SetupArrayBuffer(isolate, obj, true, data, byte_length);
+ i_isolate->factory()->NewJSArrayBuffer();
+ i::Runtime::SetupArrayBuffer(i_isolate, obj, true, data, byte_length);
return Utils::ToLocal(obj);
}
+Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
+ return New(Isolate::GetCurrent(), data, byte_length);
+}
+
+
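// Assumed sketch: wrapping embedder-owned memory with the new isolate-first
// overload. The resulting buffer is external, so V8 never frees `backing`.
static char backing[1024];  // placeholder embedder storage
v8::Local<v8::ArrayBuffer> WrapBacking(v8::Isolate* isolate) {
  return v8::ArrayBuffer::New(isolate, backing, sizeof(backing));
}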
Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
ASSERT(obj->buffer()->IsJSArrayBuffer());
@@ -6315,18 +6200,7 @@ size_t v8::ArrayBufferView::ByteLength() {
}
-void* v8::ArrayBufferView::BaseAddress() {
- i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
- void* buffer_data = buffer->backing_store();
- size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
- return static_cast<uint8_t*>(buffer_data) + byte_offset;
-}
-
-
size_t v8::TypedArray::Length() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->length()->Number());
}
@@ -6367,8 +6241,10 @@ i::Handle<i::JSTypedArray> NewTypedArray(
ASSERT(byte_offset % sizeof(ElementType) == 0);
+ CHECK(length <= (std::numeric_limits<size_t>::max() / sizeof(ElementType)));
+ size_t byte_length = length * sizeof(ElementType);
SetupArrayBufferView(
- isolate, obj, buffer, byte_offset, length * sizeof(ElementType));
+ isolate, obj, buffer, byte_offset, byte_length);
i::Handle<i::Object> length_object =
isolate->factory()->NewNumberFromSize(length);
@@ -6435,27 +6311,37 @@ Local<DataView> DataView::New(Handle<ArrayBuffer> array_buffer,
}
-Local<Symbol> v8::Symbol::New(Isolate* isolate) {
+Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
+ if (data != NULL) {
+ if (length == -1) length = i::StrLength(data);
+ i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
+ result->set_name(*name);
+ }
return Utils::ToLocal(result);
}
-Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
+Local<Private> v8::Private::New(
+ Isolate* isolate, const char* data, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
- LOG_API(i_isolate, "Symbol::New(char)");
+ EnsureInitializedForIsolate(i_isolate, "v8::Private::New()");
+ LOG_API(i_isolate, "Private::New()");
ENTER_V8(i_isolate);
- if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
- i::Vector<const char>(data, length));
- i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
- result->set_name(*name);
- return Utils::ToLocal(result);
+ i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
+ if (data != NULL) {
+ if (length == -1) length = i::StrLength(data);
+ i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
+ symbol->set_name(*name);
+ }
+ Local<Symbol> result = Utils::ToLocal(symbol);
+ return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
}
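// Minimal sketch, assuming an entered isolate: both factories now take an
// optional name, and a length of -1 means the data is NUL-terminated.
void MakeKeys(v8::Isolate* isolate) {  // hypothetical helper
  v8::HandleScope scope(isolate);
  v8::Local<v8::Symbol> sym = v8::Symbol::New(isolate, "my_symbol", -1);
  v8::Local<v8::Private> key = v8::Private::New(isolate, "embedder_key", -1);
  (void) sym; (void) key;
}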
@@ -6482,18 +6368,28 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) {
Local<Integer> v8::Integer::New(int32_t value) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
- return v8::Integer::New(value, reinterpret_cast<Isolate*>(isolate));
+ return v8::Integer::New(reinterpret_cast<Isolate*>(isolate), value);
}
Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()");
- return Integer::NewFromUnsigned(value, reinterpret_cast<Isolate*>(isolate));
+ return Integer::NewFromUnsigned(reinterpret_cast<Isolate*>(isolate), value);
}
Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
+ return Integer::New(isolate, value);
+}
+
+
+Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
+ return Integer::NewFromUnsigned(isolate, value);
+}
+
+
+Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(internal_isolate->IsInitialized());
if (i::Smi::IsValid(value)) {
@@ -6506,7 +6402,7 @@ Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
}
-Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
+Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(internal_isolate->IsInitialized());
bool fits_into_int32_t = (value & (1 << 31)) == 0;
@@ -6584,14 +6480,12 @@ void V8::SetCaptureStackTraceForUncaughtExceptions(
void V8::SetCounterFunction(CounterLookupCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
isolate->stats_table()->SetCounterFunction(callback);
}
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
isolate->stats_table()->SetCreateHistogramFunction(callback);
isolate->InitializeLoggingAndCounters();
isolate->counters()->ResetHistograms();
@@ -6600,7 +6494,6 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
- if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
isolate->stats_table()->
SetAddHistogramSampleFunction(callback);
}
@@ -6608,24 +6501,20 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
- return;
- }
isolate->SetFailedAccessCheckCallback(callback);
}
-intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes) {
+int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
return heap->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
-intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
+int64_t V8::AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() ||
- IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+ if (isolate == NULL || !isolate->IsInitialized()) {
return 0;
}
Isolate* isolate_ext = reinterpret_cast<Isolate*>(isolate);
@@ -6647,9 +6536,15 @@ CpuProfiler* Isolate::GetCpuProfiler() {
}
+bool Isolate::InContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->context() != NULL;
+}
+
+
v8::Local<v8::Context> Isolate::GetCurrentContext() {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- i::Context* context = internal_isolate->context();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Context* context = isolate->context();
if (context == NULL) return Local<Context>();
i::Context* native_context = context->global_object()->native_context();
if (native_context == NULL) return Local<Context>();
@@ -6657,73 +6552,119 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() {
}
-void Isolate::SetObjectGroupId(const Persistent<Value>& object,
- UniqueId id) {
+v8::Local<v8::Context> Isolate::GetCallingContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> calling = isolate->GetCallingNativeContext();
+ if (calling.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
+}
+
+
+v8::Local<v8::Context> Isolate::GetEnteredContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> last =
+ isolate->handle_scope_implementer()->LastEnteredContext();
+ if (last.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(last));
+}
+
+
+v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ ENTER_V8(isolate);
+  // If we're passed an empty handle, we throw an undefined exception
+  // to deal more gracefully with out-of-memory situations.
+ if (value.IsEmpty()) {
+ isolate->ScheduleThrow(isolate->heap()->undefined_value());
+ } else {
+ isolate->ScheduleThrow(*Utils::OpenHandle(*value));
+ }
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+}
+
+
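// Hedged example of the per-isolate throw added above, from a hypothetical
// FunctionCallback; String::NewFromUtf8 is assumed present in this API rev.
void Fail(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  isolate->ThrowException(v8::String::NewFromUtf8(isolate, "bad argument"));
}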
+void Isolate::SetObjectGroupId(internal::Object** object, UniqueId id) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
internal_isolate->global_handles()->SetObjectGroupId(
- Utils::OpenPersistent(object).location(),
+ v8::internal::Handle<v8::internal::Object>(object).location(),
id);
}
-void Isolate::SetReferenceFromGroup(UniqueId id,
- const Persistent<Value>& object) {
+void Isolate::SetReferenceFromGroup(UniqueId id, internal::Object** object) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
internal_isolate->global_handles()->SetReferenceFromGroup(
id,
- Utils::OpenPersistent(object).location());
+ v8::internal::Handle<v8::internal::Object>(object).location());
}
-void Isolate::SetReference(const Persistent<Object>& parent,
- const Persistent<Value>& child) {
+void Isolate::SetReference(internal::Object** parent,
+ internal::Object** child) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- i::Object** parent_location = Utils::OpenPersistent(parent).location();
+ i::Object** parent_location =
+ v8::internal::Handle<v8::internal::Object>(parent).location();
internal_isolate->global_handles()->SetReference(
reinterpret_cast<i::HeapObject**>(parent_location),
- Utils::OpenPersistent(child).location());
+ v8::internal::Handle<v8::internal::Object>(child).location());
}
-void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
- isolate->heap()->SetGlobalGCPrologueCallback(callback);
+void Isolate::AddGCPrologueCallback(GCPrologueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCPrologueCallback(callback, gc_type);
}
-void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
- isolate->heap()->SetGlobalGCEpilogueCallback(callback);
+void Isolate::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCPrologueCallback(callback);
+}
+
+
+void Isolate::AddGCEpilogueCallback(GCEpilogueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+}
+
+
+void Isolate::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCEpilogueCallback(callback);
}
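// Sketch of the new per-isolate registration (it mirrors the V8:: statics
// below); callback and helper names are placeholders.
void OnGC(v8::Isolate* isolate, v8::GCType type, v8::GCCallbackFlags flags) {
  // Embedder-specific bookkeeping would go here.
}
void RegisterGCHooks(v8::Isolate* isolate) {
  isolate->AddGCPrologueCallback(OnGC, v8::kGCTypeAll);
}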
void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
- isolate->heap()->AddGCPrologueCallback(callback, gc_type);
+ isolate->heap()->AddGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
- isolate->heap()->RemoveGCPrologueCallback(callback);
+ isolate->heap()->RemoveGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
}
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
- isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+ isolate->heap()->AddGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
- isolate->heap()->RemoveGCEpilogueCallback(callback);
+ isolate->heap()->RemoveGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
}
@@ -6731,7 +6672,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
AllocationAction action) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
isolate->memory_allocator()->AddMemoryAllocationCallback(
callback, space, action);
}
@@ -6739,7 +6679,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
isolate->memory_allocator()->RemoveMemoryAllocationCallback(
callback);
}
@@ -6747,17 +6686,11 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
if (callback == NULL) return;
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
i::V8::AddCallCompletedCallback(callback);
}
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
i::V8::RemoveCallCompletedCallback(callback);
}
@@ -6843,7 +6776,6 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -6865,7 +6797,6 @@ String::Utf8Value::~Utf8Value() {
String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -6887,7 +6818,6 @@ String::AsciiValue::~AsciiValue() {
String::Value::Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
if (obj.IsEmpty()) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -7062,6 +6992,16 @@ void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
}
+void Debug::SendCommand(Isolate* isolate,
+ const uint16_t* command,
+ int length,
+ ClientData* client_data) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debugger()->ProcessCommand(
+ i::Vector<const uint16_t>(command, length), client_data);
+}
+
+
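// Hypothetical sketch for the isolate-first overload above; the payload is a
// placeholder, not a real debugger-protocol message.
void SendDebugCommand(v8::Isolate* isolate) {
  const uint16_t command[] = { 'p', 'i', 'n', 'g' };
  v8::Debug::SendCommand(isolate, command, 4, NULL);
}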
void Debug::SendCommand(const uint16_t* command, int length,
ClientData* client_data,
Isolate* isolate) {
@@ -7125,7 +7065,7 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
if (!isolate->IsInitialized()) return Local<Value>();
ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
ENTER_V8(isolate);
- v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate));
+ v8::EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Debug* isolate_debug = isolate->debug();
isolate_debug->Load();
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
@@ -7137,11 +7077,10 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
const int kArgc = 1;
v8::Handle<v8::Value> argv[kArgc] = { obj };
EXCEPTION_PREAMBLE(isolate);
- v8::Handle<v8::Value> result = v8_fun->Call(Utils::ToLocal(debug),
- kArgc,
- argv);
+ v8::Local<v8::Value> result =
+ v8_fun->Call(Utils::ToLocal(debug), kArgc, argv);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return scope.Close(result);
+ return scope.Escape(result);
}
@@ -7187,7 +7126,6 @@ void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
Handle<String> CpuProfileNode::GetFunctionName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
@@ -7210,7 +7148,6 @@ int CpuProfileNode::GetScriptId() const {
Handle<String> CpuProfileNode::GetScriptResourceName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
node->entry()->resource_name()));
@@ -7222,16 +7159,15 @@ int CpuProfileNode::GetLineNumber() const {
}
-const char* CpuProfileNode::GetBailoutReason() const {
- const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return node->entry()->bailout_reason();
+int CpuProfileNode::GetColumnNumber() const {
+ return reinterpret_cast<const i::ProfileNode*>(this)->
+ entry()->column_number();
}
-double CpuProfileNode::GetSelfSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
+const char* CpuProfileNode::GetBailoutReason() const {
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return node->entry()->bailout_reason();
}
@@ -7264,7 +7200,6 @@ const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
void CpuProfile::Delete() {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::Delete");
i::CpuProfiler* profiler = isolate->cpu_profiler();
ASSERT(profiler != NULL);
profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
@@ -7282,7 +7217,6 @@ unsigned CpuProfile::GetUid() const {
Handle<String> CpuProfile::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
profile->title()));
@@ -7374,15 +7308,12 @@ static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
}
Handle<Value> HeapGraphEdge::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
i::HeapGraphEdge* edge = ToInternal(this);
switch (edge->type()) {
case i::HeapGraphEdge::kContextVariable:
@@ -7398,21 +7329,17 @@ Handle<Value> HeapGraphEdge::GetName() const {
isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
}
- return v8::Undefined();
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = ToInternal(this)->from();
return reinterpret_cast<const HeapGraphNode*>(from);
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
}
@@ -7425,44 +7352,33 @@ static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
HeapGraphNode::Type HeapGraphNode::GetType() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
}
Handle<String> HeapGraphNode::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
}
SnapshotObjectId HeapGraphNode::GetId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
return ToInternal(this)->id();
}
int HeapGraphNode::GetSelfSize() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
}
int HeapGraphNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
return ToInternal(this)->children().length();
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->children()[index]);
}
@@ -7470,7 +7386,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
return !object.is_null() ?
ToApiHandle<Value>(object) :
@@ -7486,7 +7401,6 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
void HeapSnapshot::Delete() {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
ToInternal(this)->Delete();
} else {
@@ -7497,61 +7411,46 @@ void HeapSnapshot::Delete() {
unsigned HeapSnapshot::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
}
Handle<String> HeapSnapshot::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->title()));
}
const HeapGraphNode* HeapSnapshot::GetRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
}
const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
}
int HeapSnapshot::GetNodesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
return ToInternal(this)->entries().length();
}
const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
&ToInternal(this)->entries().at(index));
}
SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
return ToInternal(this)->max_snapshot_js_object_id();
}
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
ApiCheck(format == kJSON,
"v8::HeapSnapshot::Serialize",
"Unknown serialization format");
@@ -7593,8 +7492,9 @@ const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
}
-void HeapProfiler::StartTrackingHeapObjects() {
- reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking();
+void HeapProfiler::StartTrackingHeapObjects(bool track_allocations) {
+ reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking(
+ track_allocations);
}
@@ -7632,6 +7532,16 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
}
+void HeapProfiler::StartRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking(true);
+}
+
+
+void HeapProfiler::StopRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StopHeapObjectsTracking();
+}
+
+
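// Assumed profiling sketch built on the APIs above: track heap objects (and,
// with `true`, individual allocations) across a workload, then snapshot.
void ProfileWorkload(v8::Isolate* isolate) {  // hypothetical driver
  v8::HandleScope scope(isolate);
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  profiler->StartTrackingHeapObjects(true);
  // ... run the workload under test ...
  const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot(
      v8::String::NewFromUtf8(isolate, "after-workload"));
  profiler->StopTrackingHeapObjects();
  (void) snapshot;
}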
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@@ -7762,9 +7672,11 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
- if (!saved_contexts_.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
- v->VisitPointers(start, start + saved_contexts_.length());
+ List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
+ for (unsigned i = 0; i < ARRAY_SIZE(context_lists); i++) {
+ if (context_lists[i]->is_empty()) continue;
+ Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
+ v->VisitPointers(start, start + context_lists[i]->length());
}
}
@@ -7823,7 +7735,7 @@ DeferredHandles::~DeferredHandles() {
isolate_->UnlinkDeferredHandles(this);
for (int i = 0; i < blocks_.length(); i++) {
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
#endif
isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
diff --git a/chromium/v8/src/api.h b/chromium/v8/src/api.h
index 51bc4942b24..5f19380e65b 100644
--- a/chromium/v8/src/api.h
+++ b/chromium/v8/src/api.h
@@ -308,12 +308,12 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
template <class T>
v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
- v8::HandleScope* scope) {
+ v8::EscapableHandleScope* scope) {
v8::internal::Handle<T> handle;
if (!is_null()) {
handle = *this;
}
- return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)), true);
+ return Utils::OpenHandle(*scope->Escape(Utils::ToLocal(handle)), true);
}
@@ -542,12 +542,12 @@ class HandleScopeImplementer {
inline void DecrementCallDepth() {call_depth_--;}
inline bool CallDepthIsZero() { return call_depth_ == 0; }
- inline void EnterContext(Handle<Object> context);
- inline bool LeaveLastContext();
+ inline void EnterContext(Handle<Context> context);
+ inline bool LeaveContext(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
- inline Handle<Object> LastEnteredContext();
+ inline Handle<Context> LastEnteredContext();
inline void SaveContext(Context* context);
inline Context* RestoreContext();
@@ -592,7 +592,7 @@ class HandleScopeImplementer {
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
- List<Handle<Object> > entered_contexts_;
+ List<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
Object** spare_;
@@ -630,21 +630,23 @@ bool HandleScopeImplementer::HasSavedContexts() {
}
-void HandleScopeImplementer::EnterContext(Handle<Object> context) {
- entered_contexts_.Add(context);
+void HandleScopeImplementer::EnterContext(Handle<Context> context) {
+ entered_contexts_.Add(*context);
}
-bool HandleScopeImplementer::LeaveLastContext() {
+bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
if (entered_contexts_.is_empty()) return false;
+ // TODO(dcarney): figure out what's wrong here
+ // if (entered_contexts_.last() != *context) return false;
entered_contexts_.RemoveLast();
return true;
}
-Handle<Object> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.is_empty()) return Handle<Object>::null();
- return entered_contexts_.last();
+Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+ if (entered_contexts_.is_empty()) return Handle<Context>::null();
+ return Handle<Context>(entered_contexts_.last());
}
@@ -665,7 +667,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#ifdef DEBUG
// SealHandleScope may make the prev_limit to point inside the block.
if (block_start <= prev_limit && prev_limit <= block_limit) {
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(prev_limit, block_limit);
#endif
break;
@@ -675,7 +677,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#endif
blocks_.RemoveLast();
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(block_start, block_limit);
#endif
if (spare_ != NULL) {
diff --git a/chromium/v8/src/apinatives.js b/chromium/v8/src/apinatives.js
index 5fb36c09e72..6431901bf23 100644
--- a/chromium/v8/src/apinatives.js
+++ b/chromium/v8/src/apinatives.js
@@ -71,7 +71,6 @@ function InstantiateFunction(data, name) {
(serialNumber in cache) && (cache[serialNumber] != kUninitialized);
if (!isFunctionCached) {
try {
- cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
var flags = %GetTemplateField(data, kApiFlagOffset);
diff --git a/chromium/v8/src/arguments.cc b/chromium/v8/src/arguments.cc
index 287805717e5..205da7c68a6 100644
--- a/chromium/v8/src/arguments.cc
+++ b/chromium/v8/src/arguments.cc
@@ -38,7 +38,7 @@ template<typename T>
template<typename V>
v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
- Object** handle = &this->end()[kReturnValueOffset];
+ Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set; return an empty handle as per previous behaviour.
if ((*handle)->IsTheHole()) return v8::Handle<V>();
return Utils::Convert<Object, V>(Handle<Object>(handle));
@@ -49,7 +49,7 @@ v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- FunctionCallbackInfo<v8::Value> info(end(),
+ FunctionCallbackInfo<v8::Value> info(begin(),
argv_,
argc_,
is_construct_call_);
@@ -63,7 +63,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -75,7 +75,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -88,7 +88,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
@@ -101,7 +101,7 @@ void PropertyCallbackArguments::Call(Function f, \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
}
@@ -117,5 +117,12 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
#undef WRITE_CALL_2_VOID
-} } // namespace v8::internal
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+  // TODO(ulan): This clobbers only a subset of registers depending on the
+  // compiler. Rewrite this in assembly to really clobber all registers.
+ // GCC for ia32 uses the FPU and does not touch XMM registers.
+ return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/arguments.h b/chromium/v8/src/arguments.h
index c1db98b53db..b7137c3175a 100644
--- a/chromium/v8/src/arguments.h
+++ b/chromium/v8/src/arguments.h
@@ -137,7 +137,7 @@ class CustomArgumentsBase : public Relocatable {
v->VisitPointers(values_, values_ + kArrayLength);
}
protected:
- inline Object** end() { return values_ + kArrayLength - 1; }
+ inline Object** begin() { return values_; }
explicit inline CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
Object* values_[kArrayLength];
@@ -151,7 +151,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
- this->end()[kReturnValueOffset] =
+ this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
@@ -162,7 +162,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
v8::Handle<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
- return reinterpret_cast<Isolate*>(this->end()[T::kIsolateIndex]);
+ return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
}
};
@@ -185,7 +185,7 @@ class PropertyCallbackArguments
Object* self,
JSObject* holder)
: Super(isolate) {
- Object** values = this->end();
+ Object** values = this->begin();
values[T::kThisIndex] = self;
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
@@ -237,6 +237,13 @@ class FunctionCallbackArguments
typedef FunctionCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
+ static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
+ static const int kCalleeIndex = T::kCalleeIndex;
+ static const int kContextSaveIndex = T::kContextSaveIndex;
FunctionCallbackArguments(internal::Isolate* isolate,
internal::Object* data,
@@ -249,10 +256,11 @@ class FunctionCallbackArguments
argv_(argv),
argc_(argc),
is_construct_call_(is_construct_call) {
- Object** values = end();
+ Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
+ values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as the default value.
// It cannot escape into JS as it's removed in Call below.
@@ -281,12 +289,23 @@ class FunctionCallbackArguments
};
+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
Type Name(int args_length, Object** args_object, Isolate* isolate)
#define RUNTIME_FUNCTION(Type, Name) \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ CLOBBER_DOUBLE_REGISTERS(); \
Arguments args(args_length, args_object); \
return __RT_impl_##Name(args, isolate); \
} \
diff --git a/chromium/v8/src/arm/assembler-arm-inl.h b/chromium/v8/src/arm/assembler-arm-inl.h
index a1d1e1b5670..3399958ee3d 100644
--- a/chromium/v8/src/arm/assembler-arm-inl.h
+++ b/chromium/v8/src/arm/assembler-arm-inl.h
@@ -57,6 +57,11 @@ int DwVfpRegister::NumRegisters() {
}
+int DwVfpRegister::NumReservedRegisters() {
+ return kNumReservedRegisters;
+}
+
+
int DwVfpRegister::NumAllocatableRegisters() {
return NumRegisters() - kNumReservedRegisters;
}
@@ -104,7 +109,7 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
+ return Assembler::target_pointer_address_at(pc_);
}
@@ -126,31 +131,21 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_pointer_at(pc_)));
-}
-
-
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
- return &reconstructed_obj_ptr_;
+ Assembler::target_address_at(pc_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -160,10 +155,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
+ return Assembler::target_address_at(pc_);
}
@@ -208,6 +202,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
static const int kNoCodeAgeSequenceLength = 3;
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  UNREACHABLE();  // This should never be reached on ARM.
+ return Handle<Object>();
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
@@ -262,6 +263,15 @@ Object** RelocInfo::call_object_address() {
}
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, NULL);
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
@@ -387,33 +397,12 @@ void Assembler::emit(Instr x) {
Address Assembler::target_pointer_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
- // With a blx instruction, the instruction before is what needs to be patched.
- if ((instr & kBlxRegMask) == kBlxRegPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
- ASSERT(IsLdrPcImmediateOffset(instr));
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
+ Instr instr = Memory::int32_at(pc);
+ return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
}
-Address Assembler::target_pointer_at(Address pc) {
+Address Assembler::target_address_at(Address pc) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@@ -422,6 +411,7 @@ Address Assembler::target_pointer_at(Address pc) {
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
}
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(target_pointer_address_at(pc));
}
@@ -467,19 +457,13 @@ void Assembler::deserialization_set_special_target_at(
}
-void Assembler::set_external_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
static Instr EncodeMovwImmediate(uint32_t immediate) {
ASSERT(immediate < 0x10000);
return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
-void Assembler::set_target_pointer_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc, Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@@ -510,16 +494,6 @@ void Assembler::set_target_pointer_at(Address pc, Address target) {
}
-Address Assembler::target_address_at(Address pc) {
- return target_pointer_at(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- set_target_pointer_at(pc, target);
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/chromium/v8/src/arm/assembler-arm.cc b/chromium/v8/src/arm/assembler-arm.cc
index bd8b0613eb9..4171f7dcdf8 100644
--- a/chromium/v8/src/arm/assembler-arm.cc
+++ b/chromium/v8/src/arm/assembler-arm.cc
@@ -50,6 +50,7 @@ bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;
@@ -516,12 +517,13 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
- num_pending_reloc_info_ = 0;
+ num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
+ first_const_pool_32_use_ = -1;
+ first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
ClearRecordedAstId();
}
@@ -535,7 +537,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
// Set up code descriptor.
@@ -543,6 +545,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
}
@@ -3148,14 +3151,19 @@ void Assembler::GrowBuffer() {
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
+ for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
rinfo.set_pc(rinfo.pc() + pc_delta);
}
}
+ for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+ ASSERT(rinfo.rmode() == RelocInfo::NONE64);
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
}
@@ -3163,7 +3171,7 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
@@ -3175,7 +3183,7 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
@@ -3183,6 +3191,14 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::emit_code_stub_address(Code* stub) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) =
+ reinterpret_cast<uint32_t>(stub->instruction_start());
+ pc_ += sizeof(uint32_t);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
UseConstantPoolMode mode) {
// We do not try to reuse pool constants.
@@ -3237,15 +3253,19 @@ void Assembler::RecordRelocInfo(double data) {
void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
if (rinfo.rmode() == RelocInfo::NONE64) {
- ++num_pending_64_bit_reloc_info_;
+ ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
- ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
@@ -3255,12 +3275,15 @@ void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
- // If there are some pending entries, the constant pool cannot be blocked
- // further than constant pool instruction's reach.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
- // TODO(jfb) Also check 64-bit entries are in range (requires splitting
- // them up from 32-bit entries).
+ // Max pool start (if we need a jump and an alignment).
+#ifdef DEBUG
+ int start = pc_limit + kInstrSize + 2 * kPointerSize;
+ ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+ (start - first_const_pool_32_use_ +
+ num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
+ ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+ (start - first_const_pool_64_use_ < kMaxDistToFPPool));
+#endif
no_const_pool_before_ = pc_limit;
}
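
The debug check now bounds the worst-case pool start, pc_limit plus a jump, the marker, and a possible alignment word, and charges pending doubles against the 32-bit reach because they are emitted first. A toy restatement of the 32-bit half of the invariant, with the bookkeeping fields passed as arguments:

    constexpr int kInstrSize = 4;
    constexpr int kPointerSize = 4;
    constexpr int kDoubleSize = 8;
    constexpr int kMaxDistToIntPool = 4 * 1024;

    bool CanBlockFor(int instructions, int pc_offset,
                     int first_const_pool_32_use, int num_pending_64_bit) {
      int pc_limit = pc_offset + instructions * kInstrSize;
      // Worst-case pool start: a jump over the pool, the marker, and an
      // alignment word.
      int start = pc_limit + kInstrSize + 2 * kPointerSize;
      // Doubles are emitted first and push the 32-bit entries further out.
      return start - first_const_pool_32_use +
                 num_pending_64_bit * kDoubleSize < kMaxDistToIntPool;
    }

    int main() {
      // A 32-bit entry recorded at offset 0, pc now at 4000: blocking for
      // 21 more instructions puts the worst-case pool start at 4096,
      // exactly out of ldr range.
      return CanBlockFor(21, 4000, 0, 0) ? 1 : 0;
    }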
@@ -3281,8 +3304,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ if ((num_pending_32_bit_reloc_info_ == 0) &&
+ (num_pending_64_bit_reloc_info_ == 0)) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@@ -3291,24 +3314,18 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Check that the code buffer is large enough before emitting the constant
// pool (including the jump over the pool, the constant pool marker, and
// the gap to the relocation information).
- // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
- int size_after_marker = num_pending_reloc_info_ * kPointerSize;
+ int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
- // 64-bit values must be 64-bit aligned.
- // We'll start emitting at PC: branch+marker, then 32-bit values, then
- // 64-bit values which might need to be aligned.
- bool require_64_bit_align = has_fp_values &&
- (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
- if (require_64_bit_align) {
- size_after_marker += kInstrSize;
- }
- // num_pending_reloc_info_ also contains 64-bit entries, the above code
- // therefore already counted half of the size for 64-bit entries. Add the
- // remaining size.
- STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
- size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
+ bool require_64_bit_align = false;
+ if (has_fp_values) {
+ require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
+ if (require_64_bit_align) {
+ size_after_marker += kInstrSize;
+ }
+ size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
+ }
int size = size_up_to_marker + size_after_marker;
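
The sizing now matches the emission order used below: optional jump, marker, an alignment word only when doubles are pending and would start misaligned, the 64-bit entries, then the 32-bit entries. Previously every 64-bit entry was first counted as a pointer and then topped up by kDoubleSize / 2. A worked example of the new arithmetic:

    #include <cstdint>
    #include <cstdio>

    constexpr int kInstrSize = 4;
    constexpr int kPointerSize = 4;
    constexpr int kDoubleSize = 8;

    // Mirror of the sizing above: optional jump + marker, then an
    // alignment word only if doubles are pending and the pool would
    // start misaligned, then doubles, then pointers.
    int ConstPoolSize(uintptr_t pc, bool require_jump,
                      int num_32bit, int num_64bit) {
      int size_up_to_marker = (require_jump ? kInstrSize : 0) + kInstrSize;
      int size_after_marker = num_32bit * kPointerSize;
      if (num_64bit > 0) {
        if ((pc + size_up_to_marker) & 0x7) size_after_marker += kInstrSize;
        size_after_marker += num_64bit * kDoubleSize;
      }
      return size_up_to_marker + size_after_marker;
    }

    int main() {
      // pc = 0x1000: jump + marker end 8-byte aligned, so no padding:
      // 8 + 2*8 + 3*4 = 36 bytes.
      printf("%d\n", ConstPoolSize(0x1000, true, 3, 2));
      // pc = 0x1004: misaligned, one padding word: 8 + 4 + 16 + 12 = 40.
      printf("%d\n", ConstPoolSize(0x1004, true, 3, 2));
      return 0;
    }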
@@ -3321,19 +3338,25 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
- int dist = pc_offset() + size - first_const_pool_use_;
+ ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+ bool need_emit = false;
if (has_fp_values) {
- if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToFPPool / 2))) {
- return;
- }
- } else {
- if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToIntPool / 2))) {
- return;
+ int dist64 = pc_offset() +
+ size -
+ num_pending_32_bit_reloc_info_ * kPointerSize -
+ first_const_pool_64_use_;
+ if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
+ (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
+ need_emit = true;
}
}
+ int dist32 =
+ pc_offset() + size - first_const_pool_32_use_;
+ if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+ (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+ need_emit = true;
+ }
+ if (!need_emit) return;
}
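
The old single distance test becomes a need_emit accumulation over both pools, and each distance is measured to where that pool's entries will actually end: the doubles sit before the trailing 32-bit block, so dist64 subtracts it. A compilable restatement; kCheckPoolInterval's value is a stand-in, as it does not appear in this hunk:

    constexpr int kPointerSize = 4;
    constexpr int kMaxDistToIntPool = 4 * 1024;
    constexpr int kMaxDistToFPPool = 1 * 1024;
    constexpr int kCheckPoolInterval = 128;  // stand-in; not shown in this diff

    bool NeedEmit(int pc_offset, int size, bool require_jump,
                  int num_pending_32, int first_32_use, int first_64_use,
                  bool has_fp_values) {
      bool need_emit = false;
      if (has_fp_values) {
        // The doubles end at pc_offset + size minus the trailing 32-bit block.
        int dist64 = pc_offset + size -
                     num_pending_32 * kPointerSize - first_64_use;
        if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
            (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
          need_emit = true;
        }
      }
      int dist32 = pc_offset + size - first_32_use;
      if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
          (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
        need_emit = true;
      }
      return need_emit;
    }

    int main() {
      // A 32-bit entry from offset 0, pc at 3910, 64-byte pool: within
      // kCheckPoolInterval of the 4KB reach, so emit now.
      return NeedEmit(3910, 64, true, 0, 0, 0, false) ? 0 : 1;
    }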
int needed_space = size + kGap;
@@ -3362,15 +3385,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// that of 32-bit entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
+ for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
- if (rinfo.rmode() != RelocInfo::NONE64) {
- // 32-bit values emitted later.
- continue;
- }
-
- ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment.
+ ASSERT(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
@@ -3380,53 +3398,85 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
ASSERT(is_uint10(delta));
+ bool found = false;
+ uint64_t value = rinfo.raw_data64();
+ for (int j = 0; j < i; j++) {
+ RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
+ if (value == rinfo2.raw_data64()) {
+ found = true;
+ ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
+ Instr instr2 = instr_at(rinfo2.pc());
+ ASSERT(IsVldrDPcImmediateOffset(instr2));
+ delta = GetVldrDRegisterImmediateOffset(instr2);
+ delta += rinfo2.pc() - rinfo.pc();
+ break;
+ }
+ }
+
instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
- const double double_data = rinfo.data64();
- uint64_t uint_data = 0;
- OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
- emit(uint_data & 0xFFFFFFFF);
- emit(uint_data >> 32);
+ if (!found) {
+ uint64_t uint_data = rinfo.raw_data64();
+ emit(uint_data & 0xFFFFFFFF);
+ emit(uint_data >> 32);
+ }
}
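
New in this revision: identical 64-bit constants share one pool slot. The scan compares raw bit patterns via raw_data64(), and a later load reuses an earlier slot by taking that instruction's already-patched offset and shifting it by the pc difference. A toy model of the slot assignment, without the real RelocInfo plumbing:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Identical bit patterns share a pool slot; every vldr is patched
    // with the delta from its own pc.
    struct Pending64 { int pc; uint64_t bits; };

    std::vector<int> EmitPool64(const std::vector<Pending64>& pending,
                                int pool_start, int pc_load_delta) {
      std::vector<int> slot(pending.size());
      int next_slot = pool_start;
      for (size_t i = 0; i < pending.size(); ++i) {
        bool found = false;
        for (size_t j = 0; j < i; ++j) {
          if (pending[j].bits == pending[i].bits) {
            slot[i] = slot[j];  // reuse the earlier entry's slot
            found = true;
            break;
          }
        }
        if (!found) { slot[i] = next_slot; next_slot += 8; }
      }
      std::vector<int> delta(pending.size());
      for (size_t i = 0; i < pending.size(); ++i)
        delta[i] = slot[i] - pending[i].pc - pc_load_delta;
      return delta;
    }

    int main() {
      // Two loads of 1.0 and one of 2.0: the pool holds two slots, not three.
      std::vector<Pending64> p = {{0, 0x3ff0000000000000ull},
                                  {8, 0x4000000000000000ull},
                                  {16, 0x3ff0000000000000ull}};
      std::vector<int> d = EmitPool64(p, 40, 8);
      assert(d[0] == 32 && d[2] == 16);  // entry 2 shares entry 0's slot
      return 0;
    }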
// Emit 32-bit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
+ for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL);
-
- if (rinfo.rmode() == RelocInfo::NONE64) {
- // 64-bit values emitted earlier.
- continue;
- }
+ rinfo.rmode() != RelocInfo::CONST_POOL &&
+ rinfo.rmode() != RelocInfo::NONE64);
Instr instr = instr_at(rinfo.pc());
// 64-bit loads shouldn't get here.
ASSERT(!IsVldrDPcImmediateOffset(instr));
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
-
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
ASSERT(is_uint12(delta));
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+
+ bool found = false;
+ if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+ for (int j = 0; j < i; j++) {
+ RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
+
+ if ((rinfo2.data() == rinfo.data()) &&
+ (rinfo2.rmode() == rinfo.rmode())) {
+ Instr instr2 = instr_at(rinfo2.pc());
+ if (IsLdrPcImmediateOffset(instr2)) {
+ delta = GetLdrRegisterImmediateOffset(instr2);
+ delta += rinfo2.pc() - rinfo.pc();
+ found = true;
+ break;
+ }
+ }
+ }
+ }
+
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
- emit(rinfo.data());
+
+ if (!found) {
+ emit(rinfo.data());
+ }
} else {
ASSERT(IsMovW(instr));
- emit(rinfo.data());
}
}
- num_pending_reloc_info_ = 0;
+ num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
- first_const_pool_use_ = -1;
+ first_const_pool_32_use_ = -1;
+ first_const_pool_64_use_ = -1;
RecordComment("]");
diff --git a/chromium/v8/src/arm/assembler-arm.h b/chromium/v8/src/arm/assembler-arm.h
index 866b1c9024d..84bc8794e81 100644
--- a/chromium/v8/src/arm/assembler-arm.h
+++ b/chromium/v8/src/arm/assembler-arm.h
@@ -64,23 +64,41 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- return (supported_ & (1u << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
static unsigned cache_line_size() { return cache_line_size_; }
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ unsigned mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, unsigned set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static unsigned flag2set(CpuFeature f) {
+ return 1u << f;
+ }
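
Feature sets are plain unsigned bitmasks indexed by the CpuFeature enum; Check and flag2set replace the two ad-hoc shift expressions used before. A standalone sketch with placeholder enum values:

    #include <cassert>

    // The enum values are placeholders, not V8's CpuFeature numbering.
    enum CpuFeature { VFP3 = 0, NEON = 1, SUDIV = 2 };

    unsigned flag2set(CpuFeature f) { return 1u << f; }

    bool Check(CpuFeature f, unsigned set) {
      return (set & flag2set(f)) != 0;
    }

    int main() {
      unsigned supported = flag2set(VFP3) | flag2set(SUDIV);
      assert(Check(VFP3, supported));
      assert(!Check(NEON, supported));
      // VerifyCrossCompiling(f): fine if no cross-compile mask is set, or
      // if the mask includes the feature.
      unsigned cross_compile = flag2set(SUDIV);
      unsigned mask = flag2set(SUDIV);
      assert(cross_compile == 0 || (cross_compile & mask) == mask);
      return 0;
    }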
+
#ifdef DEBUG
static bool initialized_;
#endif
@@ -88,7 +106,10 @@ class CpuFeatures : public AllStatic {
static unsigned found_by_runtime_probing_only_;
static unsigned cache_line_size_;
+ static unsigned cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -114,10 +135,30 @@ class CpuFeatures : public AllStatic {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;
+const int kRegister_r1_Code = 1;
+const int kRegister_r2_Code = 2;
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_fp_Code = 11;
+const int kRegister_ip_Code = 12;
+const int kRegister_sp_Code = 13;
+const int kRegister_lr_Code = 14;
+const int kRegister_pc_Code = 15;
+
// Core register
struct Register {
static const int kNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters =
+ FLAG_enable_ool_constant_pool ? 8 : 9;
static const int kSizeInBytes = 4;
inline static int NumAllocatableRegisters();
@@ -143,7 +184,11 @@ struct Register {
"r5",
"r6",
"r7",
+ "r8",
};
+ if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+ return names[index + 1];
+ }
return names[index];
}
@@ -172,25 +217,6 @@ struct Register {
int code_;
};
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
-
const Register no_reg = { kRegister_no_reg_Code };
const Register r0 = { kRegister_r0_Code };
@@ -200,6 +226,7 @@ const Register r3 = { kRegister_r3_Code };
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
+// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r8 = { kRegister_r8_Code };
@@ -252,6 +279,7 @@ struct DwVfpRegister {
// Any code included in the snapshot must be able to run with either 16 or 32
// registers.
inline static int NumRegisters();
+ inline static int NumReservedRegisters();
inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg);
@@ -752,10 +780,6 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
- // Read/Modify the pointer in the branch/call/move instruction at pc.
- INLINE(static Address target_pointer_at(Address pc));
- INLINE(static void set_target_pointer_at(Address pc, Address target));
-
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
@@ -773,11 +797,6 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Address target);
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target);
-
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
@@ -1360,6 +1379,9 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ // Emits the address of the code stub's first instruction.
+ void emit_code_stub_address(Code* stub);
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@@ -1411,7 +1433,8 @@ class Assembler : public AssemblerBase {
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer; the integer pool's reach therefore acts as the limit.
- static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
+ static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
+ static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
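
Both bounds fall straight out of the distances: at worst one pool-referencing instruction per 4-byte slot, so the 4KB ldr reach caps pending 32-bit entries at 1024 and the 1KB vldr reach caps 64-bit entries at 256:

    constexpr int KB = 1024;
    constexpr int kInstrSize = 4;
    constexpr int kMaxDistToIntPool = 4 * KB;
    constexpr int kMaxDistToFPPool = 1 * KB;

    // Worst case: every instruction in a run references the pool, so each
    // buffer holds one entry per instruction over its reach.
    constexpr int kMaxNumPending32RelocInfo = kMaxDistToIntPool / kInstrSize;
    constexpr int kMaxNumPending64RelocInfo = kMaxDistToFPPool / kInstrSize;

    static_assert(kMaxNumPending32RelocInfo == 1024, "ldr reach / 4 bytes");
    static_assert(kMaxNumPending64RelocInfo == 256, "vldr reach / 4 bytes");

    int main() { return 0; }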
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1449,11 +1472,16 @@ class Assembler : public AssemblerBase {
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
+#ifdef DEBUG
+ // Max pool start (if we need a jump and an alignment).
+ int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
+ ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+ (start + num_pending_64_bit_reloc_info_ * kDoubleSize <
+ (first_const_pool_32_use_ + kMaxDistToIntPool)));
ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
+ (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
+#endif
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
@@ -1502,7 +1530,8 @@ class Assembler : public AssemblerBase {
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
- int first_const_pool_use_;
+ int first_const_pool_32_use_;
+ int first_const_pool_64_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
@@ -1516,12 +1545,12 @@ class Assembler : public AssemblerBase {
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
- // Number of pending reloc info entries included above which also happen to
- // be 64-bit.
+ // The buffers of pending relocation info.
+ RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
+ RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
+ // Number of pending reloc info entries in the 32-bit buffer.
+ int num_pending_32_bit_reloc_info_;
+ // Number of pending reloc info entries in the 64-bit buffer.
int num_pending_64_bit_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
diff --git a/chromium/v8/src/arm/builtins-arm.cc b/chromium/v8/src/arm/builtins-arm.cc
index f60e1f86714..bef4bc3c465 100644
--- a/chromium/v8/src/arm/builtins-arm.cc
+++ b/chromium/v8/src/arm/builtins-arm.cc
@@ -193,14 +193,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register argument = r2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- &not_cached);
+ __ LookupNumberStringCache(r0, // Input.
+ argument, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ r5, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
@@ -296,10 +294,8 @@ static void CallRuntimePassFunction(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
- // Push call kind information.
- __ push(r5);
- // Function is also the parameter to the runtime call.
- __ push(r1);
+ // Push call kind information and the function (the runtime call's parameter).
+ __ Push(r5, r1);
__ CallRuntime(function_id, 1);
// Restore call kind information.
@@ -408,9 +404,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ strb(r4, constructor_count);
__ b(ne, &allocate);
- __ Push(r1, r2);
+ __ push(r1);
- __ push(r1); // constructor
+ __ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
@@ -447,9 +443,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
@@ -457,14 +452,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmp(r0, r6);
+ __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ cmp(r0, ip);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
- __ InitializeFieldsWithFiller(r5, r0, r7);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
}
- __ InitializeFieldsWithFiller(r5, r6, r7);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -529,16 +526,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, kUndefinedValueNotLoaded);
- }
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
@@ -702,7 +693,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
- // r5-r7, cp may be clobbered
+ // r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@@ -742,7 +733,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
+ if (!FLAG_enable_ool_constant_pool) {
+ __ mov(r8, Operand(r4));
+ }
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -807,12 +800,13 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
- // r1 - function object
+ // r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r1);
+ __ PrepareCallCFunction(1, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
@@ -830,7 +824,41 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection, which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // r0 - contains return address (beginning of patch sequence)
+ // r1 - isolate
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ PrepareCallCFunction(1, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
+ masm->isolate()), 2);
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Jump to point after the code-age stub.
+ __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+ __ mov(pc, r0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -839,7 +867,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -848,6 +876,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -895,21 +933,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -956,6 +979,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
@@ -1147,11 +1188,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
@@ -1176,8 +1219,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
+ __ Push(r1, r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
// End of stack check.
@@ -1259,8 +1301,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// r0: current argument index
__ bind(&loop);
__ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
+ __ Push(r1, r0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
@@ -1313,7 +1354,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(3 * kPointerSize));
+ __ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1323,7 +1365,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
+ __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
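
Both adaptor-frame helpers replace their magic offsets with frame constants. The stm in EnterArgumentsAdaptorFrame lays out, from high to low address: lr, saved fp, the ARGUMENTS_ADAPTOR marker, the function, and the smi argc, with fp then pointed at the saved-fp slot. The replaced literals imply kFixedFrameSizeFromFp == 2 * kPointerSize; that value is an inference from this hunk, not stated in it. A toy layout check:

    #include <cassert>

    constexpr int kPointerSize = 4;
    constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize;  // inferred

    int main() {
      // After 'stm db_w, sp, {r0, r1, r4, fp, lr}', from high to low
      // address: lr, saved fp, marker (r4), function (r1), smi argc (r0).
      int sp = 0;
      int argc_slot = sp;              // where r0 landed
      int fp = sp + 3 * kPointerSize;  // fp points at the saved-fp slot
      assert(fp == sp + kFixedFrameSizeFromFp + kPointerSize);
      assert(argc_slot == fp - (kFixedFrameSizeFromFp + kPointerSize));
      return 0;
    }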
@@ -1410,7 +1453,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: code entry to call
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
- __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
+ // Adjust for frame.
+ __ sub(r2, r2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/chromium/v8/src/arm/code-stubs-arm.cc b/chromium/v8/src/arm/code-stubs-arm.cc
index cd1809fb2a8..cc2dbdcdee0 100644
--- a/chromium/v8/src/arm/code-stubs-arm.cc
+++ b/chromium/v8/src/arm/code-stubs-arm.cc
@@ -59,6 +59,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -66,7 +77,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -77,7 +88,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -102,6 +113,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -122,6 +144,19 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -158,6 +193,18 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
}
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -166,14 +213,21 @@ static void InitializeArrayConstructorDescriptor(
// r0 -- number of arguments
// r1 -- function
// r2 -- type info cell with elements kind
- static Register registers[] = { r1, r2 };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
+ static Register registers_variable_args[] = { r1, r2, r0 };
+ static Register registers_no_args[] = { r1, r2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// The stack param count must cover the constructor pointer and the single argument.
- descriptor->stack_parameter_count_ = &r0;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = r0;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -187,15 +241,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// r0 -- number of arguments
// r1 -- constructor function
- static Register registers[] = { r1 };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { r1, r0 };
+ static Register registers_no_args[] = { r1 };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// The stack param count must cover the constructor pointer and the single argument.
- descriptor->stack_parameter_count_ = &r0;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = r0;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -279,6 +339,17 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void NewStringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -622,27 +693,12 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
- return true;
- }
- if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
+ stub2.GetCode(isolate);
}
@@ -825,8 +881,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert lhs to a double in d7.
__ SmiToDouble(d7, lhs);
// Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
// We now have both loaded as doubles but we can skip the lhs nan check
// since it's a smi.
@@ -851,8 +906,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Rhs is a smi, lhs is a heap number.
// Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
// Convert rhs to a double in d6 .
__ SmiToDouble(d6, rhs);
// Fall through to both_loaded_as_doubles.
@@ -920,10 +974,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(both_loaded_as_doubles);
}
@@ -972,108 +1024,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
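
The deleted helper survives as MacroAssembler::LookupNumberStringCache (see the builtins-arm.cc hunk above). Per its removed comments, smis hash as their own value and doubles as the xor of their two 32-bit halves, masked by half the cache length because each entry spans a number field and a string field. A sketch of that index computation, a model of the scheme rather than V8's Heap::GetNumberStringCache itself:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Index of an entry's number field in the cache backing store.
    int NumberStringCacheIndex(double value, bool is_smi, int32_t smi_value,
                               int cache_length) {
      // Each entry holds two fields (number, string), hence length / 2.
      int mask = cache_length / 2 - 1;
      uint32_t hash;
      if (is_smi) {
        hash = static_cast<uint32_t>(smi_value);
      } else {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
      }
      return static_cast<int>(hash & mask) * 2;
    }

    int main() {
      int idx = NumberStringCacheIndex(1.5, false, 0, 128);
      assert(idx >= 0 && idx < 128 && idx % 2 == 0);
      return 0;
    }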
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
static void ICCompareStub_CheckInputType(MacroAssembler* masm,
Register input,
Register scratch,
@@ -1281,994 +1231,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-// Generates code to call a C function to do a double operation.
-// This code never falls through; it returns with r0 holding a heap number
-// that contains the result.
-// Register heap_number_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// d0: Left value.
-// d1: Right value.
-// If soft float ABI, use also r0, r1, r2, r3.
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- if (!masm->use_eabi_hardfloat()) {
- __ vmov(r0, r1, d0);
- __ vmov(r2, r3, d1);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number. Double returned in
- // registers r0 and r1 or in d0.
- if (masm->use_eabi_hardfloat()) {
- __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- __ Strd(r0, r1,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- }
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = true; // VFP2 is a base requirement for V8
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ push(r2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- ASSERT(right.is(r0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range: no overflow if the upper 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &not_smi_result);
- // Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number by 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number by 0, because
- // that would mean we should produce -0.
- break;
- case Token::DIV: {
- Label div_with_sdiv;
-
- // Check for 0 divisor.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- if (CpuFeatures::IsSupported(SUDIV)) {
- __ b(ne, &div_with_sdiv);
- // Check for no remainder.
- __ tst(left, scratch1);
- __ b(ne, &not_smi_result);
- // Check for positive left hand side.
- __ cmp(left, Operand::Zero());
- __ b(mi, &div_with_sdiv);
- } else {
- __ b(ne, &not_smi_result);
- // Check for positive and no remainder.
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
- }
-
- // Perform division by shifting.
- __ clz(scratch1, scratch1);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm, SUDIV);
- Label result_not_zero;
-
- __ bind(&div_with_sdiv);
- // Do division.
- __ sdiv(scratch1, left, right);
- // Check that the remainder is zero.
- __ mls(scratch2, scratch1, right, left);
- __ cmp(scratch2, Operand::Zero());
- __ b(ne, &not_smi_result);
- // Check for negative zero result.
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &result_not_zero);
- __ cmp(right, Operand::Zero());
- __ b(lt, &not_smi_result);
- __ bind(&result_not_zero);
- // Check for the corner case of dividing the most negative smi by -1.
- __ cmp(scratch1, Operand(0x40000000));
- __ b(eq, &not_smi_result);
- // Tag and return the result.
- __ SmiTag(right, scratch1);
- __ Ret();
- }
- break;
- }
- case Token::MOD: {
- Label modulo_with_sdiv;
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- // Check for x % 0.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &modulo_with_sdiv);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- __ b(ne, &modulo_with_sdiv);
- } else {
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- }
-
- // Perform modulus by masking (scratch1 contains right - 1).
- __ and_(right, left, Operand(scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm, SUDIV);
- __ bind(&modulo_with_sdiv);
- __ mov(scratch2, right);
- // Perform modulus with sdiv and mls.
- __ sdiv(scratch1, left, right);
- __ mls(right, scratch1, right, left);
- // Return if the result is not 0.
- __ cmp(right, Operand::Zero());
- __ Ret(ne);
- // The result is 0, check for -0 case.
- __ cmp(left, Operand::Zero());
- __ Ret(pl);
- // This is a -0 case, restore the value of right.
- __ mov(right, scratch2);
- // We fall through here to not_smi_result to produce -0.
- }
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31-bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &not_smi_result);
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ TrySmiTag(right, scratch1, &not_smi_result);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
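
The deleted smi fast path leans on ARM condition flags: Token::ADD adds optimistically with SetCC and returns while the V flag is clear, reverting only on overflow. A C-level analogue, using the GCC/Clang __builtin_add_overflow intrinsic in place of the V flag:

    #include <cassert>
    #include <cstdint>

    // Tagged smis (value << 1 on 32-bit ARM) are added directly; signed
    // overflow plays the role of the V flag that '__ Ret(vc)' tests.
    bool SmiAdd(int32_t left_tagged, int32_t right_tagged, int32_t* result) {
      return !__builtin_add_overflow(left_tagged, right_tagged, result);
    }

    int main() {
      int32_t r;
      assert(SmiAdd(2 << 1, 3 << 1, &r) && r == (5 << 1));
      // On overflow the stub reverts the add and falls through to the
      // non-smi path; here we just report failure.
      assert(!SmiAdd(INT32_MAX - 1, 4, &r));
      return 0;
    }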
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r6;
- Register scratch2 = r7;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Allocate new heap number for result.
- Register result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load left and right operands into d0 and d1.
- if (smi_operands) {
- __ SmiToDouble(d1, right);
- __ SmiToDouble(d0, left);
- } else {
- // Load right operand into d1.
- if (right_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- right, d1, heap_number_map, scratch1, d8, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
- }
- // Load left operand into d0.
- if (left_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- left, d0, heap_number_map, scratch1, d8, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(
- left, d0, heap_number_map, scratch1, fail);
- }
- }
-
- // Calculate the result.
- if (op != Token::MOD) {
- // Using VFP registers:
- // d0: Left value
- // d1: Right value
- switch (op) {
- case Token::ADD:
- __ vadd(d5, d0, d1);
- break;
- case Token::SUB:
- __ vsub(d5, d0, d1);
- break;
- case Token::MUL:
- __ vmul(d5, d0, d1);
- break;
- case Token::DIV:
- __ vdiv(d5, d0, d1);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op, result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
- __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int, so we go to the slow case
- // when we hit this situation.
- __ b(mi, &result_not_a_smi);
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ TrySmiTag(r0, r2, &result_not_a_smi);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = r5;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above, SHR always needs to produce a positive result.
- __ vmov(s0, r2);
- if (op == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- break;
- }
- default:
- UNREACHABLE();
- }
-}
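The tag arithmetic around the vstr above (sub kHeapObjectTag, store, re-add) follows V8's pointer tagging. A minimal C++ sketch, assuming the usual one-bit tag (kHeapObjectTag == 1); the helper is illustrative, not V8 API:

    #include <cstdint>
    // A tagged heap pointer is the real address plus the tag bit, so the
    // writable payload address is ptr - kHeapObjectTag + field offset.
    inline uint8_t* field_address(uintptr_t tagged_ptr, int offset) {
      const uintptr_t kHeapObjectTag = 1;  // assumed tag value
      return reinterpret_cast<uint8_t*>(tagged_ptr - kHeapObjectTag + offset);
    }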
-
-
-// Generate the smi code. If the operation on smis is successful, the return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated, the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi, the return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible, generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
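The orr/JumpIfNotSmi pair above folds two smi checks into one. A sketch of the invariant, assuming V8's kSmiTag == 0 with a one-bit tag mask:

    #include <cstdint>
    // Or-ing the raw words preserves any set tag bit, so a single mask
    // test proves both values are smis.
    inline bool both_are_smis(intptr_t left, intptr_t right) {
      const intptr_t kSmiTagMask = 1;  // assumed mask
      return ((left | right) & kSmiTagMask) == 0;
    }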
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here; see BinaryOp_Patch.
- __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
- __ b(ne, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
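The CompareObjectType/b(ge) pairs rely on the instance-type ordering: string types are assumed to sort below FIRST_NONSTRING_TYPE, so ge means "not a string". As a sketch:

    // Illustrative only; the constant's value comes from V8's type list.
    inline bool is_string_instance_type(int type, int first_nonstring_type) {
      return type < first_nonstring_type;  // b(ge) takes the other path
    }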
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- LowDwVfpRegister double_scratch = d0;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the causes leading to a type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integers.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- __ LoadNumberAsInt32Double(
- right, d1, heap_number_map, scratch1, d8, &transition);
- __ LoadNumberAsInt32Double(
- left, d0, heap_number_map, scratch1, d8, &transition);
-
- if (op_ != Token::MOD) {
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d0, d1);
- break;
- case Token::SUB:
- __ vsub(d5, d0, d1);
- break;
- case Token::MUL:
- __ vmul(d5, d0, d1);
- break;
- case Token::DIV:
- __ vdiv(d5, d0, d1);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ <= BinaryOpIC::INT32) {
- __ TryDoubleToInt32Exact(scratch1, d5, d8);
- // If the ne condition is set, the result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTag(scratch1, SetCC);
- __ b(vs, &return_heap_number);
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
- Label not_zero;
- ASSERT(kSmiTag == 0);
- __ b(ne, &not_zero);
- __ VmovHigh(scratch2, d5);
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &transition);
- __ bind(&not_zero);
- __ mov(r0, scratch1);
- __ Ret();
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- if (encoded_right_arg_.has_value) {
- __ Vmov(d8, fixed_right_arg_value(), scratch1);
- __ VFPCompareAndSetFlags(d1, d8);
- __ b(ne, &transition);
- }
-
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Load the left value from the value saved on the stack.
- __ Pop(r1, r0);
-
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ b(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- __ LoadNumberAsInt32(left, r3, heap_number_map,
- scratch1, d0, d1, &transition);
- __ LoadNumberAsInt32(right, r2, heap_number_map,
- scratch1, d0, d1, &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // This result cannot be represented as a signed 32-bit integer; try
- // to return a heap number if we can.
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi. If not, try to return a heap number.
- // (We know the result is an int32.)
- __ TrySmiTag(r0, r2, &return_heap_number);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
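The shift cases above mirror JavaScript's shift semantics: only the low five bits of the count are used, and SHR (>>>) is unsigned, which is why a set sign bit after LSR forces the heap-number or transition path. A sketch:

    #include <cstdint>
    inline int32_t js_sar(int32_t x, int32_t n) { return x >> (n & 0x1f); }
    inline uint32_t js_shr(int32_t x, int32_t n) {
      return static_cast<uint32_t>(x) >> (n & 0x1f);
    }
    inline int32_t js_shl(int32_t x, int32_t n) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << (n & 0x1f));
    }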
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
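The fix-up above hard-codes the oddball conversion rules: ToNumber(undefined) is NaN, while bitwise operators go through ToInt32, and ToInt32(NaN) is 0, hence Smi 0 on the bit-op path. A simplified sketch (NaN case only):

    #include <cmath>
    inline double  oddball_to_number() { return std::nan(""); }  // for + - * / %
    inline int32_t oddball_to_int32()  { return 0; }             // for | ^ & << >> >>>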
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // The code below clobbers result if allocation fails. To keep both arguments
- // intact for the runtime call, result cannot be one of them.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
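Restating the overwrite-mode policy above as a sketch (the enum names come from the surrounding code; the helper itself is hypothetical): a fresh HeapNumber is needed unless the selected operand already owns a mutable box, i.e. it is a heap object rather than a smi.

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
    inline bool can_reuse_operand_box(OverwriteMode mode, bool operand_is_smi) {
      return mode != NO_OVERWRITE && !operand_is_smi;  // smis carry no storage
    }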
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in d2, double result goes
// into d2.
@@ -2280,7 +1242,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Label calculate;
Label invalid_cache;
const Register scratch0 = r9;
- const Register scratch1 = r7;
+ Register scratch1 = no_reg; // will be r4
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
@@ -2360,6 +1322,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ cmp(r2, r4);
__ cmp(r3, r5, eq);
__ b(ne, &calculate);
+
+ scratch1 = r4; // Start of scratch1 range.
+
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
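The cmp / cmp(..., eq) sequence above is the ARM conditional-compare idiom for a 64-bit equality test: the second compare only runs when the first set eq, so a single b(ne) fires if either half of the cached key differs. Equivalent logic, as a sketch:

    #include <cstdint>
    inline bool cache_key_matches(uint32_t lo, uint32_t hi,
                                  uint32_t want_lo, uint32_t want_hi) {
      return lo == want_lo && hi == want_hi;
    }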
@@ -2496,13 +1461,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
- const DwVfpRegister double_base = d1;
- const DwVfpRegister double_exponent = d2;
- const DwVfpRegister double_result = d3;
- const DwVfpRegister double_scratch = d0;
- const SwVfpRegister single_scratch = s0;
+ const DwVfpRegister double_base = d0;
+ const DwVfpRegister double_exponent = d1;
+ const DwVfpRegister double_result = d2;
+ const DwVfpRegister double_scratch = d3;
+ const SwVfpRegister single_scratch = s6;
const Register scratch = r9;
- const Register scratch2 = r7;
+ const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
@@ -2694,20 +1659,14 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
}
@@ -2726,16 +1685,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
store_buffer_overflow_code = *stub.GetCode(isolate);
}
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -2765,9 +1721,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Passing r0.
- __ PrepareCallCFunction(1, 0, r1);
+ __ PrepareCallCFunction(2, 0, r1);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 1, 0);
+ 2, 0);
}
ExternalReference scope_depth =
@@ -2841,7 +1798,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// sp: stack pointer
// fp: frame pointer
// Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
+ __ LeaveExitFrame(save_doubles_, r4, true);
__ mov(pc, lr);
// check if we should retry or throw exception
@@ -3011,14 +1968,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r3: argc
// r4: argv
Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
__ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
+ __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ Push(ip, r8, r6, r5);
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -3064,7 +2021,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
__ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
+ // Must preserve r0-r4; r5-r6 are available.
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
@@ -3375,8 +2332,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = r0;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
@@ -3603,7 +2559,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ cmp(r1, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
@@ -3654,7 +2610,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r6, r1, Operand(Smi::FromInt(2)));
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
__ add(r6, r4, Operand(r1, LSL, 1));
__ add(r6, r6, Operand(kParameterMapHeaderSize));
__ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
@@ -3672,31 +2628,36 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ ldr(r9, MemOperand(sp, 0 * kPointerSize));
__ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ sub(r9, r9, Operand(r1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ add(r3, r4, Operand(r6, LSL, 1));
__ add(r3, r3, Operand(kParameterMapHeaderSize));
// r6 = loop variable (tagged)
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
- // r4 = address of parameter map (tagged)
- // r5 = temporary scratch (among others, for address calculation)
- // r7 = the hole value
+ // r4 = address of parameter map (tagged), which equals the address of the
+ // new object plus Heap::kArgumentsObjectSize (tagged)
+ // r0 = temporary scratch (among others, for address calculation)
+ // r5 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
__ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r5, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r5));
- __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r7, MemOperand(r3, r5));
+ __ mov(r0, Operand(r6, LSL, 1));
+ __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r0));
+ __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r5, MemOperand(r3, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
__ cmp(r6, Operand(Smi::FromInt(0)));
__ b(ne, &parameters_loop);
+ // Restore r0 = new object (tagged)
+ __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+
__ bind(&skip_parameter_map);
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
// r3 = address of backing store (tagged)
// r5 = scratch
@@ -3727,6 +2688,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Ret();
// Do the runtime call to allocate the arguments object.
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
@@ -3855,7 +2817,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// therefore the content of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
- Register last_match_info_elements = r6;
+ Register last_match_info_elements = no_reg; // will be r6;
// Ensure that a RegExp stack is allocated.
Isolate* isolate = masm->isolate();
@@ -3988,19 +2950,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
// (E) Carry on. String handling is done.
- // r7: irregexp code
+ // r6: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it contains
// a smi (code flushing support).
- __ JumpIfSmi(r7, &runtime);
+ __ JumpIfSmi(r6, &runtime);
// r1: previous index
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
+ // r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
@@ -4042,7 +3004,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4053,12 +3015,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 4, r3: End of string data
// Argument 3, r2: Start of string data
// Prepare start and end index of the input.
- __ add(r9, r8, Operand(r9, LSL, r3));
+ __ add(r9, r7, Operand(r9, LSL, r3));
__ add(r2, r9, Operand(r1, LSL, r3));
- __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
- __ SmiUntag(r8);
- __ add(r3, r9, Operand(r8, LSL, r3));
+ __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
+ __ SmiUntag(r7);
+ __ add(r3, r9, Operand(r7, LSL, r3));
// Argument 2 (r1): Previous index.
// Already there
@@ -4067,11 +3029,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(r0, subject);
// Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
+ stub.GenerateCall(masm, r6);
+
+ __ LeaveExitFrame(false, no_reg, true);
- __ LeaveExitFrame(false, no_reg);
+ last_match_info_elements = r6;
// r0: result
// subject: subject string (callee saved)
@@ -4161,7 +3125,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ mov(subject, r2);
@@ -4171,7 +3135,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
@@ -4343,6 +3307,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : cache cell for call target
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4364,9 +3329,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in r3.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
__ ldr(r5, FieldMemOperand(r3, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
@@ -4403,17 +3365,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(r0);
- __ push(r0);
- __ push(r1);
- __ push(r2);
+ __ Push(r2, r1, r0);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
- __ pop(r2);
- __ pop(r1);
- __ pop(r0);
+ __ Pop(r2, r1, r0);
__ SmiUntag(r0);
}
__ b(&done);
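The Push/Pop rewrite above collapses three stack operations into one stm/ldm. ARM store-multiple places the lowest-numbered register at the lowest address, so the layout is unchanged; a sketch of the guarantee:

    // After __ Push(r2, r1, r0)  (one stmdb sp!, {r0, r1, r2}):
    //   sp + 0 : r0   (lowest-numbered register at lowest address)
    //   sp + 4 : r1
    //   sp + 8 : r2
    // -- identical to push(r2); push(r1); push(r0) issued separately.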
@@ -4739,7 +3698,6 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags) {
bool ascii = (flags & COPY_ASCII) != 0;
bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
@@ -4814,30 +3772,29 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
__ bind(&loop);
__ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
__ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
__ str(scratch1, MemOperand(dest, 4, PostIndex));
__ mov(scratch1, Operand(scratch3, LSR, right_shift));
// Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
+ __ sub(scratch3, limit, Operand(dest));
+ __ sub(scratch3, scratch3, Operand(4), SetCC);
__ b(ge, &loop);
}
// There is now between zero and three bytes left to copy (the negative of that
- // number is in scratch5), and between one and three bytes already read into
+ // number is in scratch3), and between one and three bytes already read into
// scratch1 (eight times that number in scratch4). We may have read past
// the end of the string, but because objects are aligned, we have not read
// past the end of the object.
// Find the minimum of remaining characters to move and preloaded characters
// and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
+ __ add(scratch3, scratch3, Operand(4), SetCC);
__ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+ __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
// Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
+ __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
+ // Between one and three (value in scratch3) characters already read into
// scratch ready to write.
- __ cmp(scratch5, Operand(2));
+ __ cmp(scratch3, Operand(2));
__ strb(scratch1, MemOperand(dest, 1, PostIndex));
__ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
__ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
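The loop-count rework above keeps the same termination condition with one scratch register fewer: the old code read remaining = limit - dest before the 4-byte store and tested remaining - 8 >= 0; the new code reads limit - dest after the store (dest already advanced by 4) and tests remaining - 4 >= 0, which is the same inequality.

    // old: (limit - dest_before) - 8 >= 0  <=>  limit - (dest_before + 4) - 4 >= 0
    // new: (limit - dest_after)  - 4 >= 0,  where dest_after = dest_before + 4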
@@ -5177,10 +4134,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
__ bind(&set_slice_header);
__ mov(r3, Operand(r3, LSL, 1));
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
@@ -5221,7 +4178,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ b(eq, &two_byte_sequential);
// Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
__ add(r5, r5, r3);
@@ -5233,13 +4190,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: result string length
// r5: first character of substring to copy
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
// Allocate and copy the resulting two-byte string.
__ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -5253,7 +4210,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r5: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+ masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
__ bind(&return_r0);
Counters* counters = masm->isolate()->counters();
@@ -5519,7 +4476,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
&call_runtime);
// Get the two characters forming the sub string.
@@ -5530,7 +4487,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// just allocate a new one.
Label make_two_character_string;
StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
+ masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -5575,7 +4532,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Allocate an ASCII cons string.
__ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
Label skip_write_barrier, after_writing;
@@ -5586,15 +4543,15 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ cmp(r4, Operand::Zero());
__ b(eq, &skip_write_barrier);
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ RecordWriteField(r7,
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ RecordWriteField(r3,
ConsString::kFirstOffset,
r0,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ RecordWriteField(r7,
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
+ __ RecordWriteField(r3,
ConsString::kSecondOffset,
r1,
r4,
@@ -5603,12 +4560,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ jmp(&after_writing);
__ bind(&skip_write_barrier);
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
__ bind(&after_writing);
- __ mov(r0, Operand(r7));
+ __ mov(r0, Operand(r3));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -5628,7 +4585,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(eq, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
__ jmp(&allocated);
// We cannot encounter sliced strings or cons strings here since:
@@ -5652,14 +4609,15 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
// Check whether both strings have same encoding
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
+ __ eor(ip, r4, Operand(r5));
+ ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
+ __ tst(ip, Operand(kStringEncodingMask));
__ b(ne, &call_runtime);
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r7,
+ __ add(r6,
r0,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
@@ -5669,7 +4627,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(r4, Operand(kShortExternalStringMask));
__ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
+ __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
__ bind(&first_prepared);
STATIC_ASSERT(kSeqStringTag == 0);
@@ -5689,76 +4647,57 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
- // r7: first character of first string
+ // r6: first character of first string
// r1: first character of second string
// r2: length of first string.
// r3: length of second string.
- // r6: sum of lengths.
// Both strings have the same encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(r5, Operand(kStringEncodingMask));
__ b(eq, &non_ascii_string_add_flat_result);
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ Ret();
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ Ret();
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5792,13 +4731,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
__ bind(&done);
@@ -6107,8 +5040,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
- __ push(lr);
- __ Push(r1, r0);
+ __ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
@@ -6116,8 +5048,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
- __ pop(r0);
- __ pop(r1);
+ __ Pop(r1, r0);
}
__ Jump(r2);
@@ -6391,89 +5322,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
- // StringAddStub::Generate
- { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
// Hydrogen code stubs need stub2 at snapshot time.
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
+ stub2.GetCode(isolate);
}
@@ -6761,10 +5616,27 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ mov(r1, r0);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ ldr(r0, MemOperand(fp, parameter_count_offset));
+ // The parameter count above includes the receiver for the arguments passed to
+ // the deoptimization handler. Subtract one for the receiver to get the
+ // argument count for the call.
+ __ sub(r0, r0, Operand(1));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ParameterCount argument_count(r0);
+ __ InvokeFunction(
+ r1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
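On the sub(r0, r0, Operand(1)) above, a sketch of the arithmetic: the trampoline frame records the stack parameter count including the receiver, while InvokeFunction expects the argument count without it.

    // argc_for_call = caller_stack_parameter_count - 1  // drop the receiver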
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
- AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
@@ -6918,11 +5790,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
}
- // Save the resulting elements kind in type info
- __ SmiTag(r3);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
- __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(r3);
+ // Save the resulting elements kind in type info. We can't just store r3
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field; the upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ ldr(r4, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ str(r4, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -6955,12 +5829,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
(!FLAG_track_allocation_sites &&
(kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
}
}
}
@@ -6982,11 +5856,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -7064,6 +5938,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
diff --git a/chromium/v8/src/arm/code-stubs-arm.h b/chromium/v8/src/arm/code-stubs-arm.h
index d05e9a1d840..e4006861df0 100644
--- a/chromium/v8/src/arm/code-stubs-arm.h
+++ b/chromium/v8/src/arm/code-stubs-arm.h
@@ -68,7 +68,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -106,7 +105,6 @@ class StringHelper : public AllStatic {
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags);
@@ -232,7 +230,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -257,31 +254,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache, the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
@@ -305,8 +277,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
diff --git a/chromium/v8/src/arm/codegen-arm.cc b/chromium/v8/src/arm/codegen-arm.cc
index 1bcf3e3a605..238d34ed27e 100644
--- a/chromium/v8/src/arm/codegen-arm.cc
+++ b/chromium/v8/src/arm/codegen-arm.cc
@@ -55,7 +55,7 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
+ return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
fast_exp_arm_machine_code, x, 0);
}
#endif
@@ -402,8 +402,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(r2, r4);
- __ b(eq, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
}
// Set transitioned map.
@@ -432,8 +431,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(r2, r4);
- __ b(eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -444,15 +442,16 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedArray
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
+ __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ // r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
@@ -483,15 +482,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r6, r7, Operand(r5, LSL, 2));
+ __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r6, r9, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
- // r7: begin of FixedDoubleArray element fields, not tagged
+ // r9: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
@@ -514,30 +513,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Convert and copy elements.
__ bind(&loop);
- __ ldr(r9, MemOperand(r3, 4, PostIndex));
- // r9: current element
- __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+ __ ldr(lr, MemOperand(r3, 4, PostIndex));
+ // lr: current element
+ __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
- __ vmov(s0, r9);
+ __ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
+ __ vstr(d0, r9, 0);
+ __ add(r9, r9, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
- __ SmiTag(r9);
- __ orr(r9, r9, Operand(1));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ SmiTag(lr);
+ __ orr(lr, lr, Operand(1));
+ __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
- __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+ __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
__ bind(&entry);
- __ cmp(r7, r6);
+ __ cmp(r9, r6);
__ b(lt, &loop);
__ pop(lr);
@@ -558,8 +557,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(r2, r4);
- __ b(eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -577,7 +575,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
- __ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
@@ -589,14 +587,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in r4 to fully take advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
- // r7: the-hole pointer
// r9: heap number map
__ b(&entry);
@@ -608,7 +604,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
- // lr: current element's upper 32 bit
+ // r1: current element's upper 32 bit
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
@@ -631,7 +627,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ str(r7, MemOperand(r3, 4, PostIndex));
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ str(r0, MemOperand(r3, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
@@ -775,50 +772,65 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
- Label done;
+ Label zero, infinity, done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ vldr(double_scratch1, ExpConstant(0, temp3));
- __ vmov(result, kDoubleRegZero);
__ VFPCompareAndSetFlags(double_scratch1, input);
- __ b(ge, &done);
+ __ b(ge, &zero);
+
__ vldr(double_scratch2, ExpConstant(1, temp3));
__ VFPCompareAndSetFlags(input, double_scratch2);
- __ vldr(result, ExpConstant(2, temp3));
- __ b(ge, &done);
+ __ b(ge, &infinity);
+
__ vldr(double_scratch1, ExpConstant(3, temp3));
__ vldr(result, ExpConstant(4, temp3));
__ vmul(double_scratch1, double_scratch1, input);
__ vadd(double_scratch1, double_scratch1, result);
- __ vmov(temp2, temp1, double_scratch1);
+ __ VmovLow(temp2, double_scratch1);
__ vsub(double_scratch1, double_scratch1, result);
__ vldr(result, ExpConstant(6, temp3));
__ vldr(double_scratch2, ExpConstant(5, temp3));
__ vmul(double_scratch1, double_scratch1, double_scratch2);
__ vsub(double_scratch1, double_scratch1, input);
__ vsub(result, result, double_scratch1);
- __ vmul(input, double_scratch1, double_scratch1);
- __ vmul(result, result, input);
- __ mov(temp1, Operand(temp2, LSR, 11));
+ __ vmul(double_scratch2, double_scratch1, double_scratch1);
+ __ vmul(result, result, double_scratch2);
__ vldr(double_scratch2, ExpConstant(7, temp3));
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
- __ vldr(double_scratch2, ExpConstant(8, temp3));
+ // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ vmov(double_scratch2, 1);
__ vadd(result, result, double_scratch2);
- __ movw(ip, 0x7ff);
- __ and_(temp2, temp2, Operand(ip));
+ __ mov(temp1, Operand(temp2, LSR, 11));
+ __ Ubfx(temp2, temp2, 0, 11);
__ add(temp1, temp1, Operand(0x3ff));
- __ mov(temp1, Operand(temp1, LSL, 20));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
- __ add(temp3, temp3, Operand(kPointerSize));
- __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
- __ orr(temp1, temp1, temp2);
- __ vmov(input, ip, temp1);
- __ vmul(result, result, input);
+ __ add(temp3, temp3, Operand(temp2, LSL, 3));
+ __ ldm(ia, temp3, temp2.bit() | temp3.bit());
+ // The first word loaded ends up in the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ orr(temp1, temp3, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp2, temp1);
+ } else {
+ __ orr(temp1, temp2, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp3, temp1);
+ }
+ __ vmul(result, result, double_scratch1);
+ __ b(&done);
+
+ __ bind(&zero);
+ __ vmov(result, kDoubleRegZero);
+ __ b(&done);
+
+ __ bind(&infinity);
+ __ vldr(result, ExpConstant(2, temp3));
+
__ bind(&done);
}
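
The rewritten tail of EmitMathExp loads two consecutive 32-bit table words with a single ldm, then merges the biased exponent (already shifted into position with LSL #20) into whichever word landed in the higher-numbered register before vmov'ing the pair into a VFP register. A hedged C++ sketch of that reassembly, assuming IEEE-754 doubles and a pre-biased exponent (names are illustrative):

    #include <cstdint>
    #include <cstring>

    // Build a double from a table entry's (lo, hi) words plus exponent bits,
    // mirroring "orr temp1, hi_word, Operand(exp, LSL, 20); vmov d, lo, temp1".
    double AssembleScale(uint32_t lo, uint32_t hi_mantissa, uint32_t biased_exp) {
      uint32_t hi = hi_mantissa | (biased_exp << 20);
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }
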
@@ -839,7 +851,8 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
patcher.masm()->nop(ip.code());
- patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
+ patcher.masm()->add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
initialized = true;
}
return byte_sequence;
@@ -859,7 +872,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
@@ -870,20 +883,21 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ patcher.masm()->emit_code_stub_address(stub);
}
}
diff --git a/chromium/v8/src/arm/codegen-arm.h b/chromium/v8/src/arm/codegen-arm.h
index 54530d87262..ecbe64cbad3 100644
--- a/chromium/v8/src/arm/codegen-arm.h
+++ b/chromium/v8/src/arm/codegen-arm.h
@@ -97,6 +97,7 @@ class StringCharLoadGenerator : public AllStatic {
class MathExpGenerator : public AllStatic {
public:
+ // Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
diff --git a/chromium/v8/src/arm/constants-arm.h b/chromium/v8/src/arm/constants-arm.h
index 703613932cd..78bb66c49fe 100644
--- a/chromium/v8/src/arm/constants-arm.h
+++ b/chromium/v8/src/arm/constants-arm.h
@@ -50,6 +50,9 @@ inline int DecodeConstantPoolLength(int instr) {
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
+// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
+const int kCodeAgeJumpInstruction = 0xe51ff004;
+
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
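
The new constant is the fixed encoding of the pc-relative load used by the code age prologue. As a sanity sketch, assembling the ARM single-data-transfer fields (cond=AL, pre-indexed, subtract, Rn=Rd=pc, imm12=4) reproduces the value:

    #include <cstdint>

    // cond = 0xe (AL); bits 27-20 = 0x51 (ldr, P=1, U=0, B=0, W=0, L=1);
    // Rn = pc (15), Rd = pc (15), imm12 = 4.
    constexpr uint32_t EncodeLdrPcPcMinus4() {
      return (0xeu << 28) | (0x51u << 20) | (15u << 16) | (15u << 12) | 4u;
    }
    static_assert(EncodeLdrPcPcMinus4() == 0xe51ff004,
                  "matches kCodeAgeJumpInstruction");
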
diff --git a/chromium/v8/src/arm/deoptimizer-arm.cc b/chromium/v8/src/arm/deoptimizer-arm.cc
index 3c57b643956..6031499dbd1 100644
--- a/chromium/v8/src/arm/deoptimizer-arm.cc
+++ b/chromium/v8/src/arm/deoptimizer-arm.cc
@@ -81,100 +81,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// <decrement profiling counter>
-// 2a 00 00 01 bpl ok
-// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
-// e1 2f ff 3c blx ip
-// ok-label
-//
-// We patch the code to the following form:
-//
-// <decrement profiling counter>
-// e1 a0 00 00 mov r0, r0 (NOP)
-// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
-// e1 2f ff 3c blx ip
-// ok-label
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Turn the jump into nops.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
- // Replace the call address.
- uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Restore the original jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- // Restore the original call address.
- uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- static const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
-
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
-
- if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
- Memory::uint32_at(interrupt_address_pointer));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
- Memory::uint32_at(interrupt_address_pointer));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -201,10 +107,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}
@@ -224,6 +127,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -362,8 +270,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
+ __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+ __ push(r6);
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
@@ -409,9 +317,9 @@ void Deoptimizer::EntryGenerator::Generate() {
__ InitializeRootRegister();
__ pop(ip); // remove pc
- __ pop(r7); // get continuation, leave pc on stack
+ __ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
- __ Jump(r7);
+ __ Jump(ip);
__ stop("Unreachable.");
}
diff --git a/chromium/v8/src/arm/disasm-arm.cc b/chromium/v8/src/arm/disasm-arm.cc
index acffaa3f230..49e4126b326 100644
--- a/chromium/v8/src/arm/disasm-arm.cc
+++ b/chromium/v8/src/arm/disasm-arm.cc
@@ -1679,6 +1679,14 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
+ } else if (instruction_bits == kCodeAgeJumpInstruction) {
+ // The code age prologue has a constant immediately following the jump
+ // instruction.
+ Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
+ DecodeType2(instr);
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ " (0x%08x)", target->InstructionBits());
+ return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
diff --git a/chromium/v8/src/arm/frames-arm.h b/chromium/v8/src/arm/frames-arm.h
index d022b414b43..e6ecda1fb53 100644
--- a/chromium/v8/src/arm/frames-arm.h
+++ b/chromium/v8/src/arm/frames-arm.h
@@ -64,8 +64,8 @@ const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
- 1 << 8 | // r8 v5 (cp in JavaScript code)
+ 1 << 7 | // r7 v4 (cp in JavaScript code)
+ 1 << 8 | // r8 v5 (pp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
@@ -102,7 +102,8 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
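
The magic -3 * kPointerSize is re-expressed in terms of the standard frame layout; the prologue change earlier in this patch shows kFixedFrameSizeFromFp replacing the old 2 * kPointerSize, so the value is unchanged. A small compile-time check of that equivalence, with the constants assumed from the surrounding diff:

    constexpr int kPointerSize = 4;                          // 32-bit ARM
    constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize;  // saved fp + lr
    constexpr int kCallerFPOffset = -(kFixedFrameSizeFromFp + kPointerSize);
    static_assert(kCallerFPOffset == -3 * kPointerSize,
                  "same value as the previous hard-coded offset");
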
diff --git a/chromium/v8/src/arm/full-codegen-arm.cc b/chromium/v8/src/arm/full-codegen-arm.cc
index b6fb70b5df0..55088033f04 100644
--- a/chromium/v8/src/arm/full-codegen-arm.cc
+++ b/chromium/v8/src/arm/full-codegen-arm.cc
@@ -148,13 +148,10 @@ void FullCodeGenerator::Generate() {
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
__ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ __ str(r2, MemOperand(sp, receiver_offset), ne);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -163,16 +160,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ nop(ip.code());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -180,9 +168,20 @@ void FullCodeGenerator::Generate() {
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
+ // Emit a loop to initialize stack cells for locals when optimizing for
+ // size. Otherwise, unroll the loop for maximum performance.
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ if (FLAG_optimize_for_size && locals_count > 4) {
+ Label loop;
+ __ mov(r2, Operand(locals_count));
+ __ bind(&loop);
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ push(r9);
+ __ b(&loop, ne);
+ } else {
+ for (int i = 0; i < locals_count; i++) {
+ __ push(r9);
+ }
}
}
}
@@ -625,12 +624,11 @@ void FullCodeGenerator::StackValueContext::Plug(
Label done;
__ bind(materialize_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
__ jmp(&done);
__ bind(materialize_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
__ bind(&done);
+ __ push(ip);
}
@@ -1167,7 +1165,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(r1, cell);
+ __ Move(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
@@ -1609,9 +1607,8 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(r5);
__ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
+ __ Push(r5, r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ pop(r5);
@@ -1637,6 +1634,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -1651,13 +1650,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1772,6 +1769,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1782,6 +1784,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
@@ -1790,29 +1800,24 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1) {
- __ Push(r3, r2, r1);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ Push(r3, r2, r1);
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ mov(r0, Operand(Smi::FromInt(flags)));
+ __ Push(r3, r2, r1, r0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
@@ -2050,8 +2055,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(r3); // iter
- __ push(r0); // exception
+ __ Push(r3, r0); // iter, exception
__ jmp(&l_call);
// try { received = %yield result }
@@ -2087,8 +2091,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_next);
__ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(r3); // iter
- __ push(r0); // received
+ __ Push(r3, r0); // iter, received
// result = receiver[f](arg);
__ bind(&l_call);
@@ -2164,11 +2167,13 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ bl(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(lr); // Return address.
- __ push(fp); // Caller's frame pointer.
- __ mov(fp, sp);
- __ push(cp); // Callee's context.
- __ push(r4); // Callee's JS Function.
+ // lr = return address.
+ // fp = caller's frame pointer.
+ // cp = callee's context.
+ // r4 = callee's JS function.
+ __ Push(lr, fp, cp, r4);
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(2 * kPointerSize));
// Load the operand stack size.
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
@@ -2200,8 +2205,8 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(r2);
__ b(&push_operand_holes);
__ bind(&call_resume);
- __ push(r1);
- __ push(result_register());
+ ASSERT(!result_register().is(r1));
+ __ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
@@ -2293,7 +2298,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2302,7 +2307,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
// recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
__ GetLeastBitsFromSmi(scratch1, right, 5);
@@ -2371,7 +2375,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
@@ -2423,8 +2427,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
- __ pop(r2);
- __ pop(r0); // Restore value.
+ __ Pop(r0, r2); // r0 = restored value.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -2558,8 +2561,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
- __ pop(r1); // Key.
- __ pop(r2);
+ __ Pop(r2, r1); // r1 = key.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
@@ -2688,27 +2690,25 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
+ // r4: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
}
- __ push(r1);
- // Push the receiver of the enclosing function.
+ // r3: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(r1);
- // Push the language mode.
- __ mov(r1, Operand(Smi::FromInt(language_mode())));
- __ push(r1);
+ __ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // r2: the language mode.
+ __ mov(r2, Operand(Smi::FromInt(language_mode())));
- // Push the start position of the scope the calls resides in.
+ // r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(r1);
// Do the runtime call.
+ __ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
@@ -2782,9 +2782,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in r0)
// and the object holding it (returned in r1).
- __ push(context_register());
+ ASSERT(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
- __ push(r2);
+ __ Push(context_register(), r2);
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ Push(r0, r1); // Function, receiver.
@@ -3111,6 +3111,32 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ cmp(r2, Operand(0x80000000));
+ __ cmp(r1, Operand(0x00000000), eq);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
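
The EmitIsMinusZero code added above keys off the IEEE-754 bit pattern of -0.0: the exponent word must equal 0x80000000 and, only if it does (the second cmp is conditional on eq), the mantissa word must be zero. The equivalent check in portable C++, as a sketch:

    #include <cstdint>
    #include <cstring>

    // -0.0 is the only double whose bits are sign = 1, everything else 0.
    bool IsMinusZero(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      return bits == 0x8000000000000000ull;
    }
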
@@ -3330,50 +3356,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ PrepareCallCFunction(1, r0);
- __ ldr(r0,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3466,31 +3448,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
-
- __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- __ cmp(index, ip);
- __ Check(lt, kIndexIsTooLarge);
-
- __ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, kIndexIsNegative);
-
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- __ cmp(ip, Operand(encoding_mask));
- __ Check(eq, kUnexpectedStringType);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3501,13 +3458,18 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value);
+ __ ThrowIf(ne, kNonSmiValue);
+ __ SmiTst(index);
+ __ ThrowIf(ne, kNonSmiIndex);
+ __ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3529,13 +3491,18 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value);
+ __ ThrowIf(ne, kNonSmiValue);
+ __ SmiTst(index);
+ __ ThrowIf(ne, kNonSmiIndex);
+ __ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3592,8 +3559,8 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into r0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3714,11 +3681,21 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
+ if (FLAG_new_string_add) {
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ __ pop(r1);
+ NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ } else {
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ CallStub(&stub);
+ }
context()->Plug(r0);
}
@@ -3735,42 +3712,6 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3964,9 +3905,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
+ Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+ not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
@@ -3984,19 +3924,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Register string = r4;
Register element = r5;
Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
+ Register scratch = r9;
// Separator operand is on the stack.
__ pop(separator);
// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
+ __ CheckFastElements(scratch, array_length, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -4033,11 +3972,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1), SetCC);
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+ __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -4058,23 +3997,23 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that the separator is a flat ASCII string.
__ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// a smi but the other values are, so the result is a smi.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch));
+ __ smull(scratch, ip, array_length, scratch);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
__ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
+ __ tst(scratch, Operand(0x80000000));
__ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2), SetCC);
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
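
The smull above produces the full 64-bit product of separator length and array length; the result only stays a valid non-negative smi if the high register is zero and the low word's sign bit is clear. A hedged C++ rendering of that overflow test (the function name is illustrative):

    #include <cstdint>

    // Mirrors "smull lo, hi, a, b" followed by the hi == 0 and
    // (lo & 0x80000000) == 0 checks: the product must fit in [0, 2^31).
    bool SmiProductFits(int32_t a, int32_t b, int32_t* out) {
      int64_t product = static_cast<int64_t>(a) * b;
      if ((product >> 31) != 0) return false;  // high 33 bits must be zero
      *out = static_cast<int32_t>(product);
      return true;
    }
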
@@ -4091,9 +4030,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// array_length: Length of the array.
__ AllocateAsciiString(result,
string_length,
- scratch1,
- scratch2,
- elements_end,
+ scratch,
+ string, // used as scratch
+ elements_end, // used as scratch
&bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
@@ -4106,8 +4045,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -4125,7 +4064,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4157,7 +4096,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4178,7 +4117,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
separator,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4187,7 +4126,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4276,9 +4215,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ ASSERT(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
- __ push(r2);
+ __ Push(context_register(), r2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(r0);
}
@@ -4409,14 +4348,44 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(r0, &no_conversion);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(r0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
+ __ b(vc, &done);
+ // Call stub. Undo operation first.
+ __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
+ __ jmp(&stub_call);
+ __ bind(&slow);
}
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
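
The hoisted smi fast path relies on the ARM overflow flag: adding the tagged count value with SetCC and branching on vc (no signed overflow) detects in one instruction whether the increment left smi range, and the sub undoes it before falling back to the stub. A sketch of the same idea in C++, assuming GCC/Clang builtins and 32-bit smi tagging:

    #include <cstdint>

    // Tagged smis on 32-bit: value << 1, low bit 0. Adding two tagged values
    // overflows exactly when the untagged sum leaves the 31-bit smi range.
    bool SmiAdd(int32_t tagged, int32_t tagged_delta, int32_t* out) {
      return !__builtin_add_overflow(tagged, tagged_delta, out);
    }
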
@@ -4439,29 +4408,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vs, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(r0, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- }
+ __ bind(&stub_call);
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
expr->CountBinOpFeedbackId());
@@ -4508,8 +4462,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(r1); // Key.
- __ pop(r2); // Receiver.
+ __ Pop(r2, r1); // r1 = key. r2 = receiver.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -4894,6 +4847,91 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 3 * kInstrSize;
+ CodePatcher patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // <decrement profiling counter>
+ // 2a 00 00 01 bpl ok
+ // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
+ // e1 2f ff 3c blx ip
+ // ok-label
+ patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
+ ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // <decrement profiling counter>
+ // e1 a0 00 00 mov r0, r0 (NOP)
+ // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
+ // e1 2f ff 3c blx ip
+ // ok-label
+ patcher.masm()->nop();
+ break;
+ }
+
+ Address pc_immediate_load_address = pc - 2 * kInstrSize;
+ // Replace the call address.
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+ Address interrupt_address_pointer = pc + interrupt_address_offset;
+ Memory::uint32_at(interrupt_address_pointer) =
+ reinterpret_cast<uint32_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
+
+ Address branch_address = pc - 3 * kInstrSize;
+ Address pc_immediate_load_address = pc - 2 * kInstrSize;
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_immediate_load_address) & 0xfff;
+ Address interrupt_address_pointer = pc + interrupt_address_offset;
+
+ if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
+ ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->InterruptCheck()->entry()));
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_immediate_load_address)));
+ return INTERRUPT;
+ }
+
+ ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_immediate_load_address)));
+
+ if (Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry()));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
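
One subtlety in the new BackEdgeTable code: the imm12 offset is read from the ldr two instructions before pc, yet it is added to pc itself. That works because an ARM ldr sees pc as its own address + 8, and the load address + 8 is exactly the back edge's pc. A sketch of the address recovery, assuming the "ldr ip, [pc, #imm12]" encoding (the function name is illustrative):

    #include <cstdint>

    // The interrupt-check sequence ends "ldr ip, [pc, #imm12]; blx ip", with
    // the back edge's pc right after the blx. The ldr, at pc - 8, reads from
    // its own address + 8 + imm12, which is exactly pc + imm12.
    uintptr_t InterruptAddressPointer(uintptr_t pc, uint32_t ldr_instr) {
      uint32_t imm12 = ldr_instr & 0xfff;  // low 12 bits of the encoding
      return pc + imm12;
    }
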
diff --git a/chromium/v8/src/arm/ic-arm.cc b/chromium/v8/src/arm/ic-arm.cc
index f15d4b11f84..ea247b37639 100644
--- a/chromium/v8/src/arm/ic-arm.cc
+++ b/chromium/v8/src/arm/ic-arm.cc
@@ -341,7 +341,7 @@ Object* CallIC_Miss(Arguments args);
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r1 : receiver
// -- r2 : name
@@ -445,7 +445,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -511,7 +511,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_ic_state) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -577,8 +577,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
+ __ Push(r2, r1); // save the key and pass the receiver
+ __ push(r2); // pass the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(r2); // restore the key
}
@@ -610,7 +610,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMonomorphicCacheProbe(masm,
argc,
Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
+ kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -656,7 +656,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -827,7 +827,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(r0, r2);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -856,7 +856,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -887,7 +887,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -900,9 +900,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ Push(r1, r0);
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1120,7 +1119,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -1160,11 +1159,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1175,10 +1174,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1268,6 +1265,21 @@ static void KeyedStoreGenerateGenericHelper(
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, fast_double);
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(scratch_value,
+ MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
+ __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ b(ne, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
@@ -1315,6 +1327,20 @@ static void KeyedStoreGenerateGenericHelper(
__ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
__ b(ne, slow);
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so,
+ // go to the runtime.
+ __ add(address, elements,
+ Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
+ - kHeapObjectTag));
+ __ ldr(scratch_value,
+ MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
+ __ cmp(scratch_value, Operand(kHoleNanUpper32));
+ __ b(ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value, key, elements, r3, d0,
&transition_double_elements);
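
The double-array hole check added above reads only the upper 32 bits of each element: the hole is a NaN with a fixed bit pattern, so comparing one word against kHoleNanUpper32 suffices (the address is pre-offset by sizeof(kHoleNanLower32) to land on the upper word of each little-endian double). A C++ sketch, with the hole pattern treated as an assumption rather than V8's actual constant:

    #include <cstdint>
    #include <cstring>

    // Illustrative value only; the real kHoleNanUpper32 comes from V8.
    const uint32_t kHoleNanUpper32Assumed = 0x7ff7ffff;

    bool IsTheHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Assumed;
    }
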
@@ -1394,7 +1420,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register receiver = r2;
Register receiver_map = r3;
Register elements_map = r6;
- Register elements = r7; // Elements array of the receiver.
+ Register elements = r9; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1403,10 +1429,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ b(ne, &slow);
// Check if the object is a JS array or not.
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -1477,7 +1503,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1487,7 +1513,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
@@ -1615,12 +1641,10 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
return;
}
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
address, cmp_instruction_address, delta);
}
-#endif
Address patch_address =
cmp_instruction_address - delta * Instruction::kInstrSize;
diff --git a/chromium/v8/src/arm/lithium-arm.cc b/chromium/v8/src/arm/lithium-arm.cc
index b8b22df4e3b..6119b248810 100644
--- a/chromium/v8/src/arm/lithium-arm.cc
+++ b/chromium/v8/src/arm/lithium-arm.cc
@@ -272,7 +272,8 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
@@ -412,18 +413,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot for a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -439,7 +441,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -655,7 +657,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -710,51 +712,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
}
- } else {
- right = UseRegisterAtStart(right_value);
- }
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
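
The comment about logical shifts by 0 is easiest to see with concrete bits: JavaScript's >>> yields an unsigned 32-bit value, which cannot be represented as a signed int32 when bit 31 is set. A host-side illustration (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t x = 0x80000000u;   // top bit set
      uint32_t shr0 = x >> 0;     // JS: x >>> 0 == 2147483648
      // Does the unsigned result still fit in a signed 32-bit integer?
      std::printf("fits int32: %s\n", shr0 <= 0x7fffffffu ? "yes" : "no");
      return 0;                   // prints "fits int32: no" -> deopt needed
    }
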
@@ -763,29 +758,31 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d0);
+ LOperand* right = UseFixedDouble(instr->right(), d1);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
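
Why the new MOD branch above uses fixed double registers and MarkAsCall: ARM's VFP unit has no double-precision modulo instruction, so the operation is lowered to a C-library-style call, and calls require a fixed argument/result convention. A trivial reminder of the underlying operation:

    #include <cmath>
    #include <cstdio>

    int main() {
      // fmod is the C routine a double MOD ultimately bottoms out in.
      std::printf("%g\n", std::fmod(7.5, 2.0));  // prints 1.5
    }
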
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -861,9 +858,33 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -893,14 +914,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -992,19 +1011,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
@@ -1047,9 +1062,10 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), r0),
+ UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1057,23 +1073,19 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
- FixedTemp(r4));
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), r0),
+ FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1091,7 +1103,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = Use(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1106,11 +1117,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1122,14 +1133,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
}
- return NULL;
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -1140,7 +1150,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
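
Most hunks in this file repeat one mechanical change: the context stops being fetched implicitly by codegen and becomes an explicit input pinned to cp via UseFixed. A schematic of the before/after shape (types and names simplified, not the real V8 classes):

    struct LOperand {};

    // Before: LDeclareGlobals()          -- codegen loaded cp itself.
    // After:  LDeclareGlobals(context)   -- the operand is pinned to cp, so
    //                                       the dependency is visible to the
    //                                       register allocator.
    struct LDeclareGlobalsSketch {
      LOperand* inputs_[1];
      explicit LDeclareGlobalsSketch(LOperand* context) { inputs_[0] = context; }
      LOperand* context() { return inputs_[0]; }
    };

    int main() {
      LOperand cp_operand;
      LDeclareGlobalsSketch instr(&cp_operand);
      (void)instr.context();
    }
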
@@ -1158,15 +1169,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1206,8 +1216,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(input);
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -1243,7 +1257,7 @@ LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
+ LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
@@ -1253,73 +1267,74 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegister(instr->value());
+ LOperand* input = UseRegisterAtStart(instr->value());
LMathSqrt* result = new(zone()) LMathSqrt(input);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LOperand* temp = FixedTemp(d3);
- LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
- return DefineFixedDouble(result, d2);
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input);
+ return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
- instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LInstruction* result = DefineFixed(call, r0);
+ if (instr->IsTailCall()) return result;
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
}
@@ -1347,41 +1362,34 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineAsRegister(div));
}
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1466,16 +1474,12 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right));
+ UseConstant(right));
LInstruction* result = DefineAsRegister(mod);
return (left->CanBeNegative() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- return AssignEnvironment(DefineAsRegister(mod));
} else if (CpuFeatures::IsSupported(SUDIV)) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right));
@@ -1502,17 +1506,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, d1),
- UseFixedDouble(right, d2));
- return MarkAsCall(DefineFixedDouble(mod, d1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
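
After this change, DoShift, DoBitwise, DoDiv, DoAdd and DoMod all share one dispatch shape: integer fast path first, then double, then the generic tagged fallback. A compact restatement of that control flow (illustrative enum, not V8's Representation class):

    #include <cstdio>

    enum class Rep { kSmiOrInt32, kDouble, kTagged };

    const char* Lowering(Rep r) {
      if (r == Rep::kSmiOrInt32) return "inline integer code";
      if (r == Rep::kDouble)     return "DoArithmeticD (VFP or C call)";
      return "DoArithmeticT (generic stub call)";   // tagged fallback
    }

    int main() {
      std::printf("%s\n", Lowering(Rep::kTagged));
    }
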
@@ -1667,6 +1664,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
result = AssignEnvironment(result);
}
return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
} else if (instr->representation().IsDouble()) {
if (instr->left()->IsMul()) {
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
@@ -1679,7 +1685,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1710,36 +1715,24 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
  // We need to use a fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), d2) :
+ UseFixedDouble(instr->right(), d1) :
UseFixed(instr->right(), r2);
LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3),
+ return MarkAsCall(DefineFixedDouble(result, d2),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, d7);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1766,6 +1759,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1774,8 +1769,18 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
}
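
The new CompareMinusZeroAndBranch needs a scratch register because -0.0 compares equal to 0.0; only the IEEE-754 bit pattern tells them apart. A host-side check of that fact (assumes 64-bit IEEE doubles):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits == 0x8000000000000000ull;  // sign bit only
    }

    int main() {
      std::printf("%d %d %d\n",
                  -0.0 == 0.0,          // 1: equality can't tell
                  IsMinusZero(-0.0),    // 1
                  IsMinusZero(0.0));    // 0
    }
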
@@ -1813,10 +1818,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1881,13 +1887,21 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseTempRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new(zone()) LSeqStringSetChar(context, string, index, value);
}
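
The hunk above shows operand constraints varying with a runtime flag: with --debug-code the index must sit in a register (the extra check reads it at run time), and a context is threaded through, presumably because the check's failure path can call into the runtime. A sketch of the conditional policy only (enum names are illustrative):

    enum class Policy { kRegisterAtStart, kRegisterOrConstantAtStart };

    // Stricter constraint when debug checks will read the operand.
    Policy IndexPolicy(bool debug_code) {
      return debug_code ? Policy::kRegisterAtStart
                        : Policy::kRegisterOrConstantAtStart;
    }

    int main() {
      return IndexPolicy(false) == Policy::kRegisterOrConstantAtStart ? 0 : 1;
    }
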
@@ -1905,9 +1919,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1936,7 +1958,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1996,7 +2017,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegisterAtStart(val);
if (val->CheckFlag(HInstruction::kUint32)) {
LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
@@ -2007,8 +2028,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
+ : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2041,12 +2062,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2094,8 +2109,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), r0),
+ return new(zone()) LReturn(UseFixed(instr->value(), r0), context,
parameter_count);
}
@@ -2128,8 +2146,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2145,10 +2165,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), r1);
LOperand* value = UseFixed(instr->value(), r0);
LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -2183,8 +2204,10 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), r0);
return MarkAsCall(result, instr);
}
@@ -2196,6 +2219,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2212,7 +2240,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
+ obj = UseRegister(instr->elements());
} else {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
@@ -2240,18 +2268,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), r1);
LOperand* key = UseFixed(instr->key(), r0);
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), r0);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2261,15 +2288,19 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
}
return new(zone()) LStoreKeyed(object, key, val);
@@ -2277,17 +2308,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
@@ -2295,6 +2322,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
LOperand* val = UseFixed(instr->value(), r0);
@@ -2303,7 +2331,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
@@ -2313,11 +2342,12 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg);
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
+ LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL);
+ new(zone()) LTransitionElementsKind(object, context, NULL);
return AssignPointerMap(result);
}
}
@@ -2376,56 +2406,72 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), r1);
LOperand* val = UseFixed(instr->value(), r0);
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
- instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = FLAG_new_string_add
+ ? UseFixed(instr->left(), r1)
+ : UseRegisterAtStart(instr->left());
+ LOperand* right = FLAG_new_string_add
+ ? UseFixed(instr->right(), r0)
+ : UseRegisterAtStart(instr->right());
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), r0), instr);
}
@@ -2447,7 +2493,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2472,8 +2518,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
}
@@ -2497,15 +2543,8 @@ LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = UseRegisterAtStart(instr->index());
- }
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2518,13 +2557,17 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
+ return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
}
@@ -2557,10 +2600,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2593,7 +2639,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2605,8 +2651,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), r0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/chromium/v8/src/arm/lithium-arm.h b/chromium/v8/src/arm/lithium-arm.h
index 76bb9049ebc..cfafc0645a0 100644
--- a/chromium/v8/src/arm/lithium-arm.h
+++ b/chromium/v8/src/arm/lithium-arm.h
@@ -72,6 +72,7 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -91,6 +92,7 @@ class LCodeGen;
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
+ V(Dummy) \
V(DummyUse) \
V(ElementsKind) \
V(ForInCacheArray) \
@@ -105,7 +107,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -113,13 +114,13 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
- V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -154,9 +155,9 @@ class LCodeGen;
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -217,7 +218,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -258,15 +258,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
-  // The 31-bit PositionBits field stores the int position value, which may
-  // be RelocInfo::kNoPosition (-1). The accessors add and subtract 1 so that
-  // the encoded position in bit_field_ is always >= 0 and fits into the
-  // 31-bit PositionBits field.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
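
The deleted comment describes a small encoding trick worth spelling out: positions are stored as pos + 1 so that RelocInfo::kNoPosition (-1) becomes 0 and fits an unsigned 31-bit field. Self-contained sketch (field layout simplified):

    #include <cassert>

    constexpr int kNoPosition = -1;

    struct BitFieldSketch {
      unsigned bits_ = 0;  // stands in for the 31-bit PositionBits slice
      void set_position(int pos) { bits_ = static_cast<unsigned>(pos + 1); }
      int position() const { return static_cast<int>(bits_) - 1; }
    };

    int main() {
      BitFieldSketch f;
      f.set_position(kNoPosition);
      assert(f.position() == kNoPosition);  // -1 round-trips through 0
      f.set_position(42);
      assert(f.position() == 42);
    }
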
@@ -278,7 +269,7 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -306,7 +297,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -405,17 +395,17 @@ class LInstructionGap V8_FINAL : public LGap {
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
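
Why LGoto now stores the HBasicBlock* instead of a cached integer: reading block_id() through the pointer resolves the id on use, presumably so it stays correct even if block numbering is finalized after the instruction is built. A sketch where the indirection is the point and the types are simplified:

    struct HBasicBlock {
      int id_ = -1;
      int block_id() const { return id_; }
    };

    struct LGotoSketch {
      explicit LGotoSketch(HBasicBlock* block) : block_(block) {}
      int block_id() const { return block_->block_id(); }  // resolved on use
      HBasicBlock* block_;
    };

    int main() {
      HBasicBlock b;
      LGotoSketch g(&b);
      b.id_ = 7;                    // id assigned after LGoto was created
      return g.block_id() == 7 ? 0 : 1;
    }
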
@@ -435,6 +425,13 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
@@ -484,8 +481,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -786,12 +789,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LMathAbs(LOperand* value) {
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
@@ -881,15 +886,13 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathPowHalf(LOperand* value, LOperand* temp) {
+ explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
@@ -923,9 +926,9 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -933,23 +936,26 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- explicit LIsNumberAndBranch(LOperand* value) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
@@ -1003,15 +1009,17 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -1087,15 +1095,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -1104,28 +1114,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1146,19 +1160,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1319,7 +1320,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1374,45 +1375,59 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1463,28 +1478,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() const { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1508,16 +1501,21 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
virtual Opcode opcode() const V8_OVERRIDE {
@@ -1531,11 +1529,12 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* parameter_count) {
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
- inputs_[1] = parameter_count;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
@@ -1547,7 +1546,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
- LOperand* parameter_count() { return inputs_[1]; }
+ LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1566,13 +1565,15 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1594,6 +1595,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1632,15 +1642,17 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1653,13 +1665,15 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1684,16 +1698,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
}
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1781,19 +1798,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1823,8 +1840,14 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
@@ -1866,13 +1889,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
@@ -1883,13 +1908,15 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1901,8 +1928,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
@@ -1913,13 +1946,15 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1928,8 +1963,14 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1951,13 +1992,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1968,13 +2011,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
@@ -1985,13 +2030,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
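+ // A runtime call clobbers the double registers unless it is asked to
+ // preserve them (kSaveFPRegs).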
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
@@ -2133,7 +2189,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2205,15 +2261,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2256,17 +2314,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2277,14 +2340,17 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
@@ -2294,8 +2360,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2317,15 +2385,17 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
@@ -2333,28 +2403,32 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
@@ -2465,12 +2539,17 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
inputs_[1] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
+ LOperand* context() { return inputs_[0]; }
LOperand* size() { return inputs_[1]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
@@ -2480,15 +2559,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
@@ -2507,13 +2598,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2560,8 +2653,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2572,13 +2671,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
@@ -2634,8 +2735,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
@@ -2659,6 +2760,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2792,7 +2895,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.cc b/chromium/v8/src/arm/lithium-codegen-arm.cc
index 1e06f8b7557..56990ca2284 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.cc
+++ b/chromium/v8/src/arm/lithium-codegen-arm.cc
@@ -98,21 +98,35 @@ void LCodeGen::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
}
@@ -139,33 +153,16 @@ bool LCodeGen::GeneratePrologue() {
// receiver object). r5 is zero for method calls and non-zero for
// function calls.
if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
__ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
int receiver_offset = scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ __ str(r2, MemOperand(sp, receiver_offset), ne);
}
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- __ Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- } else {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ nop(ip.code());
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -193,16 +190,7 @@ bool LCodeGen::GeneratePrologue() {
}
if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
// Possibly allocate a local context.
@@ -248,6 +236,8 @@ bool LCodeGen::GeneratePrologue() {
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -269,45 +259,15 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- RecordAndUpdatePosition(instr->position());
-
- instr->CompileToNative(this);
- }
- EnsureSpaceForLazyDeopt();
- last_lazy_deopt_pc_ = masm()->pc_offset();
- return !is_aborted();
-}
-
-
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -323,7 +283,7 @@ bool LCodeGen::GenerateDeferredCode() {
__ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -376,6 +336,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (deopt_jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
__ b(&needs_frame);
@@ -388,11 +349,15 @@ bool LCodeGen::GenerateDeoptJumpTable() {
ASSERT(info()->IsStub());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, ip);
}
} else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
}
@@ -448,7 +413,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
ASSERT(r.IsSmiOrTagged());
- __ LoadObject(scratch, literal);
+ __ Move(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -572,17 +537,36 @@ Operand LCodeGen::ToOperand(LOperand* op) {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
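+ // Parameter slots carry negative indices when there is no eager frame:
+ // index -1 maps to offset 0 from sp, index -2 to kPointerSize, and so on.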
+ return -(index + 1) * kPointerSize;
+}
+
+
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+ // Without an eager frame the parameter is retrieved relative to the
+ // stack pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Without an eager frame the parameter is retrieved relative to the
+ // stack pointer.
+ return MemOperand(
+ sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
}
@@ -727,13 +711,11 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -748,20 +730,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
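+ // The deferred context may live in a register, a stack slot, or be a
+ // constant; materialize it into cp before the runtime call.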
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ ldr(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -818,13 +816,39 @@ void LCodeGen::DeoptimizeIf(Condition condition,
return;
}
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
- if (FLAG_deopt_every_n_times == 1 &&
- !info()->IsStub() &&
- info()->opt_count() == id) {
- ASSERT(frame_is_built_);
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- return;
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+ // Store the condition on the stack if necessary.
+ if (condition != al) {
+ __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
+ __ mov(scratch, Operand(1), LeaveCC, condition);
+ __ push(scratch);
+ }
+
+ __ push(r1);
+ __ mov(scratch, Operand(count));
+ __ ldr(r1, MemOperand(scratch));
+ __ sub(r1, r1, Operand(1), SetCC);
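+ // When the countdown reaches zero, reload it; the deopt call below is
+ // predicated on the same eq flag.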
+ __ movw(r1, FLAG_deopt_every_n_times, eq);
+ __ str(r1, MemOperand(scratch));
+ __ pop(r1);
+
+ if (condition != al) {
+ // Clean up the stack before the deoptimizer call.
+ __ pop(scratch);
+ }
+
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
+
+ // 'Restore' the condition in a slightly hacky way. (It would be better
+ // to use 'msr' and 'mrs' instructions here, but they are not supported by
+ // our ARM simulator.)
+ if (condition != al) {
+ condition = ne;
+ __ cmp(scratch, Operand::Zero());
+ }
}
if (info()->ShouldTrapOnDeopt()) {
@@ -832,7 +856,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (condition == al && frame_is_built_) {
+ // Go through jump table if we need to handle condition, build frame, or
+ // restore caller doubles.
+ if (condition == al && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
@@ -862,26 +889,31 @@ void LCodeGen::DeoptimizeIf(Condition condition,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -977,10 +1009,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
@@ -991,7 +1019,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -1013,17 +1041,10 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1073,6 +1094,7 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
@@ -1090,11 +1112,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1147,36 +1164,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ and_(result_reg, left_reg, Operand(divisor - 1));
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmp(right_reg, Operand(divisor));
- DeoptimizeIf(ne, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ cmp(left_reg, Operand::Zero());
- __ b(pl, &left_is_not_negative);
- __ rsb(result_reg, left_reg, Operand::Zero());
- __ and_(result_reg, result_reg, Operand(divisor - 1));
- __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ b(&done);
- }
-
- __ bind(&left_is_not_negative);
- __ and_(result_reg, left_reg, Operand(divisor - 1));
- __ bind(&done);
-
} else if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
@@ -1383,7 +1370,8 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
void LCodeGen::DoDivI(LDivI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
+ const Register dividend = ToRegister(instr->left());
+ const Register result = ToRegister(instr->result());
int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
int32_t test_value = 0;
int32_t power = 0;
@@ -1394,7 +1382,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
} else {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ tst(dividend, Operand(dividend));
+ __ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (kMinInt / -1).
@@ -1409,20 +1397,26 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (test_value != 0) {
if (instr->hydrogen()->CheckFlag(
HInstruction::kAllUsesTruncatingToInt32)) {
- __ cmp(dividend, Operand(0));
- __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
- __ mov(dividend, Operand(dividend, ASR, power));
- if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
- if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt);
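+ // sub-with-zero copies dividend into result while setting the flags
+ // used by the conditional rsb instructions that follow.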
+ __ sub(result, dividend, Operand::Zero(), SetCC);
+ __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ __ mov(result, Operand(result, ASR, power));
+ if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
return; // Don't fall through to "__ rsb" below.
} else {
// Deoptimize if remainder is not 0.
__ tst(dividend, Operand(test_value));
DeoptimizeIf(ne, instr->environment());
- __ mov(dividend, Operand(dividend, ASR, power));
+ __ mov(result, Operand(dividend, ASR, power));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+ }
+ } else {
+ if (divisor < 0) {
+ __ rsb(result, dividend, Operand(0));
+ } else {
+ __ Move(result, dividend);
}
}
- if (divisor < 0) __ rsb(dividend, dividend, Operand(0));
return;
}
@@ -1439,12 +1433,15 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
+ Label positive;
+ if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ // Do the test only if it hasn't been done above.
+ __ cmp(right, Operand::Zero());
+ }
+ __ b(pl, &positive);
__ cmp(left, Operand::Zero());
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&positive);
}
// Check for (kMinInt / -1).
@@ -1886,7 +1883,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), value);
+ __ Move(ToRegister(instr->result()), value);
}
@@ -1973,41 +1970,85 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
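+ // Builds the operand for the character at |index|: a direct field offset
+ // for constant indices, otherwise an address computed into scratch0(),
+ // scaled by two for two-byte strings.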
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
+ Register scratch = scratch0();
+ ASSERT(!scratch.is(string));
+ ASSERT(!scratch.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ add(scratch, string, Operand(ToRegister(index)));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register value = ToRegister(instr->value());
- String::Encoding encoding = instr->encoding();
+ Register result = ToRegister(instr->result());
if (FLAG_debug_code) {
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+ Register scratch = scratch0();
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ and_(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, kUnexpectedStringType);
}
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ ldrb(result, operand);
+ } else {
+ __ ldrh(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+
+ if (FLAG_debug_code) {
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ strb(value, MemOperand(ip, index));
+ __ strb(value, operand);
} else {
- // MemOperand with ip as the base register is not allowed for strh, so
- // we do the address calculation explicitly.
- __ add(ip, ip, Operand(index, LSL, 1));
- __ strh(value, MemOperand(ip));
+ __ strh(value, operand);
}
}
void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ push(input_reg);
+ __ push(ToRegister(instr->value()));
+ ASSERT(ToRegister(instr->context()).is(cp));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -2122,9 +2163,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vdiv(result, left, right);
break;
case Token::MOD: {
- // Save r0-r3 on the stack.
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
-
__ PrepareCallCFunction(0, 2, scratch0());
__ SetCallCDoubleArguments(left, right);
__ CallCFunction(
@@ -2132,9 +2170,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
0, 2);
// Move the result in the double result register.
__ GetCFunctionDoubleResult(result);
-
- // Restore r0-r3.
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
break;
}
default:
@@ -2145,11 +2180,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(r1));
ASSERT(ToRegister(instr->right()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2158,13 +2194,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
int left_block = instr->TrueDestination(chunk_);
@@ -2197,25 +2226,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, al);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, al);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2371,6 +2381,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
@@ -2467,6 +2481,33 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ __ VFPCompareAndSetFlags(value, 0.0);
+ EmitFalseBranch(instr, ne);
+ __ VmovHigh(scratch, value);
+ __ cmp(scratch, Operand(0x80000000));
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value,
+ scratch,
+ Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()),
+ DO_SMI_CHECK);
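+ // A heap number is -0.0 exactly when its exponent word is 0x80000000 and
+ // its mantissa word is zero; the second cmp only executes if the first
+ // set the eq flag.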
+ __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ __ cmp(scratch, Operand(0x80000000));
+ __ cmp(ip, Operand(0x00000000), eq);
+ }
+ EmitBranch(instr, eq);
+}
+
+
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2575,6 +2616,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2735,6 +2777,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
@@ -2844,13 +2887,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
// Get the temp register reserved by the instruction. This needs to be r4 as
// its slot of the pushing of safepoint registers is used to communicate the
// offset to the location of the map check.
Register temp = ToRegister(instr->temp());
ASSERT(temp.is(r4));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
+ __ Move(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 5;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
@@ -2879,15 +2923,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2908,21 +2945,15 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
+ // Runtime::TraceExit returns its parameter in r0. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
__ push(r0);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
@@ -2953,7 +2984,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2964,6 +2995,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -2980,7 +3012,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register cell = scratch0();
// Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell()));
+ __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3001,6 +3033,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -3073,7 +3106,8 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- __ ldr(result, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ __ Load(result, operand, access.representation());
return;
}
@@ -3084,16 +3118,17 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ ldr(result, FieldMemOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset));
+ object = result;
}
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ Load(result, operand, access.representation());
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -3148,6 +3183,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -3160,20 +3201,35 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; adding one accounts
+ // for the other.
+ if (instr->length()->IsConstantOperand()) {
int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ ldr(result, MemOperand(arguments, index * kPointerSize));
- } else {
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ ldr(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
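+ // rsb computes (const_length + 1) - index, the slot distance from the
+ // arguments pointer.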
+ __ rsb(result, index, Operand(const_length + 1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ sub(result, length, Operand(loc));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+ } else {
+ __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ }
+ } else {
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
- __ sub(length, length, index);
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+ __ sub(result, length, index);
+ __ add(result, result, Operand(1));
+ __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
}
}
@@ -3265,27 +3321,30 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Register scratch = scratch0();
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
+
+ int base_offset =
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ (instr->additional_index() << element_size_shift);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- } else {
- key = ToRegister(instr->key());
+ base_offset += constant_key << element_size_shift;
}
+ __ add(scratch, elements, Operand(base_offset));
- int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- ((constant_key + instr->additional_index()) << element_size_shift);
if (!key_is_constant) {
- __ add(elements, elements, Operand(key, LSL, shift_size));
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, scratch, Operand(key, LSL, shift_size));
}
- __ add(elements, elements, Operand(base_offset));
- __ vldr(result, elements, 0);
+
+ __ vldr(result, scratch, 0);
+
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
@@ -3305,7 +3364,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
instr->additional_index());
store_base = elements;
} else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
+ Register key = ToRegister(instr->key());
// Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
@@ -3381,6 +3440,7 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@@ -3435,12 +3495,13 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
+ Label global_object, result_in_receiver;
// Do not transform the receiver to object for strict mode
// functions.
@@ -3450,11 +3511,11 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(scratch,
Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
- __ b(ne, &receiver_ok);
+ __ b(ne, &result_in_receiver);
// Do not transform the receiver to object for builtins.
__ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &receiver_ok);
+ __ b(ne, &result_in_receiver);
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -3469,13 +3530,23 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
DeoptimizeIf(lt, instr->environment());
- __ jmp(&receiver_ok);
+ __ b(&result_in_receiver);
__ bind(&global_object);
- __ ldr(receiver, GlobalObjectOperand());
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
+
+ __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ ldr(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(result,
+ FieldMemOperand(result, JSGlobalObject::kGlobalReceiverOffset));
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ b(&result_ok);
+ __ bind(&result_in_receiver);
+ __ mov(result, receiver);
+ __ bind(&result_ok);
+ }
}
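
The rewritten DoWrapReceiver keeps the receiver register intact and writes into a separate result register, but the underlying rule is unchanged. A compact sketch of that rule, with hypothetical names standing in for the real object model:

    enum ValueKind { kNullOrUndefined, kSpecObject, kOtherPrimitive };

    // Classic-mode functions see the global receiver in place of null or
    // undefined; strict-mode and native functions get the value unchanged.
    ValueKind WrapReceiver(ValueKind receiver, bool is_strict, bool is_native,
                           ValueKind global_receiver) {
      if (is_strict || is_native) return receiver;
      if (receiver == kNullOrUndefined) return global_receiver;
      return receiver;  // non-object primitives deoptimize in the real code
    }
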
@@ -3517,7 +3588,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
@@ -3525,7 +3595,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3554,11 +3623,11 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
// If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- __ mov(result, cp);
- return;
- }
+ if (info()->IsOptimizing()) {
+ __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
}
}
@@ -3572,8 +3641,9 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ __ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
@@ -3582,8 +3652,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}
@@ -3606,11 +3677,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (r1_state == R1_UNINITIALIZED) {
- __ LoadHeapObject(r1, function);
+ __ Move(r1, function);
}
// Change context.
@@ -3636,9 +3706,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ InvokeFunction(
function, expected, count, CALL_FUNCTION, generator, call_kind);
}
-
- // Restore context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3654,6 +3721,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3697,7 +3766,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// Restore input_reg after call to runtime.
@@ -3837,7 +3907,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
- DwVfpRegister temp = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp = double_scratch0();
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
@@ -3860,11 +3930,11 @@ void LCodeGen::DoPower(LPower* instr) {
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d2));
+ ToDoubleRegister(instr->right()).is(d1));
ASSERT(!instr->right()->IsRegister() ||
ToRegister(instr->right()).is(r2));
- ASSERT(ToDoubleRegister(instr->left()).is(d1));
- ASSERT(ToDoubleRegister(instr->result()).is(d3));
+ ASSERT(ToDoubleRegister(instr->left()).is(d0));
+ ASSERT(ToDoubleRegister(instr->result()).is(d2));
if (exponent_type.IsSmi()) {
MathPowStub stub(MathPowStub::TAGGED);
@@ -3872,9 +3942,9 @@ void LCodeGen::DoPower(LPower* instr) {
} else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(r2, &no_deopt);
- __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, Operand(ip));
+ __ cmp(r6, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
@@ -3890,68 +3960,6 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- // Load native context
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ ldr(native_context, FieldMemOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds)
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- Register scratch4 = scratch0();
- __ and_(scratch3, state0, Operand(0xFFFF));
- __ mov(scratch4, Operand(18273));
- __ mul(scratch3, scratch3, scratch4);
- __ add(state0, scratch3, Operand(state0, LSR, 16));
- // Save state[0].
- __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ and_(scratch3, state1, Operand(0xFFFF));
- __ mov(scratch4, Operand(36969));
- __ mul(scratch3, scratch3, scratch4);
- __ add(state1, scratch3, Operand(state1, LSR, 16));
- // Save state[1].
- __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = scratch4;
- __ and_(random, state1, Operand(0x3FFFF));
- __ add(random, random, Operand(state0, LSL, 14));
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(scratch3, Operand(0x41000000));
- __ orr(scratch3, scratch3, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- DwVfpRegister result = ToDoubleRegister(instr->result());
- __ vmov(result, random, scratch3);
- // Move 0x4130000000000000 to VFP.
- __ mov(scratch4, Operand::Zero());
- DwVfpRegister scratch5 = double_scratch0();
- __ vmov(scratch5, scratch4, scratch3);
- __ vsub(result, result, scratch5);
-}
-
-
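
The deleted DoRandom lowered Math.random() inline as a pair of 16-bit multiply-with-carry generators plus an exponent trick: splicing the random bits into the low mantissa word of 2^20 (upper word 0x41300000) yields 2^20 + r * 2^-32, so subtracting the base leaves a uniform double in [0, 1). A standalone sketch of the same algorithm:

    #include <cstdint>
    #include <cstring>

    double MwcRandom(uint32_t state[2]) {
      // state[i] = multiplier * (state[i] & 0xFFFF) + (state[i] >> 16)
      state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
      state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
      uint32_t random = (state[0] << 14) + (state[1] & 0x3FFFF);
      // 0x41300000'00000000 is 2^20; the low word supplies the random bits.
      uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random;
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result - 1048576.0;  // subtract 2^20, leaving [0, 1)
    }
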
void LCodeGen::DoMathExp(LMathExp* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
@@ -3968,6 +3976,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3976,6 +3987,9 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3984,6 +3998,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3992,6 +4009,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, Operand::Zero());
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3999,17 +4019,16 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -4022,17 +4041,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
@@ -4041,23 +4061,27 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ mov(sp, fp);
+ __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ } else {
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
@@ -4066,7 +4090,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -4082,6 +4105,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -4095,6 +4119,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -4155,7 +4180,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ add(result, base, Operand(instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ add(result, base, Operand(ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ add(result, base, offset);
+ }
}
@@ -4169,7 +4200,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (access.IsExternalMemory()) {
Register value = ToRegister(instr->value());
- __ str(value, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ __ Store(value, operand, representation);
return;
}
@@ -4214,7 +4246,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
- __ str(value, FieldMemOperand(object, offset));
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
@@ -4228,7 +4261,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ str(value, FieldMemOperand(scratch, offset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -4246,6 +4280,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -4311,16 +4346,23 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size));
- __ add(scratch0(), external_pointer, operand);
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ add(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ add(address, external_pointer, Operand(key, LSL, shift_size));
+ }
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vstr(double_scratch0().low(), address, additional_offset);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), additional_offset);
+ __ vstr(value, address, additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
@@ -4362,32 +4404,28 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
- Register key = no_reg;
Register scratch = scratch0();
+ DwVfpRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
// Calculate the effective address of the slot in the array to store the
// double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+ __ add(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- Operand operand = key_is_constant
- ? Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch,
+ Operand(ToRegister(instr->key()), LSL, shift_size));
}
if (instr->NeedsCanonicalization()) {
@@ -4397,9 +4435,12 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
__ Assert(ne, kDefaultNaNModeNotSet);
}
- __ VFPCanonicalizeNaN(value);
+ __ VFPCanonicalizeNaN(double_scratch, value);
+ __ vstr(double_scratch, scratch,
+ instr->additional_index() << element_size_shift);
+ } else {
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
@@ -4463,6 +4504,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
@@ -4496,6 +4538,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetLinkRegisterState(), kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
__ Move(r0, object_reg);
@@ -4512,16 +4555,27 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(eq, instr->environment());
+ __ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ if (FLAG_new_string_add) {
+ ASSERT(ToRegister(instr->left()).is(r1));
+ ASSERT(ToRegister(instr->right()).is(r0));
+ NewStringAddStub stub(instr->hydrogen()->flags(),
+ isolate()->heap()->GetPretenureMode());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4573,7 +4627,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
__ StoreToSafepointRegisterSlot(r0, result);
@@ -4625,7 +4680,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -4650,10 +4705,13 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
+ ASSERT(output->IsRegister());
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ SmiTag(ToRegister(output), ToRegister(input));
}
}
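
Tagging a 32-bit integer as a Smi is a single left shift on this port, so the check can be elided when Hydrogen's range analysis has already proven the value fits; otherwise the SetCC form sets the overflow flag and the `vs` deopt fires. Illustrative arithmetic, with the Smi limits written out:

    const int32_t kSmiMaxValue = (1 << 30) - 1;  // 2^30 - 1
    const int32_t kSmiMinValue = -(1 << 30);

    // Valid only for kSmiMinValue <= value <= kSmiMaxValue; outside that
    // range the shift overflows, which is the deoptimization case above.
    int32_t SmiTag(int32_t value) { return value << 1; }
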
@@ -4720,14 +4778,13 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
LNumberTagU* instr_;
};
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(reg, Operand(Smi::kMaxValue));
+ __ cmp(input, Operand(Smi::kMaxValue));
__ b(hi, deferred->entry());
- __ SmiTag(reg, reg);
+ __ SmiTag(result, input);
__ bind(deferred->exit());
}
@@ -4774,7 +4831,15 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// integer value.
__ mov(ip, Operand::Zero());
__ StoreToSafepointRegisterSlot(ip, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Move(dst, r0);
__ sub(dst, dst, Operand(kHeapObjectTag));
@@ -4830,7 +4895,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
@@ -4865,36 +4938,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
-
- Label load_smi, heap_number, done;
-
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(ne, env);
+ if (can_convert_undefined_to_nan) {
+ __ b(ne, &convert);
} else {
- Label heap_number, convert;
- __ b(eq, &heap_number);
-
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
DeoptimizeIf(ne, env);
-
- __ bind(&convert);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
- __ jmp(&done);
-
- __ bind(&heap_number);
}
- // Heap number to double register conversion.
+ // Load the heap number.
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
if (deoptimize_on_minus_zero) {
__ VmovLow(scratch, result_reg);
@@ -4905,11 +4962,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(eq, env);
}
__ jmp(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, env);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ jmp(&done);
+ }
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
@@ -4945,18 +5011,33 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- Label heap_number;
- __ b(eq, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ Label no_heap_number, check_bools, check_false;
+ __ b(ne, &no_heap_number);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ b(&done);
+
+ // Check for oddballs. Undefined and false are converted to zero and
+ // true to one for truncating conversions.
+ __ bind(&no_heap_number);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
+ __ b(ne, &check_bools);
__ mov(input_reg, Operand::Zero());
__ b(&done);
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ bind(&check_bools);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ __ b(ne, &check_false);
+ __ mov(input_reg, Operand(1));
+ __ b(&done);
+
+ __ bind(&check_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(scratch2, Operand(ip));
+ DeoptimizeIf(ne, instr->environment());
+ __ mov(input_reg, Operand::Zero());
+ __ b(&done);
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
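
The truncating path above no longer bails out on every non-number: heap numbers are truncated with TruncateHeapNumberToI, while the oddballs map undefined and false to 0 and true to 1; anything else deoptimizes. The mapping, sketched with a hypothetical enum:

    enum Oddball { kUndefinedOddball, kFalseOddball, kTrueOddball, kNeither };

    bool TruncateOddball(Oddball value, int32_t* out) {
      switch (value) {
        case kUndefinedOddball:
        case kFalseOddball: *out = 0; return true;
        case kTrueOddball:  *out = 1; return true;
        default:            return false;  // deoptimize
      }
    }
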
@@ -4997,15 +5078,19 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ __ SmiUntag(input_reg, SetCC);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ b(cs, deferred->entry());
+ __ bind(deferred->exit());
+ }
}
@@ -5143,7 +5228,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
@@ -5162,7 +5247,10 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
- CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ mov(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
@@ -5195,7 +5283,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
@@ -5204,14 +5291,15 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(map_reg, map, &success);
__ b(eq, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(map_reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ b(ne, deferred->entry());
@@ -5309,7 +5397,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size,
@@ -5362,16 +5454,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(size));
}
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
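
Rather than three separate runtime entry points, deferred allocation now pushes a single Smi-encoded flag word and calls Runtime::kAllocateInTargetSpace. A sketch of the encoding, with bit positions assumed for illustration only:

    enum Space { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE };

    // Mirrors AllocateDoubleAlignFlag::encode + AllocateTargetSpace::update.
    int EncodeAllocationFlags(bool must_double_align, Space target) {
      return (must_double_align ? 1 : 0) | (static_cast<int>(target) << 1);
    }
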
@@ -5384,26 +5482,27 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
- // r7 = literals array.
+ // r6 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
+ // r2-r5 are used as temporaries.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(r7, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
+ __ Move(r6, instr->hydrogen()->literals());
+ __ ldr(r1, FieldMemOperand(r6, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function
// Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
+ __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(r4, Operand(instr->hydrogen()->pattern()));
+ __ mov(r3, Operand(instr->hydrogen()->flags()));
+ __ Push(r6, r5, r4, r3);
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
@@ -5427,6 +5526,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
@@ -5473,22 +5573,21 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register scratch = scratch0();
if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(input, Operand(ip));
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
final_branch_condition = eq;
} else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
} else if (type_name->Equals(heap()->symbol_string())) {
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
+ __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
final_branch_condition = eq;
} else if (type_name->Equals(heap()->boolean_string())) {
@@ -5506,33 +5605,35 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ b(eq, true_label);
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ Register type_reg = scratch;
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+ __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
__ b(eq, true_label);
- __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
} else if (type_name->Equals(heap()->object_string())) {
+ Register map = scratch;
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
}
- __ CompareObjectType(input, input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(lt, false_label);
- __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, false_label);
+ __ CheckObjectTypeRange(input,
+ map,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ false_label);
// Check for undetectable objects => false.
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
} else {
@@ -5570,16 +5671,15 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block literal pool emission for duration of padding.
Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
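
EnsureSpaceForLazyDeopt now takes the required patch size as a parameter but keeps the same contract: pad with nops (with the constant pool blocked) until `space_needed` bytes separate the previous lazy-deopt point from the current position. The computation, worked as plain arithmetic with Assembler::kInstrSize assumed to be 4:

    // e.g. last pc 96, space needed 12, current pc 100 -> 8 bytes, two nops.
    int PaddingBytes(int last_lazy_deopt_pc, int space_needed, int current_pc) {
      int padding = last_lazy_deopt_pc + space_needed - current_pc;
      return padding > 0 ? padding : 0;  // multiple of kInstrSize by design
    }
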
@@ -5590,7 +5690,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5614,6 +5714,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDummyUse(LDummyUse* instr) {
// Nothing to see here, move on!
}
@@ -5621,6 +5726,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5654,10 +5760,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
CallCode(isolate()->builtins()->StackCheck(),
- RelocInfo::CODE_TARGET,
- instr);
- EnsureSpaceForLazyDeopt();
+ RelocInfo::CODE_TARGET,
+ instr);
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5670,7 +5778,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.h b/chromium/v8/src/arm/lithium-codegen-arm.h
index 4b6b5ca8e36..3f2ba35899a 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.h
+++ b/chromium/v8/src/arm/lithium-codegen-arm.h
@@ -32,6 +32,7 @@
#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -43,43 +44,26 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -178,30 +162,15 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
Register scratch0() { return r9; }
LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
- int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -214,14 +183,15 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
@@ -249,7 +219,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -258,9 +229,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
enum R1State {
R1_UNINITIALIZED,
@@ -276,8 +249,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallKind call_kind,
R1State r1_state);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -305,6 +276,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
+ MemOperand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
@@ -320,11 +295,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
@@ -383,7 +360,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Register scratch,
LEnvironment* environment);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -391,24 +368,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -420,8 +387,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
index 88ac7a2a21d..0c6b2adadfd 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -252,7 +252,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsInteger32(constant_source)) {
__ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DwVfpRegister result = cgen_->ToDoubleRegister(destination);
@@ -267,7 +267,7 @@ void LGapResolver::EmitMove(int index) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(kSavedValueRegister,
+ __ Move(kSavedValueRegister,
cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
diff --git a/chromium/v8/src/arm/macro-assembler-arm.cc b/chromium/v8/src/arm/macro-assembler-arm.cc
index 7df785776dd..5f6076b41df 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/arm/macro-assembler-arm.cc
@@ -35,6 +35,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
@@ -43,7 +44,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -233,7 +233,19 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ mov(dst, Operand(value));
+ } else {
+ ASSERT(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ mov(dst, Operand(cell));
+ ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ mov(dst, Operand(value));
+ }
+ }
}
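
Folding the old LoadHeapObject into Move centralizes the new-space rule: a new-space object can move during GC, so its address must not be embedded in code; the code instead embeds an old-space Cell and re-reads the current address through it. The indirection, sketched with a hypothetical cell layout:

    struct Cell { void* value; };  // old-space slot; its address is GC-stable

    void* LoadThroughCell(const Cell* cell) {
      // The code object embeds &cell; the GC keeps cell->value up to date.
      return cell->value;
    }
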
@@ -371,6 +383,38 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
}
+void MacroAssembler::Load(Register dst,
+ const MemOperand& src,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ ldrsb(dst, src);
+ } else if (r.IsUInteger8()) {
+ ldrb(dst, src);
+ } else if (r.IsInteger16()) {
+ ldrsh(dst, src);
+ } else if (r.IsUInteger16()) {
+ ldrh(dst, src);
+ } else {
+ ldr(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src,
+ const MemOperand& dst,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ strb(src, dst);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ strh(src, dst);
+ } else {
+ str(src, dst);
+ }
+}
+
+
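
The new Load/Store helpers select the access width (and, for loads, the sign extension) from the value's representation, so call sites such as DoStoreNamedField no longer hard-code a full-word `str`. A C++ analogue of the store-side dispatch:

    #include <cstdint>
    #include <cstring>

    void StoreForWidth(void* dst, int32_t src, int width_bytes) {
      if (width_bytes == 1) {         // strb: Integer8 / UInteger8
        uint8_t b = static_cast<uint8_t>(src);
        std::memcpy(dst, &b, 1);
      } else if (width_bytes == 2) {  // strh: Integer16 / UInteger16
        uint16_t h = static_cast<uint16_t>(src);
        std::memcpy(dst, &h, 2);
      } else {                        // str: full word
        std::memcpy(dst, &src, 4);
      }
    }
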
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
@@ -394,19 +438,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- mov(result, Operand(cell));
- ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- } else {
- mov(result, Operand(object));
- }
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -478,17 +509,18 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
-
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
@@ -593,22 +625,26 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+ // Only save allocatable registers.
+ ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
+ ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ vstm(db_w, sp, d16, d31);
}
+ vstm(db_w, sp, d0, d13);
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+ // Only restore allocatable registers.
+ ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
+ ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
+ vldm(ia_w, sp, d0, d13);
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ vldm(ia_w, sp, d16, d31);
}
- add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
PopSafepointRegisters();
}
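
The per-register vstr/vldr loops become two block moves, and the reserved registers d14 (kDoubleRegZero) and d15 (kScratchDoubleReg) are deliberately skipped. The resulting layout, sketched; the pop sequence unwinds it in exactly the reverse order:

    // After PushSafepointRegistersAndDoubles (lowest address first):
    //   sp -> [ d0  .. d13 ]               pushed last  (vstm db_w)
    //         [ d16 .. d31 ]               only with VFP32DREGS
    //         [ core safepoint registers ] pushed first
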
@@ -733,9 +769,11 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
bind(&fpscr_done);
}
-void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
+
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
const Condition cond) {
- vsub(value, value, kDoubleRegZero, cond);
+ vsub(dst, src, kDoubleRegZero, cond);
}
@@ -829,93 +867,30 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
-void MacroAssembler::LoadNumber(Register object,
- LowDwVfpRegister dst,
- Register heap_number_map,
- Register scratch,
- Label* not_number) {
- Label is_smi, done;
-
- UntagAndJumpIfSmi(scratch, object, &is_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
-
- vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- b(&done);
-
- // Handle loading a double from a smi.
- bind(&is_smi);
- vmov(dst.high(), scratch);
- vcvt_f64_s32(dst, dst.high());
-
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32Double(Register object,
- DwVfpRegister double_dst,
- Register heap_number_map,
- Register scratch,
- LowDwVfpRegister double_scratch,
- Label* not_int32) {
- ASSERT(!scratch.is(object));
- ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
-
- Label done, obj_is_not_smi;
-
- UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
- vmov(double_scratch.low(), scratch);
- vcvt_f64_s32(double_dst, double_scratch.low());
- b(&done);
-
- bind(&obj_is_not_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
-
- // Load the number.
- // Load the double value.
- vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- TestDoubleIsInt32(double_dst, double_scratch);
- // Jump to not_int32 if the operation did not succeed.
- b(ne, not_int32);
-
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch,
- DwVfpRegister double_scratch0,
- LowDwVfpRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch.is(object));
-
- Label done, maybe_undefined;
-
- UntagAndJumpIfSmi(dst, object, &done);
-
- JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- // Load the double value.
- vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
- // Jump to not_int32 if the operation did not succeed.
- b(ne, not_int32);
- b(&done);
-
- bind(&maybe_undefined);
- CompareRoot(object, Heap::kUndefinedValueRootIndex);
- b(ne, not_int32);
- // |undefined| is truncated to 0.
- mov(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- bind(&done);
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ add(r0, pc, Operand(-8));
+ ldr(pc, MemOperand(pc, -4));
+ emit_code_stub_address(stub);
+ } else {
+ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ nop(ip.code());
+ // Adjust FP to point to saved FP.
+ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
+ }
}
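// Why the pre-aged sequence works (assumption: reading pc on ARM yields the
// address of the current instruction plus 8):
//
//   base + 0:  add r0, pc, #-8    ; r0 = (base + 8) - 8 = base
//   base + 4:  ldr pc, [pc, #-4]  ; loads from (base + 4 + 8) - 4 = base + 8
//   base + 8:  .word <stub entry> ; literal emitted by emit_code_stub_address
//
// r0 hands the code age stub the start address of the sequence, which it
// needs in order to locate the code object being aged.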
@@ -926,7 +901,9 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ip);
mov(ip, Operand(CodeObject()));
push(ip);
- add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
+ // Adjust FP to point to saved FP.
+ add(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1020,7 +997,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool restore_context) {
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1035,10 +1013,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
+
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
+ if (restore_context) {
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ ldr(cp, MemOperand(ip));
+ }
#ifdef DEBUG
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
str(r3, MemOperand(ip));
#endif
@@ -1246,7 +1228,7 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -1255,8 +1237,10 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
+ // Contract with called JS functions requires that function is passed in r1.
+ ASSERT(function.is(r1));
+
// Get the function and setup the context.
- LoadHeapObject(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
@@ -1267,6 +1251,17 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ Move(r1, function);
+ InvokeFunction(r1, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
@@ -1330,7 +1325,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
- // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
+ // For the JSEntry handler, we must preserve r0-r4; r5-r6 are available.
// We will build up the handler from the bottom by pushing on the stack.
// Set up the code object (r5) and the state (r6) for pushing.
unsigned state =
@@ -1341,9 +1336,9 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
+ mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
mov(ip, Operand::Zero()); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
@@ -1529,6 +1524,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
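// The instructions following this load (in the unshown remainder of the
// hunk) mirror ComputeIntegerHash from utils.h, per the comment above.
// A C++ sketch of that hash:

#include <cstdint>

static uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}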
@@ -1595,8 +1593,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -1613,7 +1610,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
cmp(key, Operand(ip));
- if (i != kProbes - 1) {
+ if (i != kNumberDictionaryProbes - 1) {
b(eq, &done);
} else {
b(ne, miss);
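// Probe sequence sketch: with a power-of-two capacity, the quadratic step
// (hash + i + i*i) & mask visits offsets 0, 2, 6, 12, ... from the initial
// hash, and the mask (capacity - 1) wraps each index into the table.

#include <cstdint>

static uint32_t ProbeIndexSketch(uint32_t hash, uint32_t i, uint32_t mask) {
  return (hash + i + i * i) & mask;
}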
@@ -2037,14 +2034,36 @@ void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
+ const Register temp = type_reg.is(no_reg) ? ip : type_reg;
+
+ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(map, temp, type);
+}
+
+
+void MacroAssembler::CheckObjectTypeRange(Register object,
+ Register map,
+ InstanceType min_type,
+ InstanceType max_type,
+ Label* false_label) {
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(map, type_reg, type);
+ ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ sub(ip, ip, Operand(min_type));
+ cmp(ip, Operand(max_type - min_type));
+ b(hi, false_label);
}
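// CheckObjectTypeRange uses the standard unsigned-compare trick: a single
// subtraction plus one unsigned comparison (the hi condition) tests both
// bounds, because a type below min_type wraps to a large unsigned value.

#include <cstdint>

static bool InstanceTypeInRangeSketch(uint32_t type, uint32_t min_type,
                                      uint32_t max_type) {
  return (type - min_type) <= (max_type - min_type);
}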
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
+ // Registers map and type_reg can be ip. These two lines assert
+ // that ip can be used with the two instructions (the constants
+ // will never need ip).
+ STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+ STATIC_ASSERT(LAST_TYPE < 256);
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
@@ -2269,8 +2288,6 @@ void MacroAssembler::CallStub(CodeStub* stub,
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
}
@@ -2280,12 +2297,14 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -2296,13 +2315,15 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ ASSERT(!thunk_last_arg.is(r3));
+
// Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
+ mov(r9, Operand(next_address));
+ ldr(r4, MemOperand(r9, kNextOffset));
+ ldr(r5, MemOperand(r9, kLimitOffset));
+ ldr(r6, MemOperand(r9, kLevelOffset));
add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
@@ -2313,7 +2334,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
PopSafepointRegisters();
}
- ASSERT(!thunk_last_arg.is(r3));
Label profiler_disabled;
Label end_profiler_check;
bool* is_profiling_flag =
@@ -2349,24 +2369,25 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
}
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
// Load the value from ReturnValue.
- ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
+ ldr(r0, return_value_operand);
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
+ str(r4, MemOperand(r9, kNextOffset));
if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
+ ldr(r1, MemOperand(r9, kLevelOffset));
cmp(r1, r6);
Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
}
sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
+ ldr(ip, MemOperand(r9, kLimitOffset));
cmp(r5, ip);
b(ne, &delete_allocated_handles);
@@ -2377,21 +2398,29 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
+ bind(&exception_handled);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ ldr(cp, *context_restore_operand);
+ }
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
+ LeaveExitFrame(false, r4, !restore_context);
mov(pc, lr);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
+ str(r5, MemOperand(r9, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(1, r5);
mov(r0, Operand(ExternalReference::isolate_address(isolate())));
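// Bookkeeping around the call, as a C++ sketch (assumption: next, limit and
// level are the HandleScope fields addressed above via kNextOffset,
// kLimitOffset and kLevelOffset):

struct HandleScopeDataSketch { void* next; void* limit; int level; };

static void ApiCallScopeSketch(HandleScopeDataSketch* d, void (*callback)()) {
  void* saved_next = d->next;
  void* saved_limit = d->limit;
  d->level++;                 // enter the scope
  callback();                 // the actual API function
  d->next = saved_next;       // drop any handles the callback created
  d->level--;
  if (d->limit != saved_limit) {
    // The callback grew the scope; the generated code takes the
    // delete_allocated_handles path and calls DeleteExtensions.
    d->limit = saved_limit;
  }
}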
@@ -2403,8 +2432,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2603,7 +2631,8 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -2620,21 +2649,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
mov(r0, Operand(num_arguments));
mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1, save_doubles);
CallStub(&stub);
}
@@ -3079,6 +3094,88 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register usage: result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+ sub(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ eor(scratch1, scratch1, Operand(scratch2));
+ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ sub(scratch2, object, Operand(kHeapObjectTag));
+ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ sub(probe, probe, Operand(kHeapObjectTag));
+ vldr(d1, probe, HeapNumber::kValueOffset);
+ VFPCompareAndSetFlags(d0, d1);
+ b(ne, not_found); // The cache did not contain this value.
+ b(&load_result_from_cache);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ and_(scratch, mask, Operand(object, ASR, 1));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ add(scratch,
+ number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ cmp(object, probe);
+ b(ne, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
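// The hash computed above, as a C++ sketch (per the comment referencing
// Heap::GetNumberStringCache: smis hash to their own value, doubles to the
// xor of their two 32-bit halves; mask is half the cache length minus one):

#include <cstdint>
#include <cstring>

static uint32_t NumberStringCacheHashSketch(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return (static_cast<uint32_t>(bits) ^
          static_cast<uint32_t>(bits >> 32)) & mask;
}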
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -3191,20 +3288,19 @@ void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
+ cmp(length, Operand(kPointerSize));
+ b(le, &byte_loop);
+
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
+ b(&align_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
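// The control flow this hunk repairs, as a C++ sketch (the removed branch
// could leave the align loop after a single byte; the new unconditional
// branch keeps byte-copying until src is word aligned, assuming
// kPointerSize == 4):

#include <cstddef>
#include <cstdint>
#include <cstring>

static void CopyBytesSketch(const uint8_t* src, uint8_t* dst, size_t length) {
  while (length > 4 && (reinterpret_cast<uintptr_t>(src) & 3) != 0) {
    *dst++ = *src++;             // align_loop_1
    --length;
  }
  while (length >= 4) {          // word_loop
    uint32_t word;
    std::memcpy(&word, src, 4);  // aligned load in the generated code
    std::memcpy(dst, &word, 4);  // dst may be unaligned; stored bytewise
    src += 4;
    dst += 4;
    length -= 4;
  }
  while (length > 0) {           // byte_loop
    *dst++ = *src++;
    --length;
  }
}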
@@ -3334,6 +3430,42 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ SmiTst(string);
+ ThrowIf(eq, kNonObject);
+
+ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ cmp(ip, Operand(encoding_mask));
+ ThrowIf(ne, kUnexpectedStringType);
+
+ // The index comes in untagged. Tag it so it can be compared with the string
+ // length without using a temp register; it is untagged again at the end of
+ // this function.
+ Label index_tag_ok, index_tag_bad;
+ TrySmiTag(index, index, &index_tag_bad);
+ b(&index_tag_ok);
+ bind(&index_tag_bad);
+ Throw(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+
+ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+ cmp(index, ip);
+ ThrowIf(ge, kIndexIsTooLarge);
+
+ cmp(index, Operand(Smi::FromInt(0)));
+ ThrowIf(lt, kIndexIsNegative);
+
+ SmiUntag(index, index);
+}
+
+
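// The mask comparison above, host-side (assumption: the two constants carry
// their usual objects.h values; they are reproduced here for illustration):

#include <cstdint>

static bool SeqStringEncodingMatchesSketch(uint32_t instance_type,
                                           uint32_t encoding_mask) {
  const uint32_t kRepresentationMaskSketch = 0x03;  // seq/cons/ext/sliced
  const uint32_t kEncodingMaskSketch = 0x04;        // one-byte vs two-byte
  return (instance_type &
          (kRepresentationMaskSketch | kEncodingMaskSketch)) == encoding_mask;
}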
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
@@ -3361,9 +3493,8 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- } else {
+ ASSERT(dreg.is(d0));
+ if (!use_eabi_hardfloat()) {
vmov(r0, r1, dreg);
}
}
@@ -3371,16 +3502,9 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
DwVfpRegister dreg2) {
- if (use_eabi_hardfloat()) {
- if (dreg2.is(d0)) {
- ASSERT(!dreg1.is(d1));
- Move(d1, dreg2);
- Move(d0, dreg1);
- } else {
- Move(d0, dreg1);
- Move(d1, dreg2);
- }
- } else {
+ ASSERT(dreg1.is(d0));
+ ASSERT(dreg2.is(d1));
+ if (!use_eabi_hardfloat()) {
vmov(r0, r1, dreg1);
vmov(r2, r3, dreg2);
}
@@ -3389,8 +3513,8 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
+ ASSERT(dreg.is(d0));
if (use_eabi_hardfloat()) {
- Move(d0, dreg);
Move(r0, reg);
} else {
Move(r2, reg);
@@ -3717,6 +3841,52 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
}
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ mov(r0, Operand(Smi::FromInt(reason)));
+ push(r0);
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // Will not return here.
+ if (is_const_pool_blocked()) {
+ // If the calling code cares throw the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the ThrowMessage macro constant.
+ static const int kExpectedThrowMessageInstructions = 10;
+ int throw_instructions = InstructionsGeneratedSince(&throw_start);
+ ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
+ while (throw_instructions++ < kExpectedThrowMessageInstructions) {
+ nop();
+ }
+ }
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ b(NegateCondition(cc), &L);
+ Throw(reason);
+ // Will not return here.
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -3747,7 +3917,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
EnumLength(r3, r1);
- cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+ cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
b(eq, call_runtime);
jmp(&start);
@@ -3776,8 +3946,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -3785,15 +3955,14 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
add(scratch_reg, receiver_reg,
Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Operand(new_space_start));
- b(lt, &no_memento_available);
+ b(lt, no_memento_found);
mov(ip, Operand(new_space_allocation_top));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
- b(gt, &no_memento_available);
+ b(gt, no_memento_found);
ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
cmp(scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ Operand(isolate()->factory()->allocation_memento_map()));
}
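// Pointer arithmetic of the memento test, as a sketch (assumption: a
// memento, if present, sits immediately after the JSArray in new space;
// the size parameters stand in for JSArray::kSize and
// AllocationMemento::kSize):

#include <cstddef>
#include <cstdint>

static bool HasMementoSketch(uintptr_t receiver_tagged,
                             uintptr_t new_space_start,
                             uintptr_t new_space_top,
                             uintptr_t memento_map,
                             size_t js_array_size, size_t memento_size) {
  const uintptr_t kHeapObjectTagSketch = 1;
  uintptr_t end = receiver_tagged - kHeapObjectTagSketch + js_array_size +
                  memento_size;
  if (end < new_space_start || end > new_space_top) return false;
  const uintptr_t* memento =
      reinterpret_cast<const uintptr_t*>(end - memento_size);
  return *memento == memento_map;  // the first word of a memento is its map
}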
@@ -3821,6 +3990,32 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // |current| walks the prototype chain, starting at the object itself.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
+ b(eq, found);
+ ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ cmp(current, Operand(factory->null_value()));
+ b(ne, &loop_again);
+}
+
+
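// Shape of the loop, with stand-in types (the generated code walks map ->
// prototype -> map, extracting the elements kind from Map::kBitField2Offset
// on each step):

struct ProtoNodeSketch {
  int elements_kind;
  ProtoNodeSketch* prototype;  // nullptr plays the role of null_value
};

static bool DictionaryInPrototypeChainSketch(ProtoNodeSketch* current,
                                             int dictionary_elements_kind) {
  for (; current != nullptr; current = current->prototype) {
    if (current->elements_kind == dictionary_elements_kind) return true;
  }
  return false;
}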
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
diff --git a/chromium/v8/src/arm/macro-assembler-arm.h b/chromium/v8/src/arm/macro-assembler-arm.h
index 9abd5a0c3da..f71c1a3852c 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.h
+++ b/chromium/v8/src/arm/macro-assembler-arm.h
@@ -45,8 +45,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register kRootRegister = { 10 }; // Roots array pointer.
+const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
+const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
+const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -160,6 +161,9 @@ class MacroAssembler: public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(DwVfpRegister dst, DwVfpRegister src);
+ void Load(Register dst, const MemOperand& src, Representation r);
+ void Store(Register src, const MemOperand& dst, Representation r);
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index,
@@ -169,17 +173,6 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -469,8 +462,13 @@ class MacroAssembler: public Assembler {
void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value else, do nothing.
- void VFPCanonicalizeNaN(const DwVfpRegister value,
+ void VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
const Condition cond = al);
+ void VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond = al) {
+ VFPCanonicalizeNaN(value, value, cond);
+ }
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
@@ -533,6 +531,8 @@ class MacroAssembler: public Assembler {
LowDwVfpRegister double_scratch1,
Label* not_int32);
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -541,7 +541,9 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
+ void LeaveExitFrame(bool save_doubles,
+ Register argument_count,
+ bool restore_context);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -611,6 +613,13 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
+
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -659,6 +668,12 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -837,11 +852,21 @@ class MacroAssembler: public Assembler {
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
+ // If type_reg is no_reg, ip is used instead.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
InstanceType type);
+ // Compare the object type of a heap object against an inclusive range.
+ // Branches to false_label if the type is lower than min_type or greater
+ // than max_type; the object's map is left in the map register.
+ void CheckObjectTypeRange(Register heap_object,
+ Register map,
+ InstanceType min_type,
+ InstanceType max_type,
+ Label* false_label);
+
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1037,11 +1062,20 @@ class MacroAssembler: public Assembler {
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1111,7 +1145,8 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_fp);
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1163,8 +1198,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -1286,6 +1319,18 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities
+ // Generate code to look up a number in the number string cache. If the
+ // number in the object register is found, the generated code falls through
+ // with the result in the result register; object and result may be the same
+ // register. If the number is not found, the code jumps to the not_found
+ // label, and only the contents of the object register are guaranteed to be
+ // unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both objects are sequential ASCII strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
@@ -1319,6 +1364,11 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
// ---------------------------------------------------------------------------
// Patching helpers.
@@ -1360,9 +1410,24 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to eq
+ // If allocation info is present, condition flags are set to eq.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ b(eq, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
private:
void CallCFunctionHelper(Register function,
@@ -1411,7 +1476,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
diff --git a/chromium/v8/src/arm/regexp-macro-assembler-arm.h b/chromium/v8/src/arm/regexp-macro-assembler-arm.h
index 9f07489e1fc..8d9d515c76b 100644
--- a/chromium/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/chromium/v8/src/arm/regexp-macro-assembler-arm.h
@@ -223,11 +223,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
- // Manage a small pre-allocated pool for writing label targets
- // to for pushing backtrack addresses.
- int backtrack_constant_pool_offset_;
- int backtrack_constant_pool_capacity_;
-
// Labels used internally.
Label entry_label_;
Label start_label_;
diff --git a/chromium/v8/src/arm/simulator-arm.cc b/chromium/v8/src/arm/simulator-arm.cc
index def18186305..461d032b99f 100644
--- a/chromium/v8/src/arm/simulator-arm.cc
+++ b/chromium/v8/src/arm/simulator-arm.cc
@@ -912,6 +912,12 @@ double Simulator::get_double_from_register_pair(int reg) {
}
+void Simulator::set_register_pair_from_double(int reg, double* value) {
+ ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+ memcpy(registers_ + reg, value, sizeof(*value));
+}
+
+
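// The new setter pairs with get_double_from_register_pair; both go through
// memcpy to avoid strict-aliasing traps. A free-standing sketch (assumption:
// a little-endian host, matching the simulated ARM register layout):

#include <cstdint>
#include <cstring>

static double DoubleFromRegisterPairSketch(const int32_t regs[2]) {
  double d;
  std::memcpy(&d, regs, sizeof(d));
  return d;
}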
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@@ -1026,27 +1032,22 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
}
-// Runtime FP routines take up to two double arguments and zero
-// or one integer arguments. All are consructed here.
-// from r0-r3 or d0 and d1.
+// Runtime FP routines take:
+// - two double arguments
+// - one double argument and zero or one integer argument.
+// All are constructed here from r0-r3 or d0, d1 and r0.
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = vfp_registers_[1];
- *z = registers_[1];
+ *x = get_double_from_d_register(0);
+ *y = get_double_from_d_register(1);
+ *z = get_register(0);
} else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
- OS::MemCopy(buffer, registers_, sizeof(*x));
- OS::MemCopy(x, buffer, sizeof(*x));
+ *x = get_double_from_register_pair(0);
// Register 2 and 3 -> y.
- OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
- OS::MemCopy(y, buffer, sizeof(*y));
+ *y = get_double_from_register_pair(2);
// Register 2 -> z
- memcpy(buffer, registers_ + 2, sizeof(*z));
- memcpy(z, buffer, sizeof(*z));
+ *z = get_register(2);
}
}
@@ -1718,32 +1719,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
- if (use_eabi_hardfloat()) {
- // With the hard floating point calling convention, double
- // arguments are passed in VFP registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = vfp_registers_[2];
- arg3 = vfp_registers_[3];
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = get_register(0);
- break;
- default:
- break;
- }
- }
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
@@ -3816,19 +3791,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
-double Simulator::CallFP(byte* entry, double d0, double d1) {
+void Simulator::CallFP(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
} else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- OS::MemCopy(buffer, &d0, sizeof(d0));
- set_dw_register(0, buffer);
- OS::MemCopy(buffer, &d1, sizeof(d1));
- set_dw_register(2, buffer);
+ set_register_pair_from_double(0, &d0);
+ set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
+}
+
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ int32_t result = get_register(r0);
+ return result;
+}
+
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {
diff --git a/chromium/v8/src/arm/simulator-arm.h b/chromium/v8/src/arm/simulator-arm.h
index 7fca7432bf7..0af5162e938 100644
--- a/chromium/v8/src/arm/simulator-arm.h
+++ b/chromium/v8/src/arm/simulator-arm.h
@@ -163,6 +163,7 @@ class Simulator {
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
+ void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
@@ -220,7 +221,9 @@ class Simulator {
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
+ void CallFP(byte* entry, double d0, double d1);
+ int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -266,7 +269,7 @@ class Simulator {
// Checks if the current instruction should be executed based on its
// condition bits.
- bool ConditionallyExecute(Instruction* instr);
+ inline bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val);
@@ -444,6 +447,10 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+#define CALL_GENERATED_FP_INT(entry, p0, p1) \
+ Simulator::current(Isolate::Current())->CallFPReturnsInt( \
+ FUNCTION_ADDR(entry), p0, p1)
+
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
diff --git a/chromium/v8/src/arm/stub-cache-arm.cc b/chromium/v8/src/arm/stub-cache-arm.cc
index ba3d362804e..4ca5e27ed71 100644
--- a/chromium/v8/src/arm/stub-cache-arm.cc
+++ b/chromium/v8/src/arm/stub-cache-arm.cc
@@ -376,31 +376,27 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ b(ne, miss);
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
}
@@ -433,7 +429,7 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -453,19 +449,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
// r0 : value
Label exit;
@@ -477,7 +473,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
- __ LoadObject(scratch1, constant);
+ __ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
@@ -617,15 +613,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// r0 : value
Label exit;
@@ -736,9 +732,9 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Operand(name));
@@ -746,36 +742,6 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
}
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- r1: function to call
- // -----------------------------------
-
- // Check that the function really is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
@@ -802,17 +768,12 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ mov(r0, Operand(StubCache::kInterceptorArgsLength));
- __ mov(r1, Operand(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
}
@@ -839,25 +800,26 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : ReturnValue default value
- // -- sp[20] : ReturnValue
- // -- sp[24] : last JS argument
+ // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
+ // : holder (set by CheckPrototypes)
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 5) * 4] : first JS argument
- // -- sp[(argc + 6) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ str(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(r5, function);
+ __ Move(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
+ __ str(r5, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
- // Pass the additional arguments.
+ // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@@ -866,15 +828,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ Move(r6, call_data);
}
- __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate ReturnValue default and ReturnValue.
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
+ // Store call data.
+ __ str(r6, MemOperand(sp, FCA::kDataIndex * kPointerSize));
+ // Store isolate.
+ __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ str(r5, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ str(r5, MemOperand(sp, 4 * kPointerSize));
- __ str(r5, MemOperand(sp, 5 * kPointerSize));
+ __ str(r5, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
+ __ str(r5, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
- __ add(r2, sp, Operand(5 * kPointerSize));
+ __ mov(r2, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -883,18 +848,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // r0 = v8::Arguments&
+ // r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
- // v8::Arguments::implicit_args_
+ // FunctionCallbackInfo::implicit_args_
__ str(r2, MemOperand(r0, 0 * kPointerSize));
- // v8::Arguments::values_
- __ add(ip, r2, Operand(argc * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ add(ip, r2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
+ // FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
+ // FunctionCallbackInfo::is_construct_call = 0
__ mov(ip, Operand::Zero());
__ str(ip, MemOperand(r0, 3 * kPointerSize));
@@ -912,12 +877,19 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ MemOperand return_value_operand(fp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
+
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
r1,
kStackUnwindSpace,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
@@ -931,11 +903,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
+ typedef FunctionCallbackArguments FCA;
const int stack_space = kFastApiCallArguments + argc + 1;
// Assign stack space for the call arguments.
__ sub(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
- __ str(receiver, MemOperand(sp, 0));
+ __ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ str(receiver, MemOperand(sp, index * kPointerSize));
@@ -946,16 +919,16 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ str(receiver, MemOperand(sp, index-- * kPointerSize));
}
- GenerateFastApiDirectCall(masm, optimization, argc);
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
+ CallInterceptorCompiler(CallStubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name,
- Code::ExtraICState extra_ic_state)
+ ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
name_(name),
@@ -1030,9 +1003,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -1046,10 +1020,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
+ handle(lookup->holder()), scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -1060,15 +1034,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
Handle<JSFunction> function = optimization.constant_function();
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ __ Move(r0, receiver);
+ stub_compiler_->GenerateJumpFunction(object, function);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -1095,19 +1066,19 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
// Call a runtime function to load the interceptor property.
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
+
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, interceptor_holder,
+ IC::kLoadPropertyWithInterceptorForCall);
+
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -1121,14 +1092,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(receiver);
__ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, holder_obj,
+ IC::kLoadPropertyWithInterceptorOnly);
+ __ pop(name_);
+ __ pop(holder);
+ __ pop(receiver);
}
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -1136,33 +1107,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ b(ne, interceptor_succeeded);
}
- StubCompiler* stub_compiler_;
+ CallStubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
- Code::ExtraICState extra_ic_state_;
+ ExtraICState extra_ic_state_;
};
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1172,7 +1123,7 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -1182,11 +1133,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Operand(Handle<Map>(object->map())));
+ __ mov(scratch1, Operand(receiver_map));
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1196,29 +1147,36 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1229,8 +1187,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- Handle<Map> current_map(current->map());
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
// CheckMap implicitly loads the map of |reg| into |map_reg|.
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
@@ -1240,9 +1197,14 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
+
reg = holder_reg; // From now on the object will be in holder_reg.
if (heap()->InNewSpace(*prototype)) {
@@ -1256,70 +1218,65 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
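The rewritten chain walk is keyed entirely off maps: dictionary-mode holders get a negative lookup, and a global object encountered along the chain now gets its property-cell check inline, replacing the separate GenerateCheckPropertyCells pass deleted above. A rough JavaScript sketch of why the cell check is needed (glob, o, and f are illustrative names):

    var glob = this;              // the global object (via its proxy)
    var o = Object.create(glob);  // the global sits on o's prototype chain
    function f(obj) { return obj.x; }
    f(o); f(o);   // the compiled handler checks maps plus the (still empty)
                  // property cell for 'x' on the global object
    glob.x = 42;  // the cell is no longer empty, so the cached handler
    f(o);         // misses and the load correctly finds 42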
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ b(success);
+ Label success;
+ __ b(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ b(success);
+ Label success;
+ __ b(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1352,15 +1309,15 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ b(ne, &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1376,36 +1333,36 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadObject(r0, value);
+ __ Move(r0, value);
__ Ret();
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build the AccessorInfo::args_ list on the stack and push the property name
// below the exit frame, so the GC is aware of them, and store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
__ push(receiver());
- __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
__ Move(scratch3(), callback);
__ ldr(scratch3(), FieldMemOperand(scratch3(),
@@ -1419,19 +1376,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Push(scratch3(), scratch4());
__ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), reg, name());
+ __ Push(scratch4(), reg);
+ __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
+ __ push(name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create AccessorInfo instance on the stack above the exit frame with
+ // Create PropertyAccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- const int kStackUnwindSpace = kFastApiCallArguments + 1;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
@@ -1449,13 +1408,14 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
r2,
kStackUnwindSpace,
- 6);
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1504,11 +1464,10 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
@@ -1550,21 +1509,12 @@ void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r0, miss);
- CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
+void CallStubCompiler::GenerateFunctionCheck(Register function,
+ Register scratch,
+ Label* miss) {
+ __ JumpIfSmi(function, miss);
+ __ CompareObjectType(function, scratch, scratch, JS_FUNCTION_TYPE);
+ __ b(ne, miss);
}
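GenerateFunctionCheck factors out the smi-and-JS_FUNCTION_TYPE test that the cell load below and the new GenerateJumpFunction both rely on. A sketch of the case it guards (names illustrative):

    function f() { return 1; }
    function site() { return f(); }
    site(); site();       // the call IC caches a path through f's global cell
    f = "not callable";   // the cell now holds a non-function
    site();               // the cached stub's function check fails, so the
                          // generic path runs and throws TypeError as required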
@@ -1583,9 +1533,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
+ GenerateFunctionCheck(r1, r3, miss);
// Check the shared function info. Make sure it hasn't changed.
__ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
@@ -1602,7 +1550,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_state_);
+ extra_state());
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1611,34 +1559,18 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
Label miss;
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into r0.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r0, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
+ Register reg = HandlerFrontendHeader(
+ object, holder, name, RECEIVER_MAP_CHECK, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
+ GenerateJumpFunction(object, r1, &miss);
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::FIELD, name);
+ return GetCode(Code::FAST, name);
}
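CompileCallField now runs through the shared HandlerFrontendHeader/GenerateJumpFunction pair instead of open-coding the receiver load and miss handling, and the stub is classified Code::FAST rather than Code::FIELD. The call shape it compiles, sketched with illustrative names:

    var counter = {
      count: 0,
      inc: function () { return ++this.count; }  // a plain data field
    };
    function tick() { return counter.inc(); }
    tick(); tick();  // stub: check maps, load the 'inc' field, verify it is
                     // still a function, then jump into it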
@@ -1651,30 +1583,16 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
Code::StubType type) {
Label miss;
- // Check that function is still array
- const int argc = arguments().immediate();
- GenerateNameCheck(name, &miss);
- Register receiver = r1;
-
- if (cell.is_null()) {
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0,
- r4, name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ site->SetElementsKind(GetInitialFastElementsKind());
Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ const int argc = arguments().immediate();
__ mov(r0, Operand(argc));
__ mov(r2, Operand(site_feedback_cell));
__ mov(r1, Operand(function));
@@ -1682,8 +1600,7 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
ArrayConstructorStub stub(isolate());
__ TailCallStub(&stub);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
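The allocation site is now initialized through SetElementsKind() rather than by writing transition_info directly. Roughly what the per-call-site feedback enables, as a sketch:

    function make() { return Array(4); }  // this call site gets its own site
    var a = make();
    a[0] = 1.5;        // transitions a to double elements; the allocation
                       // site can remember the transition so that
    var b = make();    // later arrays from the same site start out better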
@@ -1697,32 +1614,22 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+ // If the object is not an array, or is observed or sealed, bail out to the
+ // regular call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
+ return Handle<Code>::null();
+ }
Label miss;
- GenerateNameCheck(name, &miss);
- Register receiver = r1;
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
- name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ Register receiver = r0;
+ Register scratch = r1;
+ const int argc = arguments().immediate();
if (argc == 0) {
// Nothing to do, just return the length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1741,20 +1648,20 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
- r0,
+ scratch,
Heap::kFixedArrayMapRootIndex,
&check_double,
DONT_DO_SMI_CHECK);
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
+ // Get the array's length into scratch and calculate new length.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ cmp(r0, r4);
+ __ cmp(scratch, r4);
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
@@ -1762,49 +1669,50 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ JumpIfNotSmi(r4, &with_write_barrier);
// Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
__ Drop(argc + 1);
+ __ mov(r0, scratch);
__ Ret();
__ bind(&check_double);
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
- r0,
+ scratch,
Heap::kFixedDoubleArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
+ // Get the array's length into scratch and calculate new length.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ cmp(r0, r4);
+ __ cmp(scratch, r4);
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(r4, r0, elements, r5, d0,
+ __ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
&call_builtin, argc * kDoubleSize);
// Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- // Check for a smi.
__ Drop(argc + 1);
+ __ mov(r0, scratch);
__ Ret();
__ bind(&with_write_barrier);
@@ -1813,15 +1721,15 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r7, &not_fast_object);
+ __ CheckFastObjectElements(r3, r9, &not_fast_object);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r7, &call_builtin);
+ __ CheckFastSmiElements(r3, r9, &call_builtin);
- __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, ip);
+ __ cmp(r9, ip);
__ b(eq, &call_builtin);
// receiver: the array receiver
// r3: map
@@ -1829,7 +1737,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
- r7,
+ r9,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@@ -1842,7 +1750,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
- r7,
+ r9,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@@ -1855,12 +1763,12 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
}
// Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
@@ -1871,11 +1779,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Drop(argc + 1);
+ __ mov(r0, scratch);
__ Ret();
__ bind(&attempt_to_grow_elements);
- // r0: array's length + 1.
- // r4: elements' length.
+ // scratch: array's length + 1.
if (!FLAG_inline_new) {
__ b(&call_builtin);
@@ -1886,8 +1794,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
+ __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r9, r9, &call_builtin);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
@@ -1897,10 +1805,10 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
- __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
+ __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r7));
+ __ mov(r4, Operand(new_space_allocation_top));
+ __ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
@@ -1912,7 +1820,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// We fit and could grow elements.
// Update new_space_allocation_top.
- __ str(r3, MemOperand(r7));
+ __ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
@@ -1922,12 +1830,14 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
}
// Update elements' and array's sizes.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
__ Drop(argc + 1);
+ __ mov(r0, scratch);
__ Ret();
}
__ bind(&call_builtin);
@@ -1935,9 +1845,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
}
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
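The new early-out refuses to specialize push for arrays whose map is observed or non-extensible, since the fast stub could not deliver change records or honor sealed-array semantics. Sketch:

    var a = [1, 2];
    Object.seal(a);   // the map is no longer extensible
    a.push(3);        // no custom stub is compiled; the generic call reaches
                      // the full builtin, which throws TypeError (see the
                      // matching check added to array.js below)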
@@ -1951,38 +1859,28 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+ // If the object is not an array, or is observed or sealed, bail out to the
+ // regular call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
+ return Handle<Code>::null();
+ }
Label miss, return_undefined, call_builtin;
- Register receiver = r1;
+ Register receiver = r0;
+ Register scratch = r1;
Register elements = r3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- r4, r0, name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Get the elements array of the object.
__ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
- r0,
+ scratch,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
@@ -1997,8 +1895,8 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
__ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ cmp(r0, r6);
+ __ ldr(scratch, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ cmp(scratch, r6);
__ b(eq, &call_builtin);
// Set the array's length.
@@ -2006,7 +1904,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// Fill with the hole.
__ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ const int argc = arguments().immediate();
__ Drop(argc + 1);
+ __ mov(r0, scratch);
__ Ret();
__ bind(&return_undefined);
@@ -2018,9 +1918,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2034,43 +1932,26 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
- const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
- Register receiver = r1;
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
+
+ Register receiver = r0;
Register index = r4;
- Register result = r0;
+ Register result = r1;
+ const int argc = arguments().immediate();
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
__ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
@@ -2087,6 +1968,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
__ Drop(argc + 1);
+ __ mov(r0, result);
__ Ret();
StubRuntimeCallHelper call_helper;
@@ -2102,8 +1984,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in r2.
__ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
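Receiver classification for charCodeAt moves into HandlerFrontendHeader with STRING_CHECK, and the result is now produced in r1 and moved into r0 just before returning. Sketch of the guarded fast path:

    function first(s) { return s.charCodeAt(0); }
    first("v8");   // STRING_CHECK passes; the character load is inlined
    first(123);    // a non-string receiver fails the check and misses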
@@ -2117,14 +1998,6 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
@@ -2134,27 +2007,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
+
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = r0;
Register index = r4;
Register scratch = r3;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+ Register result = r1;
if (argc > 0) {
__ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
} else {
@@ -2171,6 +2034,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
__ Drop(argc + 1);
+ __ mov(r0, result);
__ Ret();
StubRuntimeCallHelper call_helper;
@@ -2186,8 +2050,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in r2.
__ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2201,14 +2064,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
@@ -2216,19 +2071,10 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2251,16 +2097,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
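The slow path now goes through GenerateJumpFunctionIgnoreReceiver instead of an open-coded InvokeFunction; the receiver stays unpatched because fromCharCode never reads it. A hedged sketch — the assumption that the fast path only covers the one-byte character cache is mine, not stated in this diff:

    String.fromCharCode(65);     // "A": a small smi code, handled inline
    String.fromCharCode(0x3b1);  // "α": assumed to fall through to the slow
                                 // path, which tail-calls the real builtin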
@@ -2274,31 +2116,16 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2361,15 +2188,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ Ret();
__ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2383,30 +2206,16 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
+
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2459,16 +2268,12 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ Drop(argc + 1);
__ Ret();
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
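Math.abs gets the same treatment as Math.floor above: shared frontend, and a tail call through GenerateJumpFunctionIgnoreReceiver on the slow path. A sketch, assuming 31-bit smis on 32-bit ARM:

    Math.abs(-7);           // smi negate inside the fast stub
    Math.abs(-1073741824);  // |-2^30| overflows the smi range, so the stub
                            // falls through and jumps to the real builtin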
@@ -2510,41 +2315,66 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
ReserveSpaceForFastApiCall(masm(), r0);
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
- depth, &miss);
+ CheckPrototypes(
+ IC::CurrentTypeOf(object, isolate()),
+ r1, holder, r0, r3, r4, name, depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss_before_stack_reserved);
// Return the generated code.
return GetCode(function);
}
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ Label success;
+ // Check that the object is a boolean.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(object, ip);
+ __ b(eq, &success);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(object, ip);
+ __ b(ne, miss);
+ __ bind(&success);
+}
+
+
+void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
+ if (object->IsGlobalObject()) {
+ const int argc = arguments().immediate();
+ const int receiver_offset = argc * kPointerSize;
+ __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, receiver_offset));
+ }
+}
+
+
+Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* miss) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(name, miss);
+
+ Register reg = r0;
// Get the receiver from the stack
const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ const int receiver_offset = argc * kPointerSize;
+ __ ldr(r0, MemOperand(sp, receiver_offset));
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(r1, &miss);
+ __ JumpIfSmi(r0, miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2552,130 +2382,82 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3);
+ __ IncrementCounter(isolate()->counters()->call_const(), 1, r1, r3);
// Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
+ reg = CheckPrototypes(
+ IC::CurrentTypeOf(object, isolate()),
+ reg, holder, r1, r3, r4, name, miss);
break;
- case STRING_CHECK:
+ case STRING_CHECK: {
// Check that the object is a string.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &miss);
+ __ CompareObjectType(reg, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
+ masm(), Context::STRING_FUNCTION_INDEX, r1, miss);
break;
-
- case SYMBOL_CHECK:
+ }
+ case SYMBOL_CHECK: {
// Check that the object is a symbol.
- __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
- __ b(ne, &miss);
+ __ CompareObjectType(reg, r3, r3, SYMBOL_TYPE);
+ __ b(ne, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
+ masm(), Context::SYMBOL_FUNCTION_INDEX, r1, miss);
break;
-
+ }
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
- __ JumpIfSmi(r1, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
+ __ JumpIfSmi(reg, &fast);
+ __ CompareObjectType(reg, r3, r3, HEAP_NUMBER_TYPE);
+ __ b(ne, miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
+ masm(), Context::NUMBER_FUNCTION_INDEX, r1, miss);
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
+ GenerateBooleanCheck(reg, miss);
+
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, r1, miss);
break;
}
}
- __ b(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
+ if (check != RECEIVER_MAP_CHECK) {
+ Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
+ reg = CheckPrototypes(
+ IC::CurrentTypeOf(prototype, isolate()),
+ r1, holder, r1, r3, r4, name, miss);
+ }
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ return reg;
}
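HandlerFrontendHeader now returns the register holding the checked receiver and funnels every non-RECEIVER_MAP_CHECK case through a single CheckPrototypes call on the prototype's type at the end. The receiver classes it dispatches on, sketched (the call sites are illustrative):

    function call(x) { return x.toString(); }
    call({});    // RECEIVER_MAP_CHECK against the object's own map
    call("s");   // STRING_CHECK, then String's prototype chain is validated
    call(42);    // NUMBER_CHECK accepts smis and heap numbers
    call(true);  // BOOLEAN_CHECK via the new GenerateBooleanCheck helper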
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
+void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
+ Register function,
+ Label* miss) {
+ ASSERT(function.is(r1));
+ // Check that the function really is a function.
+ GenerateFunctionCheck(function, r3, miss);
+ PatchGlobalProxy(object);
- // Return the generated code.
- return GetCode(function);
+ // Invoke the function.
+ __ InvokeFunction(r1, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind());
}
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
@@ -2687,7 +2469,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// Get the receiver from the stack.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
+ CallInterceptorCompiler compiler(this, arguments(), r2, extra_state());
compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
&miss);
@@ -2696,14 +2478,12 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// Restore receiver.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
+ GenerateJumpFunction(object, r1, &miss);
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetCode(Code::FAST, name);
}
@@ -2713,10 +2493,6 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
object, holder, cell, function, Handle<String>::cast(name),
@@ -2726,41 +2502,15 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ // Potentially loads a closure that matches the shared function info of the
+ // function, rather than the function itself.
GenerateLoadFunctionFromCell(cell, function, &miss);
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in r1).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- GenerateMissBranch();
+ GenerateJumpFunction(object, r1, function);
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::NORMAL, name);
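Per the new comment, the cell load may produce a different closure than the one the stub was compiled for, as long as the SharedFunctionInfo matches (see the shared-info check in GenerateLoadFunctionFromCell above). Sketch:

    function maker() { return function f() { return 1; }; }
    var f = maker();
    function site() { return f(); }
    site(); site();  // the stub caches a path through f's property cell
    f = maker();     // a different closure, same SharedFunctionInfo
    site();          // still a hit: the check is on shared(), not identity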
@@ -2772,9 +2522,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -2791,7 +2540,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ TailCallExternalReference(store_callback_property, 4, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2800,16 +2549,15 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 1, values);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2879,40 +2627,32 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
- __ mov(scratch1(), Operand(Smi::FromInt(strict_mode())));
- __ push(scratch1()); // strict mode
-
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2965,6 +2705,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -2976,7 +2717,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(r0);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
@@ -2999,17 +2740,14 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<Type> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
- __ CheckMap(
- receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(cell));
@@ -3022,8 +2760,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ b(eq, &miss);
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
+ HandlerFrontendFooter(name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
@@ -3031,12 +2768,12 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Ret();
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
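CompileLoadGlobal now takes an IC type, reuses HandlerFrontendHeader for the receiver checks, and returns a plain handler via GetCode instead of an IC via GetICCode. What the compiled cell read does, sketched (the hole comparison above is what catches deletion of non-DONT_DELETE globals):

    var x = 1;                   // global 'x' is backed by a property cell
    function get() { return x; }
    get(); get();                // the stub loads the cell's current value
    x = 2;
    get();                       // same cell, new value: still a stub hit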
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -3047,18 +2784,26 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
GenerateNameCheck(name, this->name(), &miss);
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
- int receiver_count = receiver_maps->length();
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<Type> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
- __ mov(ip, Operand(receiver_maps->at(current)));
+ __ mov(ip, Operand(map));
__ cmp(map_reg, ip);
+ if (type->Is(Type::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
}
}
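The polymorphic dispatcher now iterates types rather than maps, and a smi receiver no longer misses outright: it jumps to number_case, which is bound at the handler whose type is Number (the smi test leaves the flags set so the following conditional jump is taken). Sketch — the property name is purely illustrative:

    Number.prototype.tag = "num";      // illustrative setup
    function read(o) { return o.tag; }
    read({tag: "obj"});                // handler for the plain-object map
    read(3);                           // smi receiver: jumps straight to the
    read(3.5);                         // Number handler, which heap numbers
                                       // also reach via the map compare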
@@ -3117,12 +2862,12 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
Register key = r0;
Register receiver = r1;
- __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
+ __ UntagAndJumpIfNotSmi(r2, key, &miss);
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
__ Ret();
@@ -3140,14 +2885,14 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
diff --git a/chromium/v8/src/array-iterator.js b/chromium/v8/src/array-iterator.js
index defd7342ab2..a8c5e001c48 100644
--- a/chromium/v8/src/array-iterator.js
+++ b/chromium/v8/src/array-iterator.js
@@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
-var iteratorObjectSymbol = %CreateSymbol(void 0);
-var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0);
-var arrayIterationKindSymbol = %CreateSymbol(void 0);
+var iteratorObjectSymbol = NEW_PRIVATE("iterator_object");
+var arrayIteratorNextIndexSymbol = NEW_PRIVATE("iterator_next");
+var arrayIterationKindSymbol = NEW_PRIVATE("iterator_kind");
function ArrayIterator() {}
@@ -46,9 +46,9 @@ function ArrayIterator() {}
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var iterator = new ArrayIterator;
- iterator[iteratorObjectSymbol] = object;
- iterator[arrayIteratorNextIndexSymbol] = 0;
- iterator[arrayIterationKindSymbol] = kind;
+ SET_PRIVATE(iterator, iteratorObjectSymbol, object);
+ SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
+ SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
return iterator;
}
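Iterator state moves from ordinary symbol-keyed properties to real private symbols through the NEW_PRIVATE/SET_PRIVATE/GET_PRIVATE macros, so the three slots can no longer be read or clobbered from script. Sketch, assuming the harmony iteration flag that exposes values():

    var it = [10, 20].values();
    Object.getOwnPropertyNames(it);  // []: the private slots stay invisible
    it.next();                       // { value: 10, done: false }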
@@ -60,24 +60,24 @@ function CreateIteratorResultObject(value, done) {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
- var array = iterator[iteratorObjectSymbol];
+ var array = GET_PRIVATE(iterator, iteratorObjectSymbol);
if (!array) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
}
- var index = iterator[arrayIteratorNextIndexSymbol];
- var itemKind = iterator[arrayIterationKindSymbol];
+ var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol);
+ var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol);
var length = TO_UINT32(array.length);
// "sparse" is never used.
if (index >= length) {
- iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
- return CreateIteratorResultObject(void 0, true);
+ SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, INFINITY);
+ return CreateIteratorResultObject(UNDEFINED, true);
}
- iterator[arrayIteratorNextIndexSymbol] = index + 1;
+ SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1);
if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
return CreateIteratorResultObject(array[index], false);
diff --git a/chromium/v8/src/array.js b/chromium/v8/src/array.js
index 5f89ebb7a6b..26bf7282e18 100644
--- a/chromium/v8/src/array.js
+++ b/chromium/v8/src/array.js
@@ -399,14 +399,13 @@ function ObservedArrayPop(n) {
n--;
var value = this[n];
- EnqueueSpliceRecord(this, n, [value], 0);
-
try {
BeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [value], 0);
}
return value;
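For all of the observed fast paths in this file, the splice record is now enqueued after EndPerformSplice instead of before BeginPerformSplice, so observers can only see the array in its post-splice state. Sketch (Object.observe was an experimental, flag-guarded API at the time):

    var a = [1, 2, 3];
    Object.observe(a, function (records) {
      // delivered after the pop has fully completed: a.length is 2 here
      var r = records[0];          // { type: "splice", removed: [3], ... }
    });
    a.pop();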
@@ -426,12 +425,17 @@ function ArrayPop() {
return;
}
+ if ($Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.pop"]);
+ }
+
if (%IsObserved(this))
return ObservedArrayPop.call(this, n);
n--;
var value = this[n];
- delete this[n];
+ Delete(this, ToName(n), true);
this.length = n;
return value;
}
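pop now rejects sealed arrays up front and removes the element via Delete() with strict semantics instead of a bare delete. Sketch:

    var a = [1, 2];
    Object.seal(a);
    try { a.pop(); } catch (e) { /* TypeError: sealed array */ }
    a.length;  // still 2: the check fires before anything is mutated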
@@ -441,8 +445,6 @@ function ObservedArrayPush() {
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
- EnqueueSpliceRecord(this, n, [], m);
-
try {
BeginPerformSplice(this);
for (var i = 0; i < m; i++) {
@@ -451,6 +453,7 @@ function ObservedArrayPush() {
this.length = n + m;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [], m);
}
return this.length;
@@ -464,11 +467,16 @@ function ArrayPush() {
["Array.prototype.push"]);
}
+ var n = TO_UINT32(this.length);
+ var m = %_ArgumentsLength();
+ if (m > 0 && $Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.push"]);
+ }
+
if (%IsObserved(this))
return ObservedArrayPush.apply(this, arguments);
- var n = TO_UINT32(this.length);
- var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
@@ -581,14 +589,13 @@ function ArrayReverse() {
function ObservedArrayShift(len) {
var first = this[0];
- EnqueueSpliceRecord(this, 0, [first], 0);
-
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [first], 0);
}
return first;
@@ -607,6 +614,11 @@ function ArrayShift() {
return;
}
+ if ($Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.shift"]);
+ }
+
if (%IsObserved(this))
return ObservedArrayShift.call(this, len);
@@ -627,8 +639,6 @@ function ObservedArrayUnshift() {
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- EnqueueSpliceRecord(this, 0, [], num_arguments);
-
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
@@ -638,6 +648,7 @@ function ObservedArrayUnshift() {
this.length = len + num_arguments;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [], num_arguments);
}
return len + num_arguments;
@@ -649,15 +660,32 @@ function ArrayUnshift(arg1) { // length == 1
["Array.prototype.unshift"]);
}
- if (%IsObserved(this))
- return ObservedArrayUnshift.apply(this, arguments);
-
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
+ var is_sealed = $Object.isSealed(this);
- if (IS_ARRAY(this)) {
+ if (num_arguments > 0 && is_sealed) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.unshift"]);
+ }
+
+ if (%IsObserved(this))
+ return ObservedArrayUnshift.apply(this, arguments);
+
+ if (IS_ARRAY(this) && !is_sealed) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
+ if (num_arguments == 0 && $Object.isFrozen(this)) {
+ // In the zero argument case, values from the prototype come into the
+ // object. This can't be allowed on frozen arrays.
+ for (var i = 0; i < len; i++) {
+ if (!this.hasOwnProperty(i) && !IS_UNDEFINED(this[i])) {
+ throw MakeTypeError("array_functions_on_frozen",
+ ["Array.prototype.shift"]);
+ }
+ }
+ }
+
SimpleMove(this, 0, 0, len, num_arguments);
}
@@ -667,7 +695,7 @@ function ArrayUnshift(arg1) { // length == 1
this.length = len + num_arguments;
- return len + num_arguments;
+ return this.length;
}
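Returning this.length instead of the precomputed len + num_arguments matters when the length store above fails silently in sloppy mode; the return value now reflects the receiver's actual state. A sketch with a hypothetical non-writable length:

    var o = {};
    Object.defineProperty(o, 'length', {value: 0, writable: false});
    Array.prototype.unshift.call(o, 'x');
    // Previously reported len + num_arguments == 1 even though the
    // length store was silently ignored; now returns o.length, still 0.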
@@ -681,7 +709,7 @@ function ArraySlice(start, end) {
var start_i = TO_INTEGER(start);
var end_i = len;
- if (end !== void 0) end_i = TO_INTEGER(end);
+ if (!IS_UNDEFINED(end)) end_i = TO_INTEGER(end);
if (start_i < 0) {
start_i += len;
@@ -806,6 +834,14 @@ function ArraySplice(start, delete_count) {
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
+ if (del_count != num_elements_to_add && $Object.isSealed(this)) {
+ throw MakeTypeError("array_functions_change_sealed",
+ ["Array.prototype.splice"]);
+ } else if (del_count > 0 && $Object.isFrozen(this)) {
+ throw MakeTypeError("array_functions_on_frozen",
+ ["Array.prototype.splice"]);
+ }
+
var use_simple_splice = true;
if (IS_ARRAY(this) &&
num_elements_to_add !== del_count) {
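These splice checks distinguish the two integrity levels: a sealed array tolerates in-place replacement (deletions balanced by insertions, length unchanged), while any removal from a frozen array throws. Sketched behavior under this patch:

    var s = Object.seal([1, 2, 3]);
    s.splice(0, 1, 9);   // OK: del_count == num_elements_to_add, length kept
    s.splice(0, 1);      // throws TypeError: would shrink a sealed array

    var f = Object.freeze([1, 2, 3]);
    f.splice(0, 1, 9);   // throws TypeError: del_count > 0 on a frozen array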
@@ -1020,7 +1056,7 @@ function ArraySort(comparefn) {
var proto_length = indices;
for (var i = from; i < proto_length; i++) {
if (proto.hasOwnProperty(i)) {
- obj[i] = void 0;
+ obj[i] = UNDEFINED;
}
}
} else {
@@ -1028,7 +1064,7 @@ function ArraySort(comparefn) {
var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index &&
proto.hasOwnProperty(index)) {
- obj[index] = void 0;
+ obj[index] = UNDEFINED;
}
}
}
@@ -1065,7 +1101,7 @@ function ArraySort(comparefn) {
if (first_undefined < last_defined) {
// Fill in hole or undefined.
obj[first_undefined] = obj[last_defined];
- obj[last_defined] = void 0;
+ obj[last_defined] = UNDEFINED;
}
}
// If there were any undefineds in the entire array, first_undefined
@@ -1077,12 +1113,12 @@ function ArraySort(comparefn) {
// an undefined should be and vice versa.
var i;
for (i = first_undefined; i < length - num_holes; i++) {
- obj[i] = void 0;
+ obj[i] = UNDEFINED;
}
for (i = length - num_holes; i < length; i++) {
// For compatibility with Webkit, do not expose elements in the prototype.
if (i in %GetPrototype(obj)) {
- obj[i] = void 0;
+ obj[i] = UNDEFINED;
} else {
delete obj[i];
}
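The ArraySort hunks above only swap void 0 for the UNDEFINED macro; the surrounding logic is the spec-mandated tail ordering that sorts defined values first, then undefineds, then holes. For example:

    var a = [3, undefined, 1, , 2];   // index 3 is a hole
    a.sort();                         // [1, 2, 3, undefined, <hole>]
    3 in a;                           // true: an undefined value lives there
    4 in a;                           // false: the hole moved to the end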
diff --git a/chromium/v8/src/arraybuffer.js b/chromium/v8/src/arraybuffer.js
index 4a4f5701465..6125f0f61cb 100644
--- a/chromium/v8/src/arraybuffer.js
+++ b/chromium/v8/src/arraybuffer.js
@@ -81,6 +81,10 @@ function ArrayBufferSlice(start, end) {
return result;
}
+function ArrayBufferIsView(obj) {
+ return %ArrayBufferIsView(obj);
+}
+
function SetUpArrayBuffer() {
%CheckIsBootstrapping();
@@ -93,11 +97,13 @@ function SetUpArrayBuffer() {
InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
+ InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(
+ "isView", ArrayBufferIsView
+ ));
+
InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
"slice", ArrayBufferSlice
));
}
SetUpArrayBuffer();
-
-
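ArrayBufferIsView exposes the ES6 ArrayBuffer.isView predicate, installed above as a non-enumerable static on the constructor; it answers whether a value is a view over a buffer (a typed array or DataView) rather than a raw buffer. Usage:

    ArrayBuffer.isView(new Uint8Array(8));                 // true
    ArrayBuffer.isView(new DataView(new ArrayBuffer(8)));  // true
    ArrayBuffer.isView(new ArrayBuffer(8));                // false
    ArrayBuffer.isView([]);                                // false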
diff --git a/chromium/v8/src/assembler.cc b/chromium/v8/src/assembler.cc
index fbff62dd65e..febae63ea1a 100644
--- a/chromium/v8/src/assembler.cc
+++ b/chromium/v8/src/assembler.cc
@@ -98,6 +98,7 @@ struct DoubleConstant BASE_EMBEDDED {
double negative_infinity;
double canonical_non_hole_nan;
double the_hole_nan;
+ double uint32_bias;
};
static DoubleConstant double_constants;
@@ -207,6 +208,24 @@ CpuFeatureScope::~CpuFeatureScope() {
// -----------------------------------------------------------------------------
+// Implementation of PlatformFeatureScope
+
+PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
+ : old_cross_compile_(CpuFeatures::cross_compile_) {
+ // CpuFeatures is a global singleton; therefore this is only safe in
+ // single-threaded code.
+ ASSERT(Serializer::enabled());
+ uint64_t mask = static_cast<uint64_t>(1) << f;
+ CpuFeatures::cross_compile_ |= mask;
+}
+
+
+PlatformFeatureScope::~PlatformFeatureScope() {
+ CpuFeatures::cross_compile_ = old_cross_compile_;
+}
+
+
+// -----------------------------------------------------------------------------
// Implementation of Label
int Label::pos() const {
@@ -800,14 +819,14 @@ void RelocInfo::Print(Isolate* isolate, FILE* out) {
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder(isolate);
PrintF(out, " (%s) (%p)",
- ref_encoder.NameOfAddress(*target_reference_address()),
- *target_reference_address());
+ ref_encoder.NameOfAddress(target_reference()),
+ target_reference());
} else if (IsCodeTarget(rmode_)) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
target_address());
if (rmode_ == CODE_TARGET_WITH_ID) {
- PrintF(" (id=%d)", static_cast<int>(data_));
+ PrintF(out, " (id=%d)", static_cast<int>(data_));
}
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
@@ -890,6 +909,8 @@ void ExternalReference::SetUp() {
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
+ double_constants.uint32_bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
math_exp_data_mutex = new Mutex();
}
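uint32_bias is 2^32 (4294967296). Generated code uses it to reinterpret a value produced by signed 32-bit arithmetic as its unsigned numeric meaning: when the sign bit is set, adding the bias yields the uint32 value as a double. The arithmetic in JS terms (a sketch of the constant's role only, not of the generated code):

    var kUint32Bias = 4294967296;   // 0xFFFFFFFF + 1, as computed above

    function int32ToUint32(x) {
      return x < 0 ? x + kUint32Bias : x;
    }

    int32ToUint32(-1);          // 4294967295, same as (-1 >>> 0)
    int32ToUint32(0x7fffffff);  // 2147483647, non-negative values unchanged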
@@ -1032,14 +1053,6 @@ ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
}
-ExternalReference ExternalReference::fill_heap_number_with_random_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
-}
-
-
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1048,12 +1061,6 @@ ExternalReference ExternalReference::delete_handle_scope_extensions(
}
-ExternalReference ExternalReference::random_uint32_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
-}
-
-
ExternalReference ExternalReference::get_date_field_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
@@ -1067,6 +1074,13 @@ ExternalReference ExternalReference::get_make_code_young_function(
}
+ExternalReference ExternalReference::get_mark_code_as_executed_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
+}
+
+
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@@ -1315,6 +1329,12 @@ ExternalReference ExternalReference::address_of_the_hole_nan() {
}
+ExternalReference ExternalReference::address_of_uint32_bias() {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.uint32_bias));
+}
+
+
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(
diff --git a/chromium/v8/src/assembler.h b/chromium/v8/src/assembler.h
index 6b399f20823..0c706c450b0 100644
--- a/chromium/v8/src/assembler.h
+++ b/chromium/v8/src/assembler.h
@@ -134,6 +134,18 @@ class CpuFeatureScope BASE_EMBEDDED {
};
+// Enable an unsupported feature within a scope when cross-compiling for a
+// different CPU.
+class PlatformFeatureScope BASE_EMBEDDED {
+ public:
+ explicit PlatformFeatureScope(CpuFeature f);
+ ~PlatformFeatureScope();
+
+ private:
+ uint64_t old_cross_compile_;
+};
+
+
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote known or (yet)
@@ -360,6 +372,9 @@ class RelocInfo BASE_EMBEDDED {
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
double data64() const { return data64_; }
+ uint64_t raw_data64() {
+ return BitCast<uint64_t>(data64_);
+ }
Code* host() const { return host_; }
// Apply a relocation by delta bytes
@@ -378,7 +393,6 @@ class RelocInfo BASE_EMBEDDED {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Object* target_object());
INLINE(Handle<Object> target_object_handle(Assembler* origin));
- INLINE(Object** target_object_address());
INLINE(void set_target_object(Object* target,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Address target_runtime_entry(Assembler* origin));
@@ -389,6 +403,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<Cell> target_cell_handle());
INLINE(void set_target_cell(Cell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+ INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
@@ -412,7 +427,7 @@ class RelocInfo BASE_EMBEDDED {
// Read/modify the reference in the instruction this relocation
// applies to; can only be called if rmode_ is external_reference
- INLINE(Address* target_reference_address());
+ INLINE(Address target_reference());
// Read/modify the address of a call instruction. This is used to relocate
// the break points where straight-line code is patched with a call
@@ -423,6 +438,10 @@ class RelocInfo BASE_EMBEDDED {
INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address());
+ // Wipe out a relocation to a fixed value, used for making snapshots
+ // reproducible.
+ INLINE(void WipeOut());
+
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
@@ -473,12 +492,6 @@ class RelocInfo BASE_EMBEDDED {
double data64_;
};
Code* host_;
- // Code and Embedded Object pointers on some platforms are stored split
- // across two consecutive 32-bit instructions. Heap management
- // routines expect to access these pointers indirectly. The following
- // location provides a place for these pointers to exist naturally
- // when accessed via the Iterator.
- Object* reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
// on some platforms, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address
@@ -705,9 +718,6 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
- static ExternalReference fill_heap_number_with_random_function(
- Isolate* isolate);
- static ExternalReference random_uint32_function(Isolate* isolate);
static ExternalReference transcendental_cache_array_address(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
@@ -715,6 +725,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
+ static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
@@ -798,6 +809,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_negative_infinity();
static ExternalReference address_of_canonical_non_hole_nan();
static ExternalReference address_of_the_hole_nan();
+ static ExternalReference address_of_uint32_bias();
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
diff --git a/chromium/v8/src/ast.cc b/chromium/v8/src/ast.cc
index 823dedee098..681b3d46b86 100644
--- a/chromium/v8/src/ast.cc
+++ b/chromium/v8/src/ast.cc
@@ -82,14 +82,13 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) {
}
-VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
- : Expression(isolate),
+VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position)
+ : Expression(isolate, position),
name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
is_lvalue_(false),
- position_(RelocInfo::kNoPosition),
interface_(var->interface()) {
BindTo(var);
}
@@ -100,13 +99,12 @@ VariableProxy::VariableProxy(Isolate* isolate,
bool is_this,
Interface* interface,
int position)
- : Expression(isolate),
+ : Expression(isolate, position),
name_(name),
var_(NULL),
is_this_(is_this),
is_trivial_(false),
is_lvalue_(false),
- position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsInternalizedString());
@@ -133,15 +131,14 @@ Assignment::Assignment(Isolate* isolate,
Expression* target,
Expression* value,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
target_(target),
value_(value),
- pos_(pos),
binary_operation_(NULL),
assignment_id_(GetNextId(isolate)),
- is_monomorphic_(false),
is_uninitialized_(false),
+ is_pre_monomorphic_(false),
store_mode_(STANDARD_STORE) { }
@@ -234,33 +231,6 @@ bool ObjectLiteral::Property::emit_store() {
}
-bool IsEqualString(void* first, void* second) {
- ASSERT((*reinterpret_cast<String**>(first))->IsString());
- ASSERT((*reinterpret_cast<String**>(second))->IsString());
- Handle<String> h1(reinterpret_cast<String**>(first));
- Handle<String> h2(reinterpret_cast<String**>(second));
- return (*h1)->Equals(*h2);
-}
-
-
-bool IsEqualNumber(void* first, void* second) {
- ASSERT((*reinterpret_cast<Object**>(first))->IsNumber());
- ASSERT((*reinterpret_cast<Object**>(second))->IsNumber());
-
- Handle<Object> h1(reinterpret_cast<Object**>(first));
- Handle<Object> h2(reinterpret_cast<Object**>(second));
- if (h1->IsSmi()) {
- return h2->IsSmi() && *h1 == *h2;
- }
- if (h2->IsSmi()) return false;
- Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1);
- Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2);
- ASSERT(std::isfinite(n1->value()));
- ASSERT(std::isfinite(n2->value()));
- return n1->value() == n2->value();
-}
-
-
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
ZoneAllocationPolicy allocator(zone);
@@ -285,6 +255,170 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
}
+bool ObjectLiteral::IsBoilerplateProperty(ObjectLiteral::Property* property) {
+ return property != NULL &&
+ property->kind() != ObjectLiteral::Property::PROTOTYPE;
+}
+
+
+void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
+ if (!constant_properties_.is_null()) return;
+
+ // Allocate a fixed array to hold all the constant properties.
+ Handle<FixedArray> constant_properties = isolate->factory()->NewFixedArray(
+ boilerplate_properties_ * 2, TENURED);
+
+ int position = 0;
+ // Accumulate the value in local variables and store it at the end.
+ bool is_simple = true;
+ int depth_acc = 1;
+ uint32_t max_element_index = 0;
+ uint32_t elements = 0;
+ for (int i = 0; i < properties()->length(); i++) {
+ ObjectLiteral::Property* property = properties()->at(i);
+ if (!IsBoilerplateProperty(property)) {
+ is_simple = false;
+ continue;
+ }
+ MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+ if (m_literal != NULL) {
+ m_literal->BuildConstants(isolate);
+ if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
+ }
+
+ // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
+ // value for COMPUTED properties, the real value is filled in at
+ // runtime. The enumeration order is maintained.
+ Handle<Object> key = property->key()->value();
+ Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+
+ // Ensure objects that may, at any point in time, contain fields with double
+ // representation are always treated as nested objects. This is true for
+ // computed fields (value is undefined), and smi and double literals
+ // (value->IsNumber()).
+ // TODO(verwaest): Remove once we can store them inline.
+ if (FLAG_track_double_fields &&
+ (value->IsNumber() || value->IsUninitialized())) {
+ may_store_doubles_ = true;
+ }
+
+ is_simple = is_simple && !value->IsUninitialized();
+
+ // Keep track of the number of elements in the object literal and
+ // the largest element index. If the largest element index is
+ // much larger than the number of elements, creating an object
+ // literal with fast elements will be a waste of space.
+ uint32_t element_index = 0;
+ if (key->IsString()
+ && Handle<String>::cast(key)->AsArrayIndex(&element_index)
+ && element_index > max_element_index) {
+ max_element_index = element_index;
+ elements++;
+ } else if (key->IsSmi()) {
+ int key_value = Smi::cast(*key)->value();
+ if (key_value > 0
+ && static_cast<uint32_t>(key_value) > max_element_index) {
+ max_element_index = key_value;
+ }
+ elements++;
+ }
+
+ // Add name, value pair to the fixed array.
+ constant_properties->set(position++, *key);
+ constant_properties->set(position++, *value);
+ }
+
+ constant_properties_ = constant_properties;
+ fast_elements_ =
+ (max_element_index <= 32) || ((2 * elements) >= max_element_index);
+ set_is_simple(is_simple);
+ set_depth(depth_acc);
+}
+
+
+void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
+ if (!constant_elements_.is_null()) return;
+
+ // Allocate a fixed array to hold all the object literals.
+ Handle<JSArray> array =
+ isolate->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
+ isolate->factory()->SetElementsCapacityAndLength(
+ array, values()->length(), values()->length());
+
+ // Fill in the literals.
+ bool is_simple = true;
+ int depth_acc = 1;
+ bool is_holey = false;
+ for (int i = 0, n = values()->length(); i < n; i++) {
+ Expression* element = values()->at(i);
+ MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
+ if (m_literal != NULL) {
+ m_literal->BuildConstants(isolate);
+ if (m_literal->depth() + 1 > depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
+ }
+ Handle<Object> boilerplate_value = GetBoilerplateValue(element, isolate);
+ if (boilerplate_value->IsTheHole()) {
+ is_holey = true;
+ } else if (boilerplate_value->IsUninitialized()) {
+ is_simple = false;
+ JSObject::SetOwnElement(
+ array, i, handle(Smi::FromInt(0), isolate), kNonStrictMode);
+ } else {
+ JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
+ }
+ }
+
+ Handle<FixedArrayBase> element_values(array->elements());
+
+ // Simple and shallow arrays can be lazily copied, so we transform the
+ // elements array to a copy-on-write array.
+ if (is_simple && depth_acc == 1 && values()->length() > 0 &&
+ array->HasFastSmiOrObjectElements()) {
+ element_values->set_map(isolate->heap()->fixed_cow_array_map());
+ }
+
+ // Remember both the literal's constant values as well as the ElementsKind
+ // in a 2-element FixedArray.
+ Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
+
+ ElementsKind kind = array->GetElementsKind();
+ kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
+
+ literals->set(0, Smi::FromInt(kind));
+ literals->set(1, *element_values);
+
+ constant_elements_ = literals;
+ set_is_simple(is_simple);
+ set_depth(depth_acc);
+}
+
+
+Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
+ Isolate* isolate) {
+ if (expression->AsLiteral() != NULL) {
+ return expression->AsLiteral()->value();
+ }
+ if (CompileTimeValue::IsCompileTimeValue(expression)) {
+ return CompileTimeValue::GetValue(isolate, expression);
+ }
+ return isolate->factory()->uninitialized_value();
+}
+
+
+void MaterializedLiteral::BuildConstants(Isolate* isolate) {
+ if (IsArrayLiteral()) {
+ return AsArrayLiteral()->BuildConstantElements(isolate);
+ }
+ if (IsObjectLiteral()) {
+ return AsObjectLiteral()->BuildConstantProperties(isolate);
+ }
+ ASSERT(IsRegExpLiteral());
+ ASSERT(depth() >= 1); // Depth should be initialized.
+}
+
+
void TargetCollector::AddTarget(Label* target, Zone* zone) {
// Add the label to the collector, but discard duplicates.
int length = targets_.length();
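The fast_elements_ decision at the end of BuildConstantProperties above is a density heuristic: index ranges up to 32 always get fast (array-backed) elements, and larger ranges qualify only if at least half the slots up to the maximum index would be occupied. Restated as a sketch (hypothetical helper name):

    // Restates: (max_element_index <= 32) ||
    //           ((2 * elements) >= max_element_index)
    function useFastElements(maxElementIndex, elementCount) {
      return maxElementIndex <= 32 || 2 * elementCount >= maxElementIndex;
    }

    useFastElements(10, 3);      // true: small index range
    useFastElements(1000, 4);    // false: {0: .., 999: ..} wastes space
    useFastElements(1000, 600);  // true: dense enough for 1000 slots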
@@ -440,100 +574,11 @@ bool FunctionDeclaration::IsInlineable() const {
// once we use the common type field in the AST consistently.
-void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- for_in_type_ = static_cast<ForInType>(oracle->ForInType(this));
-}
-
-
void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
to_boolean_types_ = oracle->ToBooleanTypes(test_id());
}
-void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- // Record type feedback from the oracle in the AST.
- is_uninitialized_ = oracle->LoadIsUninitialized(this);
- if (is_uninitialized_) return;
-
- is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
- receiver_types_.Clear();
- if (key()->IsPropertyName()) {
- FunctionPrototypeStub proto_stub(Code::LOAD_IC);
- StringLengthStub string_stub(Code::LOAD_IC, false);
- if (oracle->LoadIsStub(this, &string_stub)) {
- is_string_length_ = true;
- } else if (oracle->LoadIsStub(this, &proto_stub)) {
- is_function_prototype_ = true;
- } else {
- Literal* lit_key = key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->value()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->value());
- oracle->LoadReceiverTypes(this, name, &receiver_types_);
- }
- } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
- is_string_access_ = true;
- } else if (is_monomorphic_) {
- receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
- zone);
- } else if (oracle->LoadIsPolymorphic(this)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
- }
-}
-
-
-void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- Property* prop = target()->AsProperty();
- ASSERT(prop != NULL);
- TypeFeedbackId id = AssignmentFeedbackId();
- is_uninitialized_ = oracle->StoreIsUninitialized(id);
- if (is_uninitialized_) return;
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
- receiver_types_.Clear();
- if (prop->key()->IsPropertyName()) {
- Literal* lit_key = prop->key()->AsLiteral();
- ASSERT(lit_key != NULL && lit_key->value()->IsString());
- Handle<String> name = Handle<String>::cast(lit_key->value());
- oracle->StoreReceiverTypes(this, name, &receiver_types_);
- } else if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
- store_mode_ = oracle->GetStoreMode(id);
- } else if (oracle->StoreIsKeyedPolymorphic(id)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
- store_mode_ = oracle->GetStoreMode(id);
- }
-}
-
-
-void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
- Zone* zone) {
- TypeFeedbackId id = CountStoreFeedbackId();
- is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
- receiver_types_.Clear();
- if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed stores.
- receiver_types_.Add(
- oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsKeyedPolymorphic(id)) {
- receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
- oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
- } else {
- oracle->CollectPolymorphicStoreReceiverTypes(id, &receiver_types_);
- }
- store_mode_ = oracle->GetStoreMode(id);
- type_ = oracle->IncrementType(this);
-}
-
-
-void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- compare_type_ = oracle->ClauseType(CompareId());
-}
-
-
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
// If there is an interceptor, we can't compute the target for a direct call.
if (type->has_named_interceptor()) return false;
@@ -636,26 +681,27 @@ Handle<JSObject> Call::GetPrototypeForPrimitiveCheck(
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
- is_monomorphic_ = oracle->CallIsMonomorphic(this);
+ is_monomorphic_ = oracle->CallIsMonomorphic(CallFeedbackId());
Property* property = expression()->AsProperty();
if (property == NULL) {
// Function call. Specialize for monomorphic calls.
- if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
- } else {
+ if (is_monomorphic_) target_ = oracle->GetCallTarget(CallFeedbackId());
+ } else if (property->key()->IsPropertyName()) {
// Method call. Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->value()->IsString());
Handle<String> name = Handle<String>::cast(key->value());
- check_type_ = oracle->GetCallCheckType(this);
+ check_type_ = oracle->GetCallCheckType(CallFeedbackId());
receiver_types_.Clear();
if (check_type_ == RECEIVER_MAP_CHECK) {
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ oracle->CallReceiverTypes(CallFeedbackId(),
+ name, arguments()->length(), call_kind, &receiver_types_);
is_monomorphic_ = is_monomorphic_ && receiver_types_.length() > 0;
} else {
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
receiver_types_.Add(handle(holder_->map()), oracle->zone());
}
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
@@ -668,15 +714,21 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Handle<Map> map = receiver_types_.first();
is_monomorphic_ = ComputeTarget(map, name);
}
+ } else {
+ if (is_monomorphic_) {
+ keyed_array_call_is_holey_ =
+ oracle->KeyedArrayCallIsHoley(CallFeedbackId());
+ }
}
}
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- allocation_info_cell_ = oracle->GetCallNewAllocationInfoCell(this);
- is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
+ allocation_info_cell_ =
+ oracle->GetCallNewAllocationInfoCell(CallNewFeedbackId());
+ is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackId());
if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(this);
+ target_ = oracle->GetCallNewTarget(CallNewFeedbackId());
Object* value = allocation_info_cell_->value();
ASSERT(!value->IsTheHole());
if (value->IsAllocationSite()) {
@@ -688,9 +740,11 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
- ? oracle->GetObjectLiteralStoreMap(this)
- : Handle<Map>::null();
+ TypeFeedbackId id = key()->LiteralFeedbackId();
+ SmallMapList maps;
+ oracle->CollectReceiverTypes(id, &maps);
+ receiver_type_ = maps.length() == 1 ? maps.at(0)
+ : Handle<Map>::null();
}
@@ -1067,9 +1121,9 @@ CaseClause::CaseClause(Isolate* isolate,
Expression* label,
ZoneList<Statement*>* statements,
int pos)
- : label_(label),
+ : AstNode(pos),
+ label_(label),
statements_(statements),
- position_(pos),
compare_type_(Type::None(), isolate),
compare_id_(AstNode::GetNextId(isolate)),
entry_id_(AstNode::GetNextId(isolate)) {
@@ -1111,6 +1165,7 @@ REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
+REGULAR_NODE(CaseClause)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ArrayLiteral)
@@ -1146,7 +1201,7 @@ DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
-DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
+DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
diff --git a/chromium/v8/src/ast.h b/chromium/v8/src/ast.h
index c63090687b9..4e413c5426e 100644
--- a/chromium/v8/src/ast.h
+++ b/chromium/v8/src/ast.h
@@ -97,7 +97,7 @@ namespace internal {
#define EXPRESSION_NODE_LIST(V) \
V(FunctionLiteral) \
- V(SharedFunctionInfoLiteral) \
+ V(NativeFunctionLiteral) \
V(Conditional) \
V(VariableProxy) \
V(Literal) \
@@ -117,11 +117,15 @@ namespace internal {
V(CompareOperation) \
V(ThisFunction)
+#define AUXILIARY_NODE_LIST(V) \
+ V(CaseClause)
+
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
MODULE_NODE_LIST(V) \
STATEMENT_NODE_LIST(V) \
- EXPRESSION_NODE_LIST(V)
+ EXPRESSION_NODE_LIST(V) \
+ AUXILIARY_NODE_LIST(V)
// Forward declarations
class AstConstructionVisitor;
@@ -206,12 +210,12 @@ class AstNode: public ZoneObject {
return zone->New(static_cast<int>(size));
}
- AstNode() {}
-
+ explicit AstNode(int position): position_(position) {}
virtual ~AstNode() {}
virtual void Accept(AstVisitor* v) = 0;
virtual NodeType node_type() const = 0;
+ int position() const { return position_; }
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -248,21 +252,17 @@ class AstNode: public ZoneObject {
void* operator new(size_t size);
friend class CaseClause; // Generates AST IDs.
+
+ int position_;
};
class Statement : public AstNode {
public:
- Statement() : statement_pos_(RelocInfo::kNoPosition) {}
+ explicit Statement(int position) : AstNode(position) {}
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
-
- void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
- int statement_pos() const { return statement_pos_; }
-
- private:
- int statement_pos_;
};
@@ -279,9 +279,8 @@ class SmallMapList V8_FINAL {
int length() const { return list_.length(); }
void AddMapIfMissing(Handle<Map> map, Zone* zone) {
- Map* updated = map->CurrentMapForDeprecated();
- if (updated == NULL) return;
- map = Handle<Map>(updated);
+ map = Map::CurrentMapForDeprecated(map);
+ if (map.is_null()) return;
for (int i = 0; i < length(); ++i) {
if (at(i).is_identical_to(map)) return;
}
@@ -329,11 +328,6 @@ class Expression : public AstNode {
kTest
};
- virtual int position() const {
- UNREACHABLE();
- return 0;
- }
-
virtual bool IsValidLeftHandSide() { return false; }
// Helpers for ToBoolean conversion.
@@ -387,8 +381,9 @@ class Expression : public AstNode {
TypeFeedbackId test_id() const { return test_id_; }
protected:
- explicit Expression(Isolate* isolate)
- : bounds_(Bounds::Unbounded(isolate)),
+ Expression(Isolate* isolate, int pos)
+ : AstNode(pos),
+ bounds_(Bounds::Unbounded(isolate)),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
@@ -431,8 +426,10 @@ class BreakableStatement : public Statement {
protected:
BreakableStatement(
- Isolate* isolate, ZoneStringList* labels, BreakableType breakable_type)
- : labels_(labels),
+ Isolate* isolate, ZoneStringList* labels,
+ BreakableType breakable_type, int position)
+ : Statement(position),
+ labels_(labels),
breakable_type_(breakable_type),
entry_id_(GetNextId(isolate)),
exit_id_(GetNextId(isolate)) {
@@ -473,8 +470,9 @@ class Block V8_FINAL : public BreakableStatement {
ZoneStringList* labels,
int capacity,
bool is_initializer_block,
+ int pos,
Zone* zone)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+ : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY, pos),
statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
scope_(NULL) {
@@ -498,8 +496,10 @@ class Declaration : public AstNode {
protected:
Declaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope)
- : proxy_(proxy),
+ Scope* scope,
+ int pos)
+ : AstNode(pos),
+ proxy_(proxy),
mode_(mode),
scope_(scope) {
ASSERT(IsDeclaredVariableMode(mode));
@@ -525,8 +525,9 @@ class VariableDeclaration V8_FINAL : public Declaration {
protected:
VariableDeclaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope)
- : Declaration(proxy, mode, scope) {
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, mode, scope, pos) {
}
};
@@ -545,8 +546,9 @@ class FunctionDeclaration V8_FINAL : public Declaration {
FunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
- Scope* scope)
- : Declaration(proxy, mode, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, mode, scope, pos),
fun_(fun) {
// At the moment there are no "const functions" in JavaScript...
ASSERT(mode == VAR || mode == LET);
@@ -570,8 +572,9 @@ class ModuleDeclaration V8_FINAL : public Declaration {
protected:
ModuleDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope)
- : Declaration(proxy, MODULE, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, MODULE, scope, pos),
module_(module) {
}
@@ -592,8 +595,9 @@ class ImportDeclaration V8_FINAL : public Declaration {
protected:
ImportDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope)
- : Declaration(proxy, LET, scope),
+ Scope* scope,
+ int pos)
+ : Declaration(proxy, LET, scope, pos),
module_(module) {
}
@@ -611,8 +615,8 @@ class ExportDeclaration V8_FINAL : public Declaration {
}
protected:
- ExportDeclaration(VariableProxy* proxy, Scope* scope)
- : Declaration(proxy, LET, scope) {}
+ ExportDeclaration(VariableProxy* proxy, Scope* scope, int pos)
+ : Declaration(proxy, LET, scope, pos) {}
};
@@ -622,11 +626,13 @@ class Module : public AstNode {
Block* body() const { return body_; }
protected:
- explicit Module(Zone* zone)
- : interface_(Interface::NewModule(zone)),
+ Module(Zone* zone, int pos)
+ : AstNode(pos),
+ interface_(Interface::NewModule(zone)),
body_(NULL) {}
- explicit Module(Interface* interface, Block* body = NULL)
- : interface_(interface),
+ Module(Interface* interface, int pos, Block* body = NULL)
+ : AstNode(pos),
+ interface_(interface),
body_(body) {}
private:
@@ -640,7 +646,8 @@ class ModuleLiteral V8_FINAL : public Module {
DECLARE_NODE_TYPE(ModuleLiteral)
protected:
- ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {}
+ ModuleLiteral(Block* body, Interface* interface, int pos)
+ : Module(interface, pos, body) {}
};
@@ -651,7 +658,7 @@ class ModuleVariable V8_FINAL : public Module {
VariableProxy* proxy() const { return proxy_; }
protected:
- inline explicit ModuleVariable(VariableProxy* proxy);
+ inline ModuleVariable(VariableProxy* proxy, int pos);
private:
VariableProxy* proxy_;
@@ -666,8 +673,8 @@ class ModulePath V8_FINAL : public Module {
Handle<String> name() const { return name_; }
protected:
- ModulePath(Module* module, Handle<String> name, Zone* zone)
- : Module(zone),
+ ModulePath(Module* module, Handle<String> name, Zone* zone, int pos)
+ : Module(zone, pos),
module_(module),
name_(name) {
}
@@ -685,8 +692,8 @@ class ModuleUrl V8_FINAL : public Module {
Handle<String> url() const { return url_; }
protected:
- ModuleUrl(Handle<String> url, Zone* zone)
- : Module(zone), url_(url) {
+ ModuleUrl(Handle<String> url, Zone* zone, int pos)
+ : Module(zone, pos), url_(url) {
}
private:
@@ -702,8 +709,9 @@ class ModuleStatement V8_FINAL : public Statement {
Block* body() const { return body_; }
protected:
- ModuleStatement(VariableProxy* proxy, Block* body)
- : proxy_(proxy),
+ ModuleStatement(VariableProxy* proxy, Block* body, int pos)
+ : Statement(pos),
+ proxy_(proxy),
body_(body) {
}
@@ -730,8 +738,8 @@ class IterationStatement : public BreakableStatement {
Label* continue_target() { return &continue_target_; }
protected:
- IterationStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ IterationStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
body_(NULL),
osr_entry_id_(GetNextId(isolate)) {
}
@@ -759,20 +767,14 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
Expression* cond() const { return cond_; }
- // Position where condition expression starts. We need it to make
- // the loop's condition a breakable location.
- int condition_position() { return condition_position_; }
- void set_condition_position(int pos) { condition_position_ = pos; }
-
virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ DoWhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
cond_(NULL),
- condition_position_(-1),
continue_id_(GetNextId(isolate)),
back_edge_id_(GetNextId(isolate)) {
}
@@ -780,8 +782,6 @@ class DoWhileStatement V8_FINAL : public IterationStatement {
private:
Expression* cond_;
- int condition_position_;
-
const BailoutId continue_id_;
const BailoutId back_edge_id_;
};
@@ -809,8 +809,8 @@ class WhileStatement V8_FINAL : public IterationStatement {
BailoutId BodyId() const { return body_id_; }
protected:
- WhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ WhileStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
cond_(NULL),
may_have_function_literal_(true),
body_id_(GetNextId(isolate)) {
@@ -860,8 +860,8 @@ class ForStatement V8_FINAL : public IterationStatement {
void set_loop_variable(Variable* var) { loop_variable_ = var; }
protected:
- ForStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ ForStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
init_(NULL),
cond_(NULL),
next_(NULL),
@@ -902,8 +902,8 @@ class ForEachStatement : public IterationStatement {
Expression* subject() const { return subject_; }
protected:
- ForEachStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
+ ForEachStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : IterationStatement(isolate, labels, pos),
each_(NULL),
subject_(NULL) {
}
@@ -923,9 +923,9 @@ class ForInStatement V8_FINAL : public ForEachStatement {
}
TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
+ void set_for_in_type(ForInType type) { for_in_type_ = type; }
BailoutId BodyId() const { return body_id_; }
BailoutId PrepareId() const { return prepare_id_; }
@@ -933,8 +933,8 @@ class ForInStatement V8_FINAL : public ForEachStatement {
virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
protected:
- ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : ForEachStatement(isolate, labels),
+ ForInStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : ForEachStatement(isolate, labels, pos),
for_in_type_(SLOW_FOR_IN),
body_id_(GetNextId(isolate)),
prepare_id_(GetNextId(isolate)) {
@@ -994,8 +994,8 @@ class ForOfStatement V8_FINAL : public ForEachStatement {
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
- ForOfStatement(Isolate* isolate, ZoneStringList* labels)
- : ForEachStatement(isolate, labels),
+ ForOfStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : ForEachStatement(isolate, labels, pos),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
@@ -1020,8 +1020,8 @@ class ExpressionStatement V8_FINAL : public Statement {
virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
protected:
- explicit ExpressionStatement(Expression* expression)
- : expression_(expression) { }
+ ExpressionStatement(Expression* expression, int pos)
+ : Statement(pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1033,7 +1033,7 @@ class JumpStatement : public Statement {
virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
protected:
- JumpStatement() {}
+ explicit JumpStatement(int pos) : Statement(pos) {}
};
@@ -1044,8 +1044,8 @@ class ContinueStatement V8_FINAL : public JumpStatement {
IterationStatement* target() const { return target_; }
protected:
- explicit ContinueStatement(IterationStatement* target)
- : target_(target) { }
+ explicit ContinueStatement(IterationStatement* target, int pos)
+ : JumpStatement(pos), target_(target) { }
private:
IterationStatement* target_;
@@ -1059,8 +1059,8 @@ class BreakStatement V8_FINAL : public JumpStatement {
BreakableStatement* target() const { return target_; }
protected:
- explicit BreakStatement(BreakableStatement* target)
- : target_(target) { }
+ explicit BreakStatement(BreakableStatement* target, int pos)
+ : JumpStatement(pos), target_(target) { }
private:
BreakableStatement* target_;
@@ -1074,8 +1074,8 @@ class ReturnStatement V8_FINAL : public JumpStatement {
Expression* expression() const { return expression_; }
protected:
- explicit ReturnStatement(Expression* expression)
- : expression_(expression) { }
+ explicit ReturnStatement(Expression* expression, int pos)
+ : JumpStatement(pos), expression_(expression) { }
private:
Expression* expression_;
@@ -1091,8 +1091,10 @@ class WithStatement V8_FINAL : public Statement {
Statement* statement() const { return statement_; }
protected:
- WithStatement(Scope* scope, Expression* expression, Statement* statement)
- : scope_(scope),
+ WithStatement(
+ Scope* scope, Expression* expression, Statement* statement, int pos)
+ : Statement(pos),
+ scope_(scope),
expression_(expression),
statement_(statement) { }
@@ -1103,12 +1105,9 @@ class WithStatement V8_FINAL : public Statement {
};
-class CaseClause V8_FINAL : public ZoneObject {
+class CaseClause V8_FINAL : public AstNode {
public:
- CaseClause(Isolate* isolate,
- Expression* label,
- ZoneList<Statement*>* statements,
- int pos);
+ DECLARE_NODE_TYPE(CaseClause)
bool is_default() const { return label_ == NULL; }
Expression* label() const {
@@ -1118,21 +1117,22 @@ class CaseClause V8_FINAL : public ZoneObject {
Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
- int position() const { return position_; }
- void set_position(int pos) { position_ = pos; }
-
BailoutId EntryId() const { return entry_id_; }
// Type feedback information.
TypeFeedbackId CompareId() { return compare_id_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
Handle<Type> compare_type() { return compare_type_; }
+ void set_compare_type(Handle<Type> type) { compare_type_ = type; }
private:
+ CaseClause(Isolate* isolate,
+ Expression* label,
+ ZoneList<Statement*>* statements,
+ int pos);
+
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
- int position_;
Handle<Type> compare_type_;
const TypeFeedbackId compare_id_;
@@ -1158,8 +1158,8 @@ class SwitchStatement V8_FINAL : public BreakableStatement {
void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; }
protected:
- SwitchStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ SwitchStatement(Isolate* isolate, ZoneStringList* labels, int pos)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos),
tag_(NULL),
cases_(NULL) { }
@@ -1199,8 +1199,10 @@ class IfStatement V8_FINAL : public Statement {
IfStatement(Isolate* isolate,
Expression* condition,
Statement* then_statement,
- Statement* else_statement)
- : condition_(condition),
+ Statement* else_statement,
+ int pos)
+ : Statement(pos),
+ condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
if_id_(GetNextId(isolate)),
@@ -1222,7 +1224,8 @@ class IfStatement V8_FINAL : public Statement {
// stack in the compiler; this should probably be reworked.
class TargetCollector V8_FINAL : public AstNode {
public:
- explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
+ explicit TargetCollector(Zone* zone)
+ : AstNode(RelocInfo::kNoPosition), targets_(0, zone) { }
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
@@ -1252,8 +1255,9 @@ class TryStatement : public Statement {
ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
protected:
- TryStatement(int index, Block* try_block)
- : index_(index),
+ TryStatement(int index, Block* try_block, int pos)
+ : Statement(pos),
+ index_(index),
try_block_(try_block),
escaping_targets_(NULL) { }
@@ -1279,8 +1283,9 @@ class TryCatchStatement V8_FINAL : public TryStatement {
Block* try_block,
Scope* scope,
Variable* variable,
- Block* catch_block)
- : TryStatement(index, try_block),
+ Block* catch_block,
+ int pos)
+ : TryStatement(index, try_block, pos),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
@@ -1300,8 +1305,9 @@ class TryFinallyStatement V8_FINAL : public TryStatement {
Block* finally_block() const { return finally_block_; }
protected:
- TryFinallyStatement(int index, Block* try_block, Block* finally_block)
- : TryStatement(index, try_block),
+ TryFinallyStatement(
+ int index, Block* try_block, Block* finally_block, int pos)
+ : TryStatement(index, try_block, pos),
finally_block_(finally_block) { }
private:
@@ -1314,7 +1320,7 @@ class DebuggerStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(DebuggerStatement)
protected:
- DebuggerStatement() {}
+ explicit DebuggerStatement(int pos): Statement(pos) {}
};
@@ -1323,7 +1329,7 @@ class EmptyStatement V8_FINAL : public Statement {
DECLARE_NODE_TYPE(EmptyStatement)
protected:
- EmptyStatement() {}
+ explicit EmptyStatement(int pos): Statement(pos) {}
};
@@ -1380,8 +1386,9 @@ class Literal V8_FINAL : public Expression {
TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
protected:
- Literal(Isolate* isolate, Handle<Object> value)
- : Expression(isolate),
+ Literal(
+ Isolate* isolate, Handle<Object> value, int position)
+ : Expression(isolate, position),
value_(value),
isolate_(isolate) { }
@@ -1401,21 +1408,43 @@ class MaterializedLiteral : public Expression {
int literal_index() { return literal_index_; }
- // A materialized literal is simple if the values consist of only
- // constants and simple object and array literals.
- bool is_simple() const { return is_simple_; }
-
- int depth() const { return depth_; }
+ int depth() const {
+ // Only callable after initialization.
+ ASSERT(depth_ >= 1);
+ return depth_;
+ }
protected:
MaterializedLiteral(Isolate* isolate,
int literal_index,
- bool is_simple,
- int depth)
- : Expression(isolate),
+ int pos)
+ : Expression(isolate, pos),
literal_index_(literal_index),
- is_simple_(is_simple),
- depth_(depth) {}
+ is_simple_(false),
+ depth_(0) {}
+
+ // A materialized literal is simple if the values consist of only
+ // constants and simple object and array literals.
+ bool is_simple() const { return is_simple_; }
+ void set_is_simple(bool is_simple) { is_simple_ = is_simple; }
+ friend class CompileTimeValue;
+
+ void set_depth(int depth) {
+ ASSERT(depth >= 1);
+ depth_ = depth;
+ }
+
+ // Populate the constant properties/elements fixed array.
+ void BuildConstants(Isolate* isolate);
+ friend class ArrayLiteral;
+ friend class ObjectLiteral;
+
+ // If the expression is a literal, return the literal value;
+ // if the expression is a materialized literal and is simple, return a
+ // compile time value as encoded by CompileTimeValue::GetValue().
+ // Otherwise, return undefined literal as the placeholder
+ // in the object literal boilerplate.
+ Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
private:
int literal_index_;
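depth() now asserts depth_ >= 1 because the field starts at 0 and is only set once BuildConstants has run; reading it earlier is a bug. The notion itself is plain nesting depth of the literal. A rough user-land analogue (hypothetical helper, not V8 code):

    // Scalars contribute 0; each nested object/array literal level adds 1,
    // so any materialized literal ends up with depth >= 1.
    function literalDepth(v) {
      if (typeof v !== 'object' || v === null) return 0;
      var d = 1;
      for (var k in v) {
        d = Math.max(d, 1 + literalDepth(v[k]));
      }
      return d;
    }

    literalDepth({a: 1});         // 1
    literalDepth({a: {b: [1]}});  // 3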
@@ -1484,6 +1513,12 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
+ // Decide if a property should be in the object boilerplate.
+ static bool IsBoilerplateProperty(Property* property);
+
+ // Populate the constant properties fixed array.
+ void BuildConstantProperties(Isolate* isolate);
+
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
@@ -1503,24 +1538,22 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral {
protected:
ObjectLiteral(Isolate* isolate,
- Handle<FixedArray> constant_properties,
ZoneList<Property*>* properties,
int literal_index,
- bool is_simple,
- bool fast_elements,
- int depth,
- bool may_store_doubles,
- bool has_function)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
- constant_properties_(constant_properties),
+ int boilerplate_properties,
+ bool has_function,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, pos),
properties_(properties),
- fast_elements_(fast_elements),
- may_store_doubles_(may_store_doubles),
+ boilerplate_properties_(boilerplate_properties),
+ fast_elements_(false),
+ may_store_doubles_(false),
has_function_(has_function) {}
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
+ int boilerplate_properties_;
bool fast_elements_;
bool may_store_doubles_;
bool has_function_;
@@ -1539,16 +1572,20 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral {
RegExpLiteral(Isolate* isolate,
Handle<String> pattern,
Handle<String> flags,
- int literal_index)
- : MaterializedLiteral(isolate, literal_index, false, 1),
+ int literal_index,
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, pos),
pattern_(pattern),
- flags_(flags) {}
+ flags_(flags) {
+ set_depth(1);
+ }
private:
Handle<String> pattern_;
Handle<String> flags_;
};
+
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
class ArrayLiteral V8_FINAL : public MaterializedLiteral {
@@ -1563,15 +1600,21 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral {
return BailoutId(first_element_id_.ToInt() + i);
}
+ // Populate the constant elements fixed array.
+ void BuildConstantElements(Isolate* isolate);
+
+ enum Flags {
+ kNoFlags = 0,
+ kShallowElements = 1,
+ kDisableMementos = 1 << 1
+ };
+
protected:
ArrayLiteral(Isolate* isolate,
- Handle<FixedArray> constant_elements,
ZoneList<Expression*>* values,
int literal_index,
- bool is_simple,
- int depth)
- : MaterializedLiteral(isolate, literal_index, is_simple, depth),
- constant_elements_(constant_elements),
+ int pos)
+ : MaterializedLiteral(isolate, literal_index, pos),
values_(values),
first_element_id_(ReserveIdRange(isolate, values->length())) {}
@@ -1603,7 +1646,6 @@ class VariableProxy V8_FINAL : public Expression {
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
- int position() const { return position_; }
Interface* interface() const { return interface_; }
@@ -1614,7 +1656,7 @@ class VariableProxy V8_FINAL : public Expression {
void BindTo(Variable* var);
protected:
- VariableProxy(Isolate* isolate, Variable* var);
+ VariableProxy(Isolate* isolate, Variable* var, int position);
VariableProxy(Isolate* isolate,
Handle<String> name,
@@ -1629,7 +1671,6 @@ class VariableProxy V8_FINAL : public Expression {
// True if this variable proxy is being used in an assignment
// or with a increment/decrement operator.
bool is_lvalue_;
- int position_;
Interface* interface_;
};
@@ -1642,17 +1683,16 @@ class Property V8_FINAL : public Expression {
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId LoadId() const { return load_id_; }
- bool IsStringLength() const { return is_string_length_; }
bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ return receiver_types_.length() == 1;
+ }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
}
@@ -1660,6 +1700,15 @@ class Property V8_FINAL : public Expression {
return STANDARD_STORE;
}
bool IsUninitialized() { return is_uninitialized_; }
+ bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool HasNoTypeInformation() {
+ return is_uninitialized_ || is_pre_monomorphic_;
+ }
+ void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
+ void set_is_pre_monomorphic(bool b) { is_pre_monomorphic_ = b; }
+ void set_is_string_access(bool b) { is_string_access_ = b; }
+ void set_is_function_prototype(bool b) { is_function_prototype_ = b; }
+
TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
protected:
@@ -1667,27 +1716,23 @@ class Property V8_FINAL : public Expression {
Expression* obj,
Expression* key,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
obj_(obj),
key_(key),
- pos_(pos),
load_id_(GetNextId(isolate)),
- is_monomorphic_(false),
+ is_pre_monomorphic_(false),
is_uninitialized_(false),
- is_string_length_(false),
is_string_access_(false),
is_function_prototype_(false) { }
private:
Expression* obj_;
Expression* key_;
- int pos_;
const BailoutId load_id_;
SmallMapList receiver_types_;
- bool is_monomorphic_ : 1;
+ bool is_pre_monomorphic_ : 1;
bool is_uninitialized_ : 1;
- bool is_string_length_ : 1;
bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
};
@@ -1699,7 +1744,6 @@ class Call V8_FINAL : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const V8_FINAL { return pos_; }
// Type feedback information.
TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
@@ -1708,6 +1752,7 @@ class Call V8_FINAL : public Expression {
return &receiver_types_;
}
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ bool KeyedArrayCallIsHoley() { return keyed_array_call_is_holey_; }
CheckType check_type() const { return check_type_; }
void set_string_check(Handle<JSObject> holder) {
@@ -1754,20 +1799,20 @@ class Call V8_FINAL : public Expression {
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
expression_(expression),
arguments_(arguments),
- pos_(pos),
is_monomorphic_(false),
+ keyed_array_call_is_holey_(true),
check_type_(RECEIVER_MAP_CHECK),
return_id_(GetNextId(isolate)) { }
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- int pos_;
bool is_monomorphic_;
+ bool keyed_array_call_is_holey_;
CheckType check_type_;
SmallMapList receiver_types_;
Handle<JSFunction> target_;
@@ -1784,7 +1829,6 @@ class CallNew V8_FINAL : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
@@ -1803,10 +1847,9 @@ class CallNew V8_FINAL : public Expression {
Expression* expression,
ZoneList<Expression*>* arguments,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
expression_(expression),
arguments_(arguments),
- pos_(pos),
is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()),
return_id_(GetNextId(isolate)) { }
@@ -1814,7 +1857,6 @@ class CallNew V8_FINAL : public Expression {
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
- int pos_;
bool is_monomorphic_;
Handle<JSFunction> target_;
@@ -1844,8 +1886,9 @@ class CallRuntime V8_FINAL : public Expression {
CallRuntime(Isolate* isolate,
Handle<String> name,
const Runtime::Function* function,
- ZoneList<Expression*>* arguments)
- : Expression(isolate),
+ ZoneList<Expression*>* arguments,
+ int pos)
+ : Expression(isolate, pos),
name_(name),
function_(function),
arguments_(arguments) { }
@@ -1863,7 +1906,6 @@ class UnaryOperation V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId MaterializeTrueId() { return materialize_true_id_; }
BailoutId MaterializeFalseId() { return materialize_false_id_; }
@@ -1876,10 +1918,9 @@ class UnaryOperation V8_FINAL : public Expression {
Token::Value op,
Expression* expression,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
expression_(expression),
- pos_(pos),
materialize_true_id_(GetNextId(isolate)),
materialize_false_id_(GetNextId(isolate)) {
ASSERT(Token::IsUnaryOp(op));
@@ -1888,7 +1929,6 @@ class UnaryOperation V8_FINAL : public Expression {
private:
Token::Value op_;
Expression* expression_;
- int pos_;
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
@@ -1906,7 +1946,6 @@ class BinaryOperation V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId RightId() const { return right_id_; }
@@ -1923,11 +1962,10 @@ class BinaryOperation V8_FINAL : public Expression {
Expression* left,
Expression* right,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
left_(left),
right_(right),
- pos_(pos),
right_id_(GetNextId(isolate)) {
ASSERT(Token::IsBinaryOp(op));
}
@@ -1936,7 +1974,6 @@ class BinaryOperation V8_FINAL : public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
- int pos_;
// TODO(rossberg): the fixed arg should probably be represented as a Constant
// type for the RHS.
@@ -1961,17 +1998,19 @@ class CountOperation V8_FINAL : public Expression {
}
Expression* expression() const { return expression_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ return receiver_types_.length() == 1;
+ }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
}
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
- TypeInfo type() const { return type_; }
+ Handle<Type> type() const { return type_; }
+ void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
+ void set_type(Handle<Type> type) { type_ = type; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1984,26 +2023,22 @@ class CountOperation V8_FINAL : public Expression {
bool is_prefix,
Expression* expr,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
is_prefix_(is_prefix),
- is_monomorphic_(false),
store_mode_(STANDARD_STORE),
expression_(expr),
- pos_(pos),
assignment_id_(GetNextId(isolate)),
count_id_(GetNextId(isolate)) {}
private:
Token::Value op_;
bool is_prefix_ : 1;
- bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
- TypeInfo type_;
+ Handle<Type> type_;
Expression* expression_;
- int pos_;
const BailoutId assignment_id_;
const TypeFeedbackId count_id_;
SmallMapList receiver_types_;
@@ -2017,7 +2052,6 @@ class CompareOperation V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
@@ -2035,12 +2069,11 @@ class CompareOperation V8_FINAL : public Expression {
Expression* left,
Expression* right,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
op_(op),
left_(left),
right_(right),
- pos_(pos),
- combined_type_(Type::Null(), isolate) {
+ combined_type_(Type::None(), isolate) {
ASSERT(Token::IsCompareOp(op));
}
@@ -2048,7 +2081,6 @@ class CompareOperation V8_FINAL : public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
- int pos_;
Handle<Type> combined_type_;
};
@@ -2062,9 +2094,6 @@ class Conditional V8_FINAL : public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
- int then_expression_position() const { return then_expression_position_; }
- int else_expression_position() const { return else_expression_position_; }
-
BailoutId ThenId() const { return then_id_; }
BailoutId ElseId() const { return else_id_; }
@@ -2073,14 +2102,11 @@ class Conditional V8_FINAL : public Expression {
Expression* condition,
Expression* then_expression,
Expression* else_expression,
- int then_expression_position,
- int else_expression_position)
- : Expression(isolate),
+ int position)
+ : Expression(isolate, position),
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression),
- then_expression_position_(then_expression_position),
- else_expression_position_(else_expression_position),
then_id_(GetNextId(isolate)),
else_id_(GetNextId(isolate)) { }
@@ -2088,8 +2114,6 @@ class Conditional V8_FINAL : public Expression {
Expression* condition_;
Expression* then_expression_;
Expression* else_expression_;
- int then_expression_position_;
- int else_expression_position_;
const BailoutId then_id_;
const BailoutId else_id_;
};
@@ -2106,7 +2130,6 @@ class Assignment V8_FINAL : public Expression {
Token::Value op() const { return op_; }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -2116,15 +2139,23 @@ class Assignment V8_FINAL : public Expression {
// Type feedback information.
TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE {
+ return receiver_types_.length() == 1;
+ }
bool IsUninitialized() { return is_uninitialized_; }
+ bool IsPreMonomorphic() { return is_pre_monomorphic_; }
+ bool HasNoTypeInformation() {
+ return is_uninitialized_ || is_pre_monomorphic_;
+ }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
return &receiver_types_;
}
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
+ void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
+ void set_is_pre_monomorphic(bool b) { is_pre_monomorphic_ = b; }
+ void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
protected:
Assignment(Isolate* isolate,
@@ -2137,8 +2168,8 @@ class Assignment V8_FINAL : public Expression {
void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
ASSERT(Token::IsAssignmentOp(op_));
if (is_compound()) {
- binary_operation_ =
- factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
+ binary_operation_ = factory->NewBinaryOperation(
+ binary_op(), target_, value_, position() + 1);
}
}
@@ -2146,12 +2177,11 @@ class Assignment V8_FINAL : public Expression {
Token::Value op_;
Expression* target_;
Expression* value_;
- int pos_;
BinaryOperation* binary_operation_;
const BailoutId assignment_id_;
- bool is_monomorphic_ : 1;
bool is_uninitialized_ : 1;
+ bool is_pre_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
SmallMapList receiver_types_;
@@ -2172,7 +2202,6 @@ class Yield V8_FINAL : public Expression {
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
// Delegating yield surrounds the "yield" in a "try/catch". This index
// locates the catch handler in the handler table, and is equivalent to
@@ -2192,19 +2221,17 @@ class Yield V8_FINAL : public Expression {
Expression* expression,
Kind yield_kind,
int pos)
- : Expression(isolate),
+ : Expression(isolate, pos),
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind),
- index_(-1),
- pos_(pos) { }
+ index_(-1) { }
private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
int index_;
- int pos_;
};
@@ -2213,15 +2240,13 @@ class Throw V8_FINAL : public Expression {
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
- virtual int position() const V8_OVERRIDE { return pos_; }
protected:
Throw(Isolate* isolate, Expression* exception, int pos)
- : Expression(isolate), exception_(exception), pos_(pos) {}
+ : Expression(isolate, pos), exception_(exception) {}
private:
Expression* exception_;
- int pos_;
};
@@ -2336,8 +2361,9 @@ class FunctionLiteral V8_FINAL : public Expression {
ParameterFlag has_duplicate_parameters,
IsFunctionFlag is_function,
IsParenthesizedFlag is_parenthesized,
- IsGeneratorFlag is_generator)
- : Expression(isolate),
+ IsGeneratorFlag is_generator,
+ int position)
+ : Expression(isolate, position),
name_(name),
scope_(scope),
body_(body),
@@ -2383,23 +2409,21 @@ class FunctionLiteral V8_FINAL : public Expression {
};
-class SharedFunctionInfoLiteral V8_FINAL : public Expression {
+class NativeFunctionLiteral V8_FINAL : public Expression {
public:
- DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
+ DECLARE_NODE_TYPE(NativeFunctionLiteral)
- Handle<SharedFunctionInfo> shared_function_info() const {
- return shared_function_info_;
- }
+ Handle<String> name() const { return name_; }
+ v8::Extension* extension() const { return extension_; }
protected:
- SharedFunctionInfoLiteral(
- Isolate* isolate,
- Handle<SharedFunctionInfo> shared_function_info)
- : Expression(isolate),
- shared_function_info_(shared_function_info) { }
+ NativeFunctionLiteral(
+ Isolate* isolate, Handle<String> name, v8::Extension* extension, int pos)
+ : Expression(isolate, pos), name_(name), extension_(extension) {}
private:
- Handle<SharedFunctionInfo> shared_function_info_;
+ Handle<String> name_;
+ v8::Extension* extension_;
};
@@ -2408,7 +2432,7 @@ class ThisFunction V8_FINAL : public Expression {
DECLARE_NODE_TYPE(ThisFunction)
protected:
- explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
+ explicit ThisFunction(Isolate* isolate, int pos): Expression(isolate, pos) {}
};
#undef DECLARE_NODE_TYPE
@@ -2775,8 +2799,8 @@ class RegExpEmpty V8_FINAL : public RegExpTree {
// ----------------------------------------------------------------------------
// Out-of-line inline constructors (to side-step cyclic dependencies).
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
- : Module(proxy->interface()),
+inline ModuleVariable::ModuleVariable(VariableProxy* proxy, int pos)
+ : Module(proxy->interface(), pos),
proxy_(proxy) {
}
@@ -2893,75 +2917,81 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
VariableMode mode,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
VariableDeclaration* decl =
- new(zone_) VariableDeclaration(proxy, mode, scope);
+ new(zone_) VariableDeclaration(proxy, mode, scope, pos);
VISIT_AND_RETURN(VariableDeclaration, decl)
}
FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
FunctionDeclaration* decl =
- new(zone_) FunctionDeclaration(proxy, mode, fun, scope);
+ new(zone_) FunctionDeclaration(proxy, mode, fun, scope, pos);
VISIT_AND_RETURN(FunctionDeclaration, decl)
}
ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ModuleDeclaration* decl =
- new(zone_) ModuleDeclaration(proxy, module, scope);
+ new(zone_) ModuleDeclaration(proxy, module, scope, pos);
VISIT_AND_RETURN(ModuleDeclaration, decl)
}
ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
Module* module,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ImportDeclaration* decl =
- new(zone_) ImportDeclaration(proxy, module, scope);
+ new(zone_) ImportDeclaration(proxy, module, scope, pos);
VISIT_AND_RETURN(ImportDeclaration, decl)
}
ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
- Scope* scope) {
+ Scope* scope,
+ int pos) {
ExportDeclaration* decl =
- new(zone_) ExportDeclaration(proxy, scope);
+ new(zone_) ExportDeclaration(proxy, scope, pos);
VISIT_AND_RETURN(ExportDeclaration, decl)
}
- ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) {
- ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface);
+ ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) {
+ ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface, pos);
VISIT_AND_RETURN(ModuleLiteral, module)
}
- ModuleVariable* NewModuleVariable(VariableProxy* proxy) {
- ModuleVariable* module = new(zone_) ModuleVariable(proxy);
+ ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) {
+ ModuleVariable* module = new(zone_) ModuleVariable(proxy, pos);
VISIT_AND_RETURN(ModuleVariable, module)
}
- ModulePath* NewModulePath(Module* origin, Handle<String> name) {
- ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
+ ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) {
+ ModulePath* module = new(zone_) ModulePath(origin, name, zone_, pos);
VISIT_AND_RETURN(ModulePath, module)
}
- ModuleUrl* NewModuleUrl(Handle<String> url) {
- ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
+ ModuleUrl* NewModuleUrl(Handle<String> url, int pos) {
+ ModuleUrl* module = new(zone_) ModuleUrl(url, zone_, pos);
VISIT_AND_RETURN(ModuleUrl, module)
}
Block* NewBlock(ZoneStringList* labels,
int capacity,
- bool is_initializer_block) {
+ bool is_initializer_block,
+ int pos) {
Block* block = new(zone_) Block(
- isolate_, labels, capacity, is_initializer_block, zone_);
+ isolate_, labels, capacity, is_initializer_block, pos, zone_);
VISIT_AND_RETURN(Block, block)
}
#define STATEMENT_WITH_LABELS(NodeType) \
- NodeType* New##NodeType(ZoneStringList* labels) { \
- NodeType* stmt = new(zone_) NodeType(isolate_, labels); \
+ NodeType* New##NodeType(ZoneStringList* labels, int pos) { \
+ NodeType* stmt = new(zone_) NodeType(isolate_, labels, pos); \
VISIT_AND_RETURN(NodeType, stmt); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
@@ -2971,14 +3001,15 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
#undef STATEMENT_WITH_LABELS
ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
- ZoneStringList* labels) {
+ ZoneStringList* labels,
+ int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels);
+ ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels, pos);
VISIT_AND_RETURN(ForInStatement, stmt);
}
case ForEachStatement::ITERATE: {
- ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels);
+ ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels, pos);
VISIT_AND_RETURN(ForOfStatement, stmt);
}
}
@@ -2986,44 +3017,47 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
return NULL;
}
- ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
- ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
+ ModuleStatement* NewModuleStatement(
+ VariableProxy* proxy, Block* body, int pos) {
+ ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body, pos);
VISIT_AND_RETURN(ModuleStatement, stmt)
}
- ExpressionStatement* NewExpressionStatement(Expression* expression) {
- ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
+ ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
+ ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression, pos);
VISIT_AND_RETURN(ExpressionStatement, stmt)
}
- ContinueStatement* NewContinueStatement(IterationStatement* target) {
- ContinueStatement* stmt = new(zone_) ContinueStatement(target);
+ ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) {
+ ContinueStatement* stmt = new(zone_) ContinueStatement(target, pos);
VISIT_AND_RETURN(ContinueStatement, stmt)
}
- BreakStatement* NewBreakStatement(BreakableStatement* target) {
- BreakStatement* stmt = new(zone_) BreakStatement(target);
+ BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) {
+ BreakStatement* stmt = new(zone_) BreakStatement(target, pos);
VISIT_AND_RETURN(BreakStatement, stmt)
}
- ReturnStatement* NewReturnStatement(Expression* expression) {
- ReturnStatement* stmt = new(zone_) ReturnStatement(expression);
+ ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
+ ReturnStatement* stmt = new(zone_) ReturnStatement(expression, pos);
VISIT_AND_RETURN(ReturnStatement, stmt)
}
WithStatement* NewWithStatement(Scope* scope,
Expression* expression,
- Statement* statement) {
+ Statement* statement,
+ int pos) {
WithStatement* stmt = new(zone_) WithStatement(
- scope, expression, statement);
+ scope, expression, statement, pos);
VISIT_AND_RETURN(WithStatement, stmt)
}
IfStatement* NewIfStatement(Expression* condition,
Statement* then_statement,
- Statement* else_statement) {
+ Statement* else_statement,
+ int pos) {
IfStatement* stmt = new(zone_) IfStatement(
- isolate_, condition, then_statement, else_statement);
+ isolate_, condition, then_statement, else_statement, pos);
VISIT_AND_RETURN(IfStatement, stmt)
}
@@ -3031,81 +3065,88 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Block* try_block,
Scope* scope,
Variable* variable,
- Block* catch_block) {
+ Block* catch_block,
+ int pos) {
TryCatchStatement* stmt = new(zone_) TryCatchStatement(
- index, try_block, scope, variable, catch_block);
+ index, try_block, scope, variable, catch_block, pos);
VISIT_AND_RETURN(TryCatchStatement, stmt)
}
TryFinallyStatement* NewTryFinallyStatement(int index,
Block* try_block,
- Block* finally_block) {
+ Block* finally_block,
+ int pos) {
TryFinallyStatement* stmt =
- new(zone_) TryFinallyStatement(index, try_block, finally_block);
+ new(zone_) TryFinallyStatement(index, try_block, finally_block, pos);
VISIT_AND_RETURN(TryFinallyStatement, stmt)
}
- DebuggerStatement* NewDebuggerStatement() {
- DebuggerStatement* stmt = new(zone_) DebuggerStatement();
+ DebuggerStatement* NewDebuggerStatement(int pos) {
+ DebuggerStatement* stmt = new(zone_) DebuggerStatement(pos);
VISIT_AND_RETURN(DebuggerStatement, stmt)
}
- EmptyStatement* NewEmptyStatement() {
- return new(zone_) EmptyStatement();
+ EmptyStatement* NewEmptyStatement(int pos) {
+ return new(zone_) EmptyStatement(pos);
+ }
+
+ CaseClause* NewCaseClause(
+ Expression* label, ZoneList<Statement*>* statements, int pos) {
+ CaseClause* clause =
+ new(zone_) CaseClause(isolate_, label, statements, pos);
+ VISIT_AND_RETURN(CaseClause, clause)
}
- Literal* NewLiteral(Handle<Object> handle) {
- Literal* lit = new(zone_) Literal(isolate_, handle);
+ Literal* NewLiteral(Handle<Object> handle, int pos) {
+ Literal* lit = new(zone_) Literal(isolate_, handle, pos);
VISIT_AND_RETURN(Literal, lit)
}
- Literal* NewNumberLiteral(double number) {
- return NewLiteral(isolate_->factory()->NewNumber(number, TENURED));
+ Literal* NewNumberLiteral(double number, int pos) {
+ return NewLiteral(isolate_->factory()->NewNumber(number, TENURED), pos);
}
ObjectLiteral* NewObjectLiteral(
- Handle<FixedArray> constant_properties,
ZoneList<ObjectLiteral::Property*>* properties,
int literal_index,
- bool is_simple,
- bool fast_elements,
- int depth,
- bool may_store_doubles,
- bool has_function) {
+ int boilerplate_properties,
+ bool has_function,
+ int pos) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
- isolate_, constant_properties, properties, literal_index,
- is_simple, fast_elements, depth, may_store_doubles, has_function);
+ isolate_, properties, literal_index, boilerplate_properties,
+ has_function, pos);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
- FunctionLiteral* value) {
+ FunctionLiteral* value,
+ int pos) {
ObjectLiteral::Property* prop =
new(zone_) ObjectLiteral::Property(is_getter, value);
- prop->set_key(NewLiteral(value->name()));
+ prop->set_key(NewLiteral(value->name(), pos));
return prop; // Not an AST node, will not be visited.
}
RegExpLiteral* NewRegExpLiteral(Handle<String> pattern,
Handle<String> flags,
- int literal_index) {
+ int literal_index,
+ int pos) {
RegExpLiteral* lit =
- new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index);
+ new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index, pos);
VISIT_AND_RETURN(RegExpLiteral, lit);
}
- ArrayLiteral* NewArrayLiteral(Handle<FixedArray> constant_elements,
- ZoneList<Expression*>* values,
+ ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
- bool is_simple,
- int depth) {
+ int pos) {
ArrayLiteral* lit = new(zone_) ArrayLiteral(
- isolate_, constant_elements, values, literal_index, is_simple, depth);
+ isolate_, values, literal_index, pos);
VISIT_AND_RETURN(ArrayLiteral, lit)
}
- VariableProxy* NewVariableProxy(Variable* var) {
- VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var);
+ VariableProxy* NewVariableProxy(Variable* var,
+ int pos = RelocInfo::kNoPosition) {
+ VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var, pos);
VISIT_AND_RETURN(VariableProxy, proxy)
}
@@ -3139,9 +3180,10 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
CallRuntime* NewCallRuntime(Handle<String> name,
const Runtime::Function* function,
- ZoneList<Expression*>* arguments) {
+ ZoneList<Expression*>* arguments,
+ int pos) {
CallRuntime* call =
- new(zone_) CallRuntime(isolate_, name, function, arguments);
+ new(zone_) CallRuntime(isolate_, name, function, arguments, pos);
VISIT_AND_RETURN(CallRuntime, call)
}
@@ -3183,11 +3225,9 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
Conditional* NewConditional(Expression* condition,
Expression* then_expression,
Expression* else_expression,
- int then_expression_position,
- int else_expression_position) {
+ int position) {
Conditional* cond = new(zone_) Conditional(
- isolate_, condition, then_expression, else_expression,
- then_expression_position, else_expression_position);
+ isolate_, condition, then_expression, else_expression, position);
VISIT_AND_RETURN(Conditional, cond)
}
@@ -3227,12 +3267,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
FunctionLiteral::IsParenthesizedFlag is_parenthesized,
- FunctionLiteral::IsGeneratorFlag is_generator) {
+ FunctionLiteral::IsGeneratorFlag is_generator,
+ int position) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
parameter_count, function_type, has_duplicate_parameters, is_function,
- is_parenthesized, is_generator);
+ is_parenthesized, is_generator, position);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit);
@@ -3240,15 +3281,15 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED {
return lit;
}
- SharedFunctionInfoLiteral* NewSharedFunctionInfoLiteral(
- Handle<SharedFunctionInfo> shared_function_info) {
- SharedFunctionInfoLiteral* lit =
- new(zone_) SharedFunctionInfoLiteral(isolate_, shared_function_info);
- VISIT_AND_RETURN(SharedFunctionInfoLiteral, lit)
+ NativeFunctionLiteral* NewNativeFunctionLiteral(
+ Handle<String> name, v8::Extension* extension, int pos) {
+ NativeFunctionLiteral* lit =
+ new(zone_) NativeFunctionLiteral(isolate_, name, extension, pos);
+ VISIT_AND_RETURN(NativeFunctionLiteral, lit)
}
- ThisFunction* NewThisFunction() {
- ThisFunction* fun = new(zone_) ThisFunction(isolate_);
+ ThisFunction* NewThisFunction(int pos) {
+ ThisFunction* fun = new(zone_) ThisFunction(isolate_, pos);
VISIT_AND_RETURN(ThisFunction, fun)
}
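
The ast.h hunks above all apply one refactor: the per-subclass pos_ field and the virtual position() override are deleted from each expression type, and the source position is instead threaded through the Expression base constructor. A minimal standalone sketch of that pattern follows; the classes are invented for illustration and are not V8's real hierarchy.

    #include <cassert>

    class Expression {
     public:
      explicit Expression(int pos) : pos_(pos) {}
      virtual ~Expression() {}
      int position() const { return pos_; }  // one non-virtual accessor
     private:
      int pos_;  // formerly duplicated in Throw, Yield, Call, ...
    };

    class Throw : public Expression {
     public:
      Throw(Expression* exception, int pos)
          : Expression(pos), exception_(exception) {}  // forward pos to base
      Expression* exception() const { return exception_; }
     private:
      Expression* exception_;  // no pos_ member needed any more
    };

    int main() {
      Throw t(nullptr, 42);
      assert(t.position() == 42);
      return 0;
    }

Each node now pays for exactly one int in the base class, and callers use a single non-virtual position() instead of per-class overrides.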
diff --git a/chromium/v8/src/bootstrapper.cc b/chromium/v8/src/bootstrapper.cc
index 0756aefb0b4..2b2f7c71931 100644
--- a/chromium/v8/src/bootstrapper.cc
+++ b/chromium/v8/src/bootstrapper.cc
@@ -40,6 +40,8 @@
#include "objects-visiting.h"
#include "platform.h"
#include "snapshot.h"
+#include "trig-table.h"
+#include "extensions/free-buffer-extension.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
#include "extensions/statistics-extension.h"
@@ -99,6 +101,9 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
void Bootstrapper::InitializeOncePerProcess() {
+#ifdef ADDRESS_SANITIZER
+ FreeBufferExtension::Register();
+#endif
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
@@ -334,17 +339,6 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
SetObjectPrototype(global_proxy, factory->null_value());
- env->set_global_proxy(env->global_object());
- env->global_object()->set_global_receiver(env->global_object());
-}
-
-
-void Bootstrapper::ReattachGlobal(Handle<Context> env,
- Handle<JSGlobalProxy> global_proxy) {
- env->global_object()->set_global_receiver(*global_proxy);
- env->set_global_proxy(*global_proxy);
- SetObjectPrototype(global_proxy, Handle<JSObject>(env->global_object()));
- global_proxy->set_native_context(*env);
}
@@ -824,7 +818,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
// work in the snapshot case is done in HookUpInnerGlobal.
void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
- // --- G l o b a l C o n t e x t ---
+ // --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
native_context()->set_previous(NULL);
@@ -1043,7 +1037,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
{ // -- J S O N
- Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
+ Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
JSFunction::SetInstancePrototype(cons,
@@ -1308,10 +1302,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
native_context()->set_embedder_data(*embedder_data);
-
- // Allocate the random seed slot.
- Handle<ByteArray> random_seed = factory->NewByteArray(kRandomStateSize);
- native_context()->set_random_seed(*random_seed);
}
@@ -1578,6 +1568,7 @@ void Genesis::InstallNativeFunctions() {
void Genesis::InstallExperimentalNativeFunctions() {
+ INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks);
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
@@ -1591,8 +1582,6 @@ void Genesis::InstallExperimentalNativeFunctions() {
observers_begin_perform_splice);
INSTALL_NATIVE(JSFunction, "EndPerformSplice",
observers_end_perform_splice);
- INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
- observers_deliver_changes);
}
}
@@ -2023,50 +2012,28 @@ bool Genesis::InstallNatives() {
}
+#define INSTALL_EXPERIMENTAL_NATIVE(i, flag, file) \
+ if (FLAG_harmony_##flag && \
+ strcmp(ExperimentalNatives::GetScriptName(i).start(), \
+ "native " file) == 0) { \
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
+ }
+
+
bool Genesis::InstallExperimentalNatives() {
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount();
i++) {
- if (FLAG_harmony_symbols &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native symbol.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_proxies &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native proxy.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_collections &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native collection.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_observation &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native object-observe.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_generators &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native generator.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_iteration &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native array-iterator.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_strings &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native harmony-string.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_arrays &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native harmony-array.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
+ INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, observation, "object-observe.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, promises, "promise.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, iteration, "array-iterator.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js")
+ INSTALL_EXPERIMENTAL_NATIVE(i, maths, "harmony-math.js")
}
InstallExperimentalNativeFunctions();
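
The rewritten InstallExperimentalNatives() above collapses nine hand-written flag-plus-filename checks into one INSTALL_EXPERIMENTAL_NATIVE macro. Below is a self-contained sketch of that macro shape; the flag variables, file names, and the stub CompileBuiltin() are invented stand-ins for the real FLAG_harmony_* flags and CompileExperimentalBuiltin().

    #include <cstdio>
    #include <cstring>

    static bool FLAG_harmony_symbols = true;   // invented example flags
    static bool FLAG_harmony_proxies = false;

    static bool CompileBuiltin(const char* file) {
      std::printf("compiling %s\n", file);
      return true;
    }

    // "native " file relies on adjacent string-literal concatenation.
    #define INSTALL_EXPERIMENTAL_NATIVE(name, flag, file)                  \
      if (FLAG_harmony_##flag && std::strcmp(name, "native " file) == 0) { \
        if (!CompileBuiltin(name)) return false;                           \
      }

    static bool InstallOne(const char* script_name) {
      INSTALL_EXPERIMENTAL_NATIVE(script_name, symbols, "symbol.js")
      INSTALL_EXPERIMENTAL_NATIVE(script_name, proxies, "proxy.js")
      return true;
    }

    int main() {
      InstallOne("native symbol.js");  // flag on  -> compiled
      InstallOne("native proxy.js");   // flag off -> skipped
      return 0;
    }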
@@ -2276,6 +2243,11 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
current = current->next();
}
+#ifdef ADDRESS_SANITIZER
+ if (FLAG_expose_free_buffer) {
+ InstallExtension(isolate, "v8/free-buffer", &extension_states);
+ }
+#endif
if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states);
if (FLAG_expose_externalize_string) {
InstallExtension(isolate, "v8/externalize", &extension_states);
@@ -2421,8 +2393,8 @@ bool Genesis::ConfigureGlobalObjects(
bool Genesis::ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template) {
ASSERT(!object_template.is_null());
- ASSERT(object->IsInstanceOf(
- FunctionTemplateInfo::cast(object_template->constructor())));
+ ASSERT(FunctionTemplateInfo::cast(object_template->constructor())
bool pending_exception = false;
Handle<JSObject> obj =
@@ -2630,13 +2602,67 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
- // Initially seed the per-context random number generator
- // using the per-isolate random number generator.
- uint32_t* state = reinterpret_cast<uint32_t*>(
- native_context()->random_seed()->GetDataStartAddress());
- do {
- isolate->random_number_generator()->NextBytes(state, kRandomStateSize);
- } while (state[0] == 0 || state[1] == 0);
+ // We can't (de-)serialize typed arrays currently, but we are lucky: The state
+ // of the random number generator needs no initialization during snapshot
+ // creation time and we don't need trigonometric functions then.
+ if (!Serializer::enabled()) {
+ // Initially seed the per-context random number generator using the
+ // per-isolate random number generator.
+ const int num_elems = 2;
+ const int num_bytes = num_elems * sizeof(uint32_t);
+ uint32_t* state = reinterpret_cast<uint32_t*>(malloc(num_bytes));
+
+ do {
+ isolate->random_number_generator()->NextBytes(state, num_bytes);
+ } while (state[0] == 0 || state[1] == 0);
+
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(
+ reinterpret_cast<v8::Isolate*>(isolate), state, num_bytes);
+ Utils::OpenHandle(*buffer)->set_should_be_freed(true);
+ v8::Local<v8::Uint32Array> ta = v8::Uint32Array::New(buffer, 0, num_elems);
+ Handle<JSBuiltinsObject> builtins(native_context()->builtins());
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("rngstate")),
+ Utils::OpenHandle(*ta),
+ NONE);
+
+ // Initialize trigonometric lookup tables and constants.
+ const int table_num_bytes = TrigonometricLookupTable::table_num_bytes();
+ v8::Local<v8::ArrayBuffer> sin_buffer = v8::ArrayBuffer::New(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ TrigonometricLookupTable::sin_table(), table_num_bytes);
+ v8::Local<v8::ArrayBuffer> cos_buffer = v8::ArrayBuffer::New(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ TrigonometricLookupTable::cos_x_interval_table(), table_num_bytes);
+ v8::Local<v8::Float64Array> sin_table = v8::Float64Array::New(
+ sin_buffer, 0, TrigonometricLookupTable::table_size());
+ v8::Local<v8::Float64Array> cos_table = v8::Float64Array::New(
+ cos_buffer, 0, TrigonometricLookupTable::table_size());
+
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kSinTable")),
+ Utils::OpenHandle(*sin_table),
+ NONE);
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kCosXIntervalTable")),
+ Utils::OpenHandle(*cos_table),
+ NONE);
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kSamples")),
+ factory()->NewHeapNumber(
+ TrigonometricLookupTable::samples()),
+ NONE);
+ ForceSetProperty(builtins,
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("kIndexConvert")),
+ factory()->NewHeapNumber(
+ TrigonometricLookupTable::samples_over_pi_half()),
+ NONE);
+ }
result_ = native_context();
}
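
The seeding code above redraws until both state words are nonzero, presumably because the per-context generator is a multiply/shift-style PRNG for which an all-zero state is absorbing. A standalone sketch of just that rejection loop follows; std::random_device stands in for the per-isolate generator, which this example does not model.

    #include <cstdint>
    #include <random>

    int main() {
      std::random_device rd;
      std::uint32_t state[2];
      do {
        state[0] = rd();
        state[1] = rd();
      } while (state[0] == 0 || state[1] == 0);  // reject zero words
      // state[] would now back a Uint32Array exposed on the builtins
      // object as "rngstate", as in the snippet above.
      return 0;
    }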
diff --git a/chromium/v8/src/bootstrapper.h b/chromium/v8/src/bootstrapper.h
index bac9f40372d..4f63c87163f 100644
--- a/chromium/v8/src/bootstrapper.h
+++ b/chromium/v8/src/bootstrapper.h
@@ -105,9 +105,6 @@ class Bootstrapper {
// Detach the environment from its outer global object.
void DetachGlobal(Handle<Context> env);
- // Reattach an outer global object to an environment.
- void ReattachGlobal(Handle<Context> env, Handle<JSGlobalProxy> global_proxy);
-
// Traverses the pointers for memory management.
void Iterate(ObjectVisitor* v);
diff --git a/chromium/v8/src/builtins.cc b/chromium/v8/src/builtins.cc
index 9290852dc95..f9c2708ba12 100644
--- a/chromium/v8/src/builtins.cc
+++ b/chromium/v8/src/builtins.cc
@@ -195,79 +195,6 @@ BUILTIN(EmptyFunction) {
}
-static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
- Isolate* isolate,
- JSFunction* constructor) {
- ASSERT(args->length() >= 1);
- Heap* heap = isolate->heap();
- isolate->counters()->array_function_runtime()->Increment();
-
- JSArray* array;
- if (CalledAsConstructor(isolate)) {
- array = JSArray::cast((*args)[0]);
- // Initialize elements and length in case later allocations fail so that the
- // array object is initialized in a valid state.
- MaybeObject* maybe_array = array->Initialize(0);
- if (maybe_array->IsFailure()) return maybe_array;
-
- AllocationMemento* memento = AllocationMemento::FindForJSObject(array);
- if (memento != NULL && memento->IsValid()) {
- AllocationSite* site = memento->GetAllocationSite();
- ElementsKind to_kind = site->GetElementsKind();
- if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
- to_kind)) {
- // We have advice that we should change the elements kind
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n",
- reinterpret_cast<void*>(array),
- ElementsKindToString(array->GetElementsKind()),
- ElementsKindToString(to_kind));
- }
-
- maybe_array = array->TransitionElementsKind(to_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- }
-
- if (!FLAG_smi_only_arrays) {
- Context* native_context = isolate->context()->native_context();
- if (array->GetElementsKind() == GetInitialFastElementsKind() &&
- !native_context->js_array_maps()->IsUndefined()) {
- FixedArray* map_array =
- FixedArray::cast(native_context->js_array_maps());
- array->set_map(Map::cast(map_array->
- get(TERMINAL_FAST_ELEMENTS_KIND)));
- }
- }
- } else {
- // Allocate the JS Array
- MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
- if (!maybe_obj->To(&array)) return maybe_obj;
- }
-
- Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
- ASSERT(adjusted_arguments.length() < 1 ||
- adjusted_arguments[0] == (*args)[1]);
- return ArrayConstructInitializeElements(array, &adjusted_arguments);
-}
-
-
-BUILTIN(InternalArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->internal_array_function());
-}
-
-
-BUILTIN(ArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->array_function());
-}
-
-
static void MoveDoubleElements(FixedDoubleArray* dst,
int dst_index,
FixedDoubleArray* src,
@@ -346,10 +273,15 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
- HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
- elms->address() + size_delta));
- return FixedArrayBase::cast(HeapObject::FromAddress(
- elms->address() + to_trim * entry_size));
+ FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress(
+ elms->address() + size_delta));
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_tracking_object_moves()) {
+ profiler->ObjectMoveEvent(elms->address(),
+ new_elms->address(),
+ new_elms->Size());
+ }
+ return new_elms;
}
@@ -374,6 +306,8 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
+ if (array->map()->is_observed()) return NULL;
+ if (!array->map()->is_extensible()) return NULL;
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
@@ -1166,7 +1100,7 @@ BUILTIN(StrictModePoisonPill) {
static inline Object* FindHidden(Heap* heap,
Object* object,
FunctionTemplateInfo* type) {
- if (object->IsInstanceOf(type)) return object;
+ if (type->IsTemplateFor(object)) return object;
Object* proto = object->GetPrototype(heap->isolate());
if (proto->IsJSObject() &&
JSObject::cast(proto)->map()->is_hidden_prototype()) {
@@ -1392,7 +1326,8 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
+ LoadStubCompiler::GenerateLoadViaGetter(
+ masm, LoadStubCompiler::registers()[0], Handle<JSFunction>());
}
@@ -1412,12 +1347,7 @@ static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
+ KeyedLoadIC::GenerateMiss(masm);
}
@@ -1481,28 +1411,15 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) {
}
-static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
+ StoreIC::GenerateMegamorphic(masm,
+ StoreIC::ComputeExtraICState(kNonStrictMode));
}
static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- StoreIC::GenerateMegamorphic(masm, kStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kNonStrictMode);
-}
-
-
-static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
- StoreIC::GenerateRuntimeSetProperty(masm, kStrictMode);
+ StoreIC::GenerateMegamorphic(masm,
+ StoreIC::ComputeExtraICState(kStrictMode));
}
@@ -1532,12 +1449,7 @@ static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
+ KeyedStoreIC::GenerateMiss(masm);
}
@@ -1728,8 +1640,20 @@ void Builtins::InitBuiltinFunctionTable() {
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
+#define DEF_FUNCTION_PTR_H(aname, kind) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeFlags( \
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState, \
+ Code::NORMAL, Code::kind); \
+ functions->extra_args = NO_EXTRA_ARGUMENTS; \
+ ++functions;
+
BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
+ BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
#undef DEF_FUNCTION_PTR_C
@@ -1791,9 +1715,10 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
- PrintF("Builtin: %s\n", functions[i].s_name);
- Code::cast(code)->Disassemble(functions[i].s_name);
- PrintF("\n");
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+ PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name);
+ Code::cast(code)->Disassemble(functions[i].s_name, trace_scope.file());
+ PrintF(trace_scope.file(), "\n");
}
#endif
} else {
@@ -1854,8 +1779,15 @@ Handle<Code> Builtins::name() { \
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
+#define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
+Handle<Code> Builtins::name() { \
+ Code** code_address = \
+ reinterpret_cast<Code**>(builtin_address(k##name)); \
+ return Handle<Code>(code_address); \
+}
BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
+BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
#undef DEFINE_BUILTIN_ACCESSOR_C
#undef DEFINE_BUILTIN_ACCESSOR_A
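
The new early returns added to EnsureJSArrayWithWritableFastElements above push observed and non-extensible arrays off the C++ fast path, since both need semantics (change notifications, rejectable extensions) that only the generic path implements. An illustrative guard with invented types, not V8's real ones:

    #include <cstdio>

    struct Array {
      bool observed;
      bool extensible;
    };

    // Returns the array if the fast path may be used, nullptr otherwise.
    static Array* EnsureFastPath(Array* a) {
      if (a->observed) return nullptr;     // observers must see the change
      if (!a->extensible) return nullptr;  // push/unshift may be rejected
      return a;
    }

    int main() {
      Array plain{false, true}, frozen{false, false};
      std::printf("plain: %s\n", EnsureFastPath(&plain) ? "fast" : "slow");
      std::printf("frozen: %s\n", EnsureFastPath(&frozen) ? "fast" : "slow");
      return 0;
    }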
diff --git a/chromium/v8/src/builtins.h b/chromium/v8/src/builtins.h
index c712f1ee02d..edc13f7511a 100644
--- a/chromium/v8/src/builtins.h
+++ b/chromium/v8/src/builtins.h
@@ -50,11 +50,17 @@ enum BuiltinExtraArguments {
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+#define CODE_AGE_LIST_COMPLETE(V) \
+ V(NotExecuted) \
+ V(ExecutedOnce) \
+ V(NoAge) \
+ CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState) \
+ UNINITIALIZED, kNoExtraICState) \
V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState)
+ UNINITIALIZED, kNoExtraICState)
// Define list of builtins implemented in C++.
@@ -63,9 +69,6 @@ enum BuiltinExtraArguments {
\
V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
\
- V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- \
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
V(ArrayShift, NO_EXTRA_ARGUMENTS) \
@@ -84,145 +87,139 @@ enum BuiltinExtraArguments {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(LazyCompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(ConcurrentRecompile, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_Slow, STUB, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(StoreIC_Generic, STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(StoreIC_Generic_Strict, STORE_IC, GENERIC, \
- kStrictMode) \
- V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
+ StoreIC::kStrictModeState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
- kStrictMode) \
- V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
- kStrictMode) \
- V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
- kStrictMode) \
+ StoreIC::kStrictModeState) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(InternalArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
+ kNoExtraICState) \
+ \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
+// Define list of builtin handlers implemented in assembly.
+#define BUILTIN_LIST_H(V) \
+ V(LoadIC_Slow, LOAD_IC) \
+ V(KeyedLoadIC_Slow, KEYED_LOAD_IC) \
+ V(StoreIC_Slow, STORE_IC) \
+ V(KeyedStoreIC_Slow, KEYED_STORE_IC) \
+ V(LoadIC_Normal, LOAD_IC) \
+ V(StoreIC_Normal, STORE_IC)
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
@@ -310,8 +307,10 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
+#define DEF_ENUM_H(name, kind) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
+ BUILTIN_LIST_H(DEF_ENUM_H)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
@@ -335,8 +334,10 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
+ BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
@@ -356,6 +357,11 @@ class Builtins {
}
static const char* GetName(JavaScript id) { return javascript_names_[id]; }
+ const char* name(int index) {
+ ASSERT(index >= 0);
+ ASSERT(index < builtin_count);
+ return names_[index];
+ }
static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
@@ -391,8 +397,8 @@ class Builtins {
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
+ static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
@@ -403,7 +409,7 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
-
+ static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);
@@ -415,6 +421,9 @@ class Builtins {
CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
+ static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
+ static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
+
static void InitBuiltinFunctionTable();
bool initialized_;
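
BUILTIN_LIST_H above is another instance of the X-macro idiom already used by BUILTIN_LIST_C and BUILTIN_LIST_A: one list macro is expanded under different per-entry macros to generate the enum, the accessors, and the function table, so adding a handler means touching a single line. A tiny self-contained model with invented names:

    #include <cstdio>

    #define HANDLER_LIST(V) \
      V(LoadIC_Slow)        \
      V(StoreIC_Slow)

    enum Name {
    #define DEF_ENUM(name) k##name,
      HANDLER_LIST(DEF_ENUM)
    #undef DEF_ENUM
      kBuiltinCount
    };

    static const char* kNames[] = {
    #define DEF_NAME(name) #name,
      HANDLER_LIST(DEF_NAME)
    #undef DEF_NAME
    };

    int main() {
      for (int i = 0; i < kBuiltinCount; ++i)
        std::printf("%d: %s\n", i, kNames[i]);
      return 0;
    }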
diff --git a/chromium/v8/src/checks.cc b/chromium/v8/src/checks.cc
index 7108d18892e..a4514bf9650 100644
--- a/chromium/v8/src/checks.cc
+++ b/chromium/v8/src/checks.cc
@@ -25,11 +25,48 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdarg.h>
+#include "checks.h"
-#include "v8.h"
+#if V8_LIBC_GLIBC || V8_OS_BSD
+# include <cxxabi.h>
+# include <execinfo.h>
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+#include <stdio.h>
#include "platform.h"
+#include "v8.h"
+
+
+// Attempts to dump a backtrace (if supported).
+static V8_INLINE void DumpBacktrace() {
+#if V8_LIBC_GLIBC || V8_OS_BSD
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ i::OS::PrintError("(empty)\n");
+ } else if (symbols == NULL) {
+ i::OS::PrintError("(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ i::OS::PrintError("%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
+ int status;
+ size_t length;
+ char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+ i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ free(demangled);
+ } else {
+ i::OS::PrintError("??\n");
+ }
+ }
+ }
+ free(symbols);
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+}
+
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
@@ -43,7 +80,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
- i::OS::DumpBacktrace();
+ DumpBacktrace();
+ fflush(stderr);
i::OS::Abort();
}
@@ -91,9 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) {
namespace v8 { namespace internal {
- bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; }
-
intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
} } // namespace v8::internal
-
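
The DumpBacktrace() added above combines three real glibc/libstdc++ facilities: backtrace() captures return addresses, backtrace_symbols() renders them as strings, and abi::__cxa_demangle() restores readable C++ names. A condensed runnable version follows (glibc only; compile with -rdynamic so symbol names resolve).

    #include <cxxabi.h>
    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    void DumpBacktrace() {
      void* trace[100];
      int size = backtrace(trace, 100);
      char** symbols = backtrace_symbols(trace, size);
      for (int i = 1; i < size; ++i) {  // skip frame 0 (this function)
        char mangled[201];
        // Pull the mangled name out of "binary(_Zmangled+0x1a) [0x...]".
        if (symbols != NULL &&
            std::sscanf(symbols[i], "%*[^(](%200[^)+]", mangled) == 1) {
          int status;
          char* demangled = abi::__cxa_demangle(mangled, NULL, NULL, &status);
          std::printf("%2d: %s\n", i, status == 0 ? demangled : mangled);
          std::free(demangled);  // free(NULL) is a no-op on failure
        } else {
          std::printf("%2d: ??\n", i);
        }
      }
      std::free(symbols);
    }

    int main() { DumpBacktrace(); return 0; }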
diff --git a/chromium/v8/src/checks.h b/chromium/v8/src/checks.h
index f5c5f232bd5..f7b145fc8a8 100644
--- a/chromium/v8/src/checks.h
+++ b/chromium/v8/src/checks.h
@@ -268,11 +268,28 @@ template <int> class StaticAssertionHelper { };
#define STATIC_CHECK(test) \
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
+ SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
#endif
+#ifdef DEBUG
+#ifndef OPTIMIZED_DEBUG
+#define ENABLE_SLOW_ASSERTS 1
+#endif
+#endif
+
+namespace v8 {
+namespace internal {
+#ifdef ENABLE_SLOW_ASSERTS
+#define SLOW_ASSERT(condition) \
+ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
extern bool FLAG_enable_slow_asserts;
+#else
+#define SLOW_ASSERT(condition) ((void) 0)
+const bool FLAG_enable_slow_asserts = false;
+#endif
+} // namespace internal
+} // namespace v8
// The ASSERT macro is equivalent to CHECK except that it only
@@ -285,7 +302,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
@@ -294,7 +310,6 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
-#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts have no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression
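
The SLOW_ASSERT move above relies on a constant-folding trick: in builds without slow asserts the macro compiles to ((void) 0) and FLAG_enable_slow_asserts becomes a compile-time const false, so any remaining runtime tests of the flag fold away too. A minimal model of that gating, with invented names standing in for V8's flag and CHECK macro:

    #include <cassert>

    #define ENABLE_SLOW_ASSERTS 1  // set to 0 to compile the checks away

    #if ENABLE_SLOW_ASSERTS
    static bool enable_slow_asserts = true;          // runtime-settable
    #define SLOW_ASSERT(cond) assert(!enable_slow_asserts || (cond))
    #else
    static const bool enable_slow_asserts = false;   // constant-folds
    #define SLOW_ASSERT(cond) ((void) 0)
    #endif

    static bool ExpensiveInvariant(int x) { return x * x >= 0; }

    int main() {
      SLOW_ASSERT(ExpensiveInvariant(7));  // evaluated only in slow builds
      if (enable_slow_asserts) {
        // Blocks like this vanish entirely when the flag is const false.
      }
      return 0;
    }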
diff --git a/chromium/v8/src/code-stubs-hydrogen.cc b/chromium/v8/src/code-stubs-hydrogen.cc
index 9130a731594..96cfc378476 100644
--- a/chromium/v8/src/code-stubs-hydrogen.cc
+++ b/chromium/v8/src/code-stubs-hydrogen.cc
@@ -146,40 +146,33 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
int param_count = descriptor_->register_param_count_;
HEnvironment* start_environment = graph()->start_environment();
HBasicBlock* next_block = CreateBasicBlock(start_environment);
- current_block()->Goto(next_block);
+ Goto(next_block);
next_block->SetJoinId(BailoutId::StubEntry());
set_current_block(next_block);
- HConstant* undefined_constant =
- Add<HConstant>(isolate()->factory()->undefined_value());
- graph()->set_undefined_constant(undefined_constant);
-
+ bool runtime_stack_params = descriptor_->stack_parameter_count_.is_valid();
+ HInstruction* stack_parameter_count = NULL;
for (int i = 0; i < param_count; ++i) {
- HParameter* param =
- Add<HParameter>(i, HParameter::REGISTER_PARAMETER);
+ Representation r = descriptor_->IsParameterCountRegister(i)
+ ? Representation::Integer32()
+ : Representation::Tagged();
+ HParameter* param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
start_environment->Bind(i, param);
parameters_[i] = param;
+ if (descriptor_->IsParameterCountRegister(i)) {
+ param->set_type(HType::Smi());
+ stack_parameter_count = param;
+ arguments_length_ = stack_parameter_count;
+ }
}
- HInstruction* stack_parameter_count;
- if (descriptor_->stack_parameter_count_ != NULL) {
- ASSERT(descriptor_->environment_length() == (param_count + 1));
- stack_parameter_count = New<HParameter>(param_count,
- HParameter::REGISTER_PARAMETER,
- Representation::Integer32());
- stack_parameter_count->set_type(HType::Smi());
- // It's essential to bind this value to the environment in case of deopt.
- AddInstruction(stack_parameter_count);
- start_environment->Bind(param_count, stack_parameter_count);
- arguments_length_ = stack_parameter_count;
- } else {
- ASSERT(descriptor_->environment_length() == param_count);
+ ASSERT(!runtime_stack_params || arguments_length_ != NULL);
+ if (!runtime_stack_params) {
stack_parameter_count = graph()->GetConstantMinus1();
arguments_length_ = graph()->GetConstant0();
}
- context_ = New<HContext>();
- AddInstruction(context_);
+ context_ = Add<HContext>();
start_environment->BindContext(context_);
Add<HSimulate>(BailoutId::StubEntry());
@@ -194,10 +187,11 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
if (!stack_parameter_count->IsConstant() &&
descriptor_->hint_stack_parameter_count_ < 0) {
- HInstruction* amount = graph()->GetConstant1();
- stack_pop_count = Add<HAdd>(stack_parameter_count, amount);
- stack_pop_count->ChangeRepresentation(Representation::Integer32());
+ HInstruction* constant_one = graph()->GetConstant1();
+ stack_pop_count = AddUncasted<HAdd>(stack_parameter_count, constant_one);
stack_pop_count->ClearFlag(HValue::kCanOverflow);
+ // TODO(mvstanton): verify that stack_parameter_count+1 really fits in a
+ // smi.
} else {
int count = descriptor_->hint_stack_parameter_count_;
stack_pop_count = Add<HConstant>(count);
@@ -207,8 +201,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
if (current_block() != NULL) {
HReturn* hreturn_instruction = New<HReturn>(return_value,
stack_pop_count);
- current_block()->Finish(hreturn_instruction);
- set_current_block(NULL);
+ FinishCurrentBlock(hreturn_instruction);
}
return true;
}
@@ -217,7 +210,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
+ CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
: CodeStubGraphBuilderBase(isolate, stub) {}
protected:
@@ -258,9 +251,6 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
// Update the static counter each time a new code stub is generated.
isolate->counters()->code_stubs()->Increment();
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(&masm, false);
-
// Generate the code for the stub.
masm.set_generating_stub(true);
NoCurrentFrameScope scope(&masm);
@@ -298,12 +288,21 @@ static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
// the runtime that is significantly faster than using the standard
// stub-failure deopt mechanism.
if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
- ASSERT(descriptor->stack_parameter_count_ == NULL);
+ ASSERT(!descriptor->stack_parameter_count_.is_valid());
return stub->GenerateLightweightMissCode(isolate);
}
+ ElapsedTimer timer;
+ if (FLAG_profile_hydrogen_code_stub_compilation) {
+ timer.Start();
+ }
CodeStubGraphBuilder<Stub> builder(isolate, stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
- return chunk->Codegen();
+ Handle<Code> code = chunk->Codegen();
+ if (FLAG_profile_hydrogen_code_stub_compilation) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms);
+ }
+ return code;
}
@@ -339,6 +338,19 @@ Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
template <>
+HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
+ info()->MarkAsSavesCallerDoubles();
+ HValue* number = GetParameter(NumberToStringStub::kNumber);
+ return BuildNumberToString(number, handle(Type::Number(), isolate()));
+}
+
+
+Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
@@ -355,42 +367,48 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
undefined);
checker.Then();
- HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
+ HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset);
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+ HValue* push_value;
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
if_fixed_cow.Then();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- 0/*copy-on-write*/));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ 0/*copy-on-write*/);
+ environment()->Push(push_value);
if_fixed_cow.Else();
IfBuilder if_fixed(this);
if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ length);
+ environment()->Push(push_value);
if_fixed.Else();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_DOUBLE_ELEMENTS,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_DOUBLE_ELEMENTS,
+ length);
+ environment()->Push(push_value);
} else {
ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- elements_kind,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ elements_kind,
+ length);
+ environment()->Push(push_value);
}
checker.ElseDeopt("Uninitialized boilerplate literals");
@@ -407,23 +425,33 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
- Zone* zone = this->zone();
HValue* undefined = graph()->GetConstantUndefined();
- HInstruction* boilerplate = Add<HLoadKeyed>(GetParameter(0),
- GetParameter(1),
- static_cast<HValue*>(NULL),
- FAST_ELEMENTS);
+ HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
+ GetParameter(1),
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS);
IfBuilder checker(this);
- checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate,
+ checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
undefined);
checker.And();
+ HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset);
+ HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+
int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
- HValue* boilerplate_size =
- AddInstruction(new(zone) HInstanceSize(boilerplate));
- HValue* size_in_words = Add<HConstant>(size >> kPointerSizeLog2);
+ int object_size = size;
+ if (FLAG_allocation_site_pretenuring) {
+ size += AllocationMemento::kSize;
+ }
+
+ HValue* boilerplate_map = Add<HLoadNamedField>(
+ boilerplate, HObjectAccess::ForMap());
+ HValue* boilerplate_size = Add<HLoadNamedField>(
+ boilerplate_map, HObjectAccess::ForMapInstanceSize());
+ HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
checker.If<HCompareNumericAndBranch>(boilerplate_size,
size_in_words, Token::EQ);
checker.Then();
@@ -433,12 +461,18 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
- for (int i = 0; i < size; i += kPointerSize) {
+ for (int i = 0; i < object_size; i += kPointerSize) {
HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
Add<HStoreNamedField>(object, access,
Add<HLoadNamedField>(boilerplate, access));
}
+ ASSERT(FLAG_allocation_site_pretenuring || (size == object_size));
+ if (FLAG_allocation_site_pretenuring) {
+ BuildCreateAllocationMemento(
+ object, Add<HConstant>(object_size), allocation_site);
+ }
+
environment()->Push(object);
checker.ElseDeopt("Uninitialized boilerplate in fast clone");
checker.End();
@@ -459,24 +493,55 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
JS_OBJECT_TYPE);
// Store the map
- Handle<Map> allocation_site_map(isolate()->heap()->allocation_site_map(),
- isolate());
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
AddStoreMapConstant(object, allocation_site_map);
// Store the payload (smi elements kind)
HValue* initial_elements_kind = Add<HConstant>(GetInitialFastElementsKind());
Add<HStoreNamedField>(object,
- HObjectAccess::ForAllocationSiteTransitionInfo(),
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kTransitionInfoOffset),
initial_elements_kind);
+ // Unlike literals, constructed arrays don't have nested sites
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kNestedSiteOffset),
+ graph()->GetConstant0());
+
+ // Pretenuring calculation fields.
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kMementoFoundCountOffset),
+ graph()->GetConstant0());
+
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kMementoCreateCountOffset),
+ graph()->GetConstant0());
+
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kPretenureDecisionOffset),
+ graph()->GetConstant0());
+
+ // Store an empty fixed array for the code dependency.
+ HConstant* empty_fixed_array =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ HStoreNamedField* store = Add<HStoreNamedField>(
+ object,
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kDependentCodeOffset),
+ empty_fixed_array);
+
// Link the object to the allocation site list
HValue* site_list = Add<HConstant>(
ExternalReference::allocation_sites_list_address(isolate()));
HValue* site = Add<HLoadNamedField>(site_list,
HObjectAccess::ForAllocationSiteList());
- HStoreNamedField* store =
- Add<HStoreNamedField>(object, HObjectAccess::ForAllocationSiteWeakNext(),
- site);
+ store = Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
+ site);
store->SkipWriteBarrier();
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
@@ -519,7 +584,7 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
+ return AddLoadNamedField(GetParameter(0), access);
}
@@ -534,7 +599,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
+ return AddLoadNamedField(GetParameter(0), access);
}
@@ -543,6 +608,33 @@ Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
}
+template<>
+HValue* CodeStubGraphBuilder<KeyedArrayCallStub>::BuildCodeStub() {
+ int argc = casted_stub()->argc() + 1;
+ info()->set_parameter_count(argc);
+
+ HValue* receiver = Add<HParameter>(1);
+ BuildCheckHeapObject(receiver);
+
+ // Load the expected initial array map from the context.
+ JSArrayBuilder array_builder(this, casted_stub()->elements_kind());
+ HValue* map = array_builder.EmitMapCode();
+
+ HValue* checked_receiver = Add<HCheckMapValue>(receiver, map);
+
+ HValue* function = BuildUncheckedMonomorphicElementAccess(
+ checked_receiver, GetParameter(0),
+ NULL, true, casted_stub()->elements_kind(),
+ false, NEVER_RETURN_HOLE, STANDARD_STORE);
+ return Add<HCallFunction>(function, argc, TAIL_CALL);
+}
+
+
+Handle<Code> KeyedArrayCallStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
template <>
HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
@@ -640,46 +732,38 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HValue* constant_zero = graph()->GetConstant0();
HInstruction* elements = Add<HArgumentsElements>(false);
- HInstruction* argument = AddInstruction(
- new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero));
+ HInstruction* argument = Add<HAccessArgumentsAt>(
+ elements, constant_one, constant_zero);
- HConstant* max_alloc_length =
- Add<HConstant>(JSObject::kInitialMaxFastElementArray);
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- HConstant* initial_capacity_node = New<HConstant>(initial_capacity);
- AddInstruction(initial_capacity_node);
-
- HInstruction* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
- IfBuilder if_builder(this);
- if_builder.If<HCompareNumericAndBranch>(checked_arg, constant_zero,
- Token::EQ);
- if_builder.Then();
- Push(initial_capacity_node); // capacity
- Push(constant_zero); // length
- if_builder.Else();
- Push(checked_arg); // capacity
- Push(checked_arg); // length
- if_builder.End();
-
- // Figure out total size
- HValue* length = Pop();
- HValue* capacity = Pop();
- return array_builder->AllocateArray(capacity, length, true);
+ return BuildAllocateArrayFromLength(array_builder, argument);
}
HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
JSArrayBuilder* array_builder, ElementsKind kind) {
+ // Insert a bounds check because the number of arguments might exceed
+ // the kInitialMaxFastElementArray limit. This cannot happen for code
+ // that was parsed, but calling via Array.apply(thisArg, [...]) might
+ // trigger it.
+ HValue* length = GetArgumentsLength();
+ HConstant* max_alloc_length =
+ Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
+
// We need to fill with the hole if it's a smi array in the multi-argument
// case, because we might have to bail out while copying arguments into
// the array if they aren't compatible with a smi array.
// If it's a double array, no problem, and if it's fast then no
// problem either because doubles are boxed.
- HValue* length = GetArgumentsLength();
- bool fill_with_hole = IsFastSmiElementsKind(kind);
- HValue* new_object = array_builder->AllocateArray(length,
- length,
- fill_with_hole);
+ //
+ // TODO(mvstanton): consider an instruction to memset fill the array
+ // with zero in this case instead.
+ JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind)
+ ? JSArrayBuilder::FILL_WITH_HOLE
+ : JSArrayBuilder::DONT_FILL_WITH_HOLE;
+ HValue* new_object = array_builder->AllocateArray(checked_length,
+ checked_length,
+ fill_mode);
HValue* elements = array_builder->GetElementsLocation();
ASSERT(elements != NULL);
@@ -688,10 +772,10 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
context(),
LoopBuilder::kPostIncrement);
HValue* start = graph()->GetConstant0();
- HValue* key = builder.BeginBody(start, length, Token::LT);
+ HValue* key = builder.BeginBody(start, checked_length, Token::LT);
HInstruction* argument_elements = Add<HArgumentsElements>(false);
- HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt(
- argument_elements, length, key));
+ HInstruction* argument = Add<HAccessArgumentsAt>(
+ argument_elements, checked_length, key);
Add<HStoreKeyed>(elements, key, argument, kind);
builder.EndBody();
@@ -792,7 +876,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
Handle<Type> type = stub->GetType(isolate, sentinel_map);
- BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation);
+ BuildCompareNil(GetParameter(0), type, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
@@ -812,6 +896,142 @@ Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
template <>
+HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
+ BinaryOpIC::State state = casted_stub()->state();
+
+ HValue* left = GetParameter(BinaryOpICStub::kLeft);
+ HValue* right = GetParameter(BinaryOpICStub::kRight);
+
+ Handle<Type> left_type = state.GetLeftType(isolate());
+ Handle<Type> right_type = state.GetRightType(isolate());
+ Handle<Type> result_type = state.GetResultType(isolate());
+
+ ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
+ (state.HasSideEffects() || !result_type->Is(Type::None())));
+
+ HValue* result = NULL;
+ if (state.op() == Token::ADD &&
+ (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
+ !left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
+ // For the generic add stub, a fast case for string addition is
+ // performance critical.
+ if (left_type->Maybe(Type::String())) {
+ IfBuilder if_leftisstring(this);
+ if_leftisstring.If<HIsStringAndBranch>(left);
+ if_leftisstring.Then();
+ {
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
+ handle(Type::String(), isolate()), right_type,
+ result_type, state.fixed_right_arg()));
+ }
+ if_leftisstring.Else();
+ {
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
+ left_type, right_type, result_type,
+ state.fixed_right_arg()));
+ }
+ if_leftisstring.End();
+ result = Pop();
+ } else {
+ IfBuilder if_rightisstring(this);
+ if_rightisstring.If<HIsStringAndBranch>(right);
+ if_rightisstring.Then();
+ {
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
+ left_type, handle(Type::String(), isolate()),
+ result_type, state.fixed_right_arg()));
+ }
+ if_rightisstring.Else();
+ {
+ Push(BuildBinaryOperation(
+ state.op(), left, right,
+ left_type, right_type, result_type,
+ state.fixed_right_arg()));
+ }
+ if_rightisstring.End();
+ result = Pop();
+ }
+ } else {
+ result = BuildBinaryOperation(
+ state.op(), left, right,
+ left_type, right_type, result_type,
+ state.fixed_right_arg());
+ }
+
+ // If we encounter a generic argument, the number conversion is
+ // observable, so we cannot afford to bail out after the fact.
+ if (!state.HasSideEffects()) {
+ if (result_type->Is(Type::Smi())) {
+ if (state.op() == Token::SHR) {
+ // TODO(olivf): Replace this with a SmiTagU instruction.
+ // 0x40000000: this number would convert to a negative value when the
+ // register is interpreted as signed;
+ IfBuilder if_of(this);
+ if_of.IfNot<HCompareNumericAndBranch>(result,
+ Add<HConstant>(static_cast<int>(SmiValuesAre32Bits()
+ ? 0x80000000 : 0x40000000)), Token::EQ_STRICT);
+ if_of.Then();
+ if_of.ElseDeopt("UInt->Smi oveflow");
+ if_of.End();
+ }
+ }
+ result = EnforceNumberType(result, result_type);
+ }
+
+ // Reuse the double box of one of the operands if we are allowed to (i.e.
+ // chained binops).
+ if (state.CanReuseDoubleBox()) {
+ HValue* operand = (state.mode() == OVERWRITE_LEFT) ? left : right;
+ IfBuilder if_heap_number(this);
+ if_heap_number.IfNot<HIsSmiAndBranch>(operand);
+ if_heap_number.Then();
+ Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result);
+ Push(operand);
+ if_heap_number.Else();
+ Push(result);
+ if_heap_number.End();
+ result = Pop();
+ }
+
+ return result;
+}
+
+
+Handle<Code> BinaryOpICStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<NewStringAddStub>::BuildCodeInitializedStub() {
+ NewStringAddStub* stub = casted_stub();
+ StringAddFlags flags = stub->flags();
+ PretenureFlag pretenure_flag = stub->pretenure_flag();
+
+ HValue* left = GetParameter(NewStringAddStub::kLeft);
+ HValue* right = GetParameter(NewStringAddStub::kRight);
+
+ // Make sure that both arguments are strings if not known in advance.
+ if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ left = BuildCheckString(left);
+ }
+ if ((flags & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ right = BuildCheckString(right);
+ }
+
+ return BuildStringAdd(left, right, pretenure_flag);
+}
+
+
+Handle<Code> NewStringAddStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
@@ -918,8 +1138,7 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
HValue* native_context,
HValue* code_object) {
Counters* counters = isolate()->counters();
- AddIncrementCounter(counters->fast_new_closure_install_optimized(),
- context());
+ AddIncrementCounter(counters->fast_new_closure_install_optimized());
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
@@ -967,7 +1186,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
is_optimized.Else();
{
- AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
+ AddIncrementCounter(counters->fast_new_closure_try_optimized());
// optimized_map points to a fixed array of 3-element entries
// (native context, optimized code, literals).
// The map must never be empty, so check the first elements.
@@ -1012,8 +1231,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
}
restore_check.Else();
{
- HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key,
- shared_function_entry_length));
+ HValue* keyed_minus = AddUncasted<HSub>(
+ key, shared_function_entry_length);
HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
IfBuilder done_check(this);
@@ -1022,8 +1241,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
done_check.Then();
{
// Hit: fetch the optimized code.
- HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(),
- keyed_minus, graph()->GetConstant1()));
+ HValue* keyed_plus = AddUncasted<HAdd>(
+ keyed_minus, graph()->GetConstant1());
HValue* code_object = Add<HLoadKeyed>(optimized_map,
keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
BuildInstallOptimizedCode(js_function, native_context, code_object);
@@ -1052,11 +1271,12 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Add<HConstant>(factory->empty_fixed_array());
HValue* shared_info = GetParameter(0);
+ AddIncrementCounter(counters->fast_new_closure_total());
+
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
- AddIncrementCounter(counters->fast_new_closure_total(), context());
int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->is_generator());
@@ -1101,4 +1321,20 @@ Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) {
}
+template<>
+HValue* CodeStubGraphBuilder<KeyedLoadDictionaryElementStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(0);
+ HValue* key = GetParameter(1);
+
+ Add<HCheckSmi>(key);
+
+ return BuildUncheckedDictionaryElementLoad(receiver, key);
+}
+
+
+Handle<Code> KeyedLoadDictionaryElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
} } // namespace v8::internal
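// A minimal standalone sketch of the flag-guarded compile timing added to
// DoGenerateCode() above, using std::chrono in place of V8's ElapsedTimer.
// kProfileStubCompilation stands in for
// FLAG_profile_hydrogen_code_stub_compilation.
#include <chrono>
#include <cstdio>

static bool kProfileStubCompilation = true;

template <typename Compile>
static auto TimedCompile(const char* name, Compile compile)
    -> decltype(compile()) {
  typedef std::chrono::steady_clock Clock;
  Clock::time_point start;
  if (kProfileStubCompilation) start = Clock::now();
  auto result = compile();  // the expensive graph-build + codegen step
  if (kProfileStubCompilation) {
    double ms = std::chrono::duration<double, std::milli>(
                    Clock::now() - start).count();
    printf("[Lazy compilation of %s took %0.3f ms]\n", name, ms);
  }
  return result;
}
// Usage: int code = TimedCompile("DemoStub", [] { return 42; });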
diff --git a/chromium/v8/src/code-stubs.cc b/chromium/v8/src/code-stubs.cc
index ace4af42a9e..5f6a1147a15 100644
--- a/chromium/v8/src/code-stubs.cc
+++ b/chromium/v8/src/code-stubs.cc
@@ -41,15 +41,22 @@ namespace internal {
CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
: register_param_count_(-1),
- stack_parameter_count_(NULL),
+ stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
+ continuation_type_(NORMAL_CONTINUATION),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
deoptimization_handler_(NULL),
+ handler_arguments_mode_(DONT_PASS_ARGUMENTS),
miss_handler_(),
has_miss_handler_(false) { }
+void CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(Isolate* isolate) {
+ StubFailureTailCallTrampolineStub::GenerateAheadOfTime(isolate);
+}
+
+
bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
int index = stubs->FindEntry(GetKey());
@@ -103,9 +110,6 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
// Update the static counter each time a new code stub is generated.
isolate->counters()->code_stubs()->Increment();
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(&masm, false);
-
// Generate the code for the stub.
masm.set_generating_stub(true);
NoCurrentFrameScope scope(&masm);
@@ -129,6 +133,11 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
}
+void CodeStub::VerifyPlatformFeatures(Isolate* isolate) {
+ ASSERT(CpuFeatures::VerifyCrossCompiling());
+}
+
+
Handle<Code> CodeStub::GetCode(Isolate* isolate) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
@@ -136,10 +145,14 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
if (UseSpecialCache()
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
- ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
+ ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
+#ifdef DEBUG
+ VerifyPlatformFeatures(isolate);
+#endif
+
{
HandleScope scope(isolate);
@@ -150,8 +163,9 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
- new_object->Disassemble(*GetName());
- PrintF("\n");
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+ new_object->Disassemble(*GetName(), trace_scope.file());
+ PrintF(trace_scope.file(), "\n");
}
#endif
@@ -183,6 +197,7 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
#define DEF_CASE(name) case name: return #name "Stub";
CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
+ case UninitializedMajorKey: return "<UninitializedMajorKey>Stub";
default:
if (!allow_unknown_keys) {
UNREACHABLE();
@@ -203,119 +218,47 @@ void CodeStub::PrintName(StringStream* stream) {
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
- if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
- // The OddballStub handles a number and an oddball, not two oddballs.
- operands_type = BinaryOpIC::GENERIC;
- }
- switch (operands_type) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
+// static
+void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
+ // Generate the uninitialized versions of the stub.
+ for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
+ for (int mode = NO_OVERWRITE; mode <= OVERWRITE_RIGHT; ++mode) {
+ BinaryOpICStub stub(static_cast<Token::Value>(op),
+ static_cast<OverwriteMode>(mode));
+ stub.GetCode(isolate);
+ }
}
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
+ // Generate special versions of the stub.
+ BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
}
-#undef __
+void BinaryOpICStub::PrintState(StringStream* stream) {
+ state_.Print(stream);
+}
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s+%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(left_type_),
- BinaryOpIC::GetName(right_type_));
+// static
+void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
+ const BinaryOpIC::State& state) {
+ BinaryOpICStub stub(state);
+ stub.GetCode(isolate);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
- GenerateBothStringStub(masm);
- return;
+void NewStringAddStub::PrintBaseName(StringStream* stream) {
+ stream->Add("NewStringAddStub");
+ if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ stream->Add("_CheckBoth");
+ } else if ((flags() & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ stream->Add("_CheckLeft");
+ } else if ((flags() & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ stream->Add("_CheckRight");
+ }
+ if (pretenure_flag() == TENURED) {
+ stream->Add("_Tenured");
}
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
}
@@ -471,7 +414,6 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
// bug somewhere in our state transition machinery.
ASSERT(from != to);
- #ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@@ -485,7 +427,6 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
to.Print(&stream);
stream.Add("]\n");
stream.OutputToStdOut();
- #endif
}
@@ -579,14 +520,15 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+void KeyedLoadDictionaryElementPlatformStub::Generate(
+ MacroAssembler* masm) {
KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub;
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -725,8 +667,14 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE);
StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
+ stub2.GetCode(isolate);
+}
+
+
+void StubFailureTailCallTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
+ StubFailureTailCallTrampolineStub stub;
+ stub.GetCode(isolate);
}
@@ -759,12 +707,32 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
}
+void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
+ NumberToStringStub stub;
+ InstallDescriptor(isolate, &stub);
+}
+
+
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
FastNewClosureStub stub(STRICT_MODE, false);
InstallDescriptor(isolate, &stub);
}
+// static
+void BinaryOpICStub::InstallDescriptors(Isolate* isolate) {
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
+ InstallDescriptor(isolate, &stub);
+}
+
+
+// static
+void NewStringAddStub::InstallDescriptors(Isolate* isolate) {
+ NewStringAddStub stub(STRING_ADD_CHECK_NONE, NOT_TENURED);
+ InstallDescriptor(isolate, &stub);
+}
+
+
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: argument_count_(ANY) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
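// A minimal standalone sketch of the ahead-of-time enumeration pattern in
// BinaryOpICStub::GenerateAheadOfTime() above: walk contiguous enum ranges
// by stepping plain ints and casting back. Op, Mode and CompileStub are
// illustrative stand-ins for Token::Value, OverwriteMode and
// stub.GetCode(isolate).
#include <cstdio>

enum Op { kBitOr, kBitAnd, kAdd, kSub, kMod, kLastOp = kMod };
enum Mode { kNoOverwrite, kOverwriteLeft, kOverwriteRight,
            kLastMode = kOverwriteRight };

static void CompileStub(Op op, Mode mode) {
  // In V8 this would construct a BinaryOpICStub and compile it into the
  // stub cache; here we just record the (op, mode) pair.
  printf("pregenerating stub op=%d mode=%d\n", op, mode);
}

static void GenerateAllUninitializedStubs() {
  for (int op = kBitOr; op <= kLastOp; ++op) {
    for (int mode = kNoOverwrite; mode <= kLastMode; ++mode) {
      CompileStub(static_cast<Op>(op), static_cast<Mode>(mode));
    }
  }
}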
diff --git a/chromium/v8/src/code-stubs.h b/chromium/v8/src/code-stubs.h
index 946eb76962d..bedf295d2f9 100644
--- a/chromium/v8/src/code-stubs.h
+++ b/chromium/v8/src/code-stubs.h
@@ -30,8 +30,9 @@
#include "allocation.h"
#include "assembler.h"
-#include "globals.h"
#include "codegen.h"
+#include "globals.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -40,8 +41,9 @@ namespace internal {
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(CallConstruct) \
- V(BinaryOp) \
+ V(BinaryOpIC) \
V(StringAdd) \
+ V(NewStringAdd) \
V(SubString) \
V(StringCompare) \
V(Compare) \
@@ -88,13 +90,15 @@ namespace internal {
V(TransitionElementsKind) \
V(StoreArrayLiteralElement) \
V(StubFailureTrampoline) \
+ V(StubFailureTailCallTrampoline) \
V(ArrayConstructor) \
V(InternalArrayConstructor) \
V(ProfileEntryHook) \
V(StoreGlobal) \
/* IC Handler stubs */ \
V(LoadField) \
- V(KeyedLoadField)
+ V(KeyedLoadField) \
+ V(KeyedArrayCall)
// List of code stubs only used on ARM platforms.
#if V8_TARGET_ARCH_ARM
@@ -122,13 +126,11 @@ namespace internal {
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_MIPS(V)
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-
// Stub is base classes of all stubs.
class CodeStub BASE_EMBEDDED {
public:
enum Major {
+ UninitializedMajorKey = 0,
#define DEF_ENUM(name) name,
CODE_STUB_LIST(DEF_ENUM)
#undef DEF_ENUM
@@ -157,17 +159,8 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
- bool CompilingCallsToThisStubIsGCSafe(Isolate* isolate) {
- bool is_pregenerated = IsPregenerated(isolate);
- Code* code = NULL;
- CHECK(!is_pregenerated || FindCodeInCache(&code, isolate));
- return is_pregenerated;
- }
-
- // See comment above, where Instanceof is defined.
- virtual bool IsPregenerated(Isolate* isolate) { return false; }
-
static void GenerateStubsAheadOfTime(Isolate* isolate);
+ static void GenerateStubsRequiringBuiltinsAheadOfTime(Isolate* isolate);
static void GenerateFPStubs(Isolate* isolate);
// Some stubs put untagged junk on the stack that cannot be scanned by the
@@ -188,8 +181,8 @@ class CodeStub BASE_EMBEDDED {
virtual InlineCacheState GetICState() {
return UNINITIALIZED;
}
- virtual Code::ExtraICState GetExtraICState() {
- return Code::kNoExtraICState;
+ virtual ExtraICState GetExtraICState() {
+ return kNoExtraICState;
}
virtual Code::StubType GetStubType() {
return Code::NORMAL;
@@ -200,19 +193,21 @@ class CodeStub BASE_EMBEDDED {
virtual void PrintName(StringStream* stream);
+ // Returns a name for logging/debugging purposes.
+ SmartArrayPointer<const char> GetName();
+
protected:
static bool CanUseFPRegisters();
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
+ virtual void VerifyPlatformFeatures(Isolate* isolate);
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
- // Returns a name for logging/debugging purposes.
- SmartArrayPointer<const char> GetName();
virtual void PrintBaseName(StringStream* stream);
virtual void PrintState(StringStream* stream) { }
@@ -251,6 +246,7 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
+ STATIC_ASSERT(NUMBER_OF_IDS < (1 << kStubMajorKeyBits));
class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
class MinorKeyBits: public BitField<uint32_t,
kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
@@ -273,31 +269,42 @@ class PlatformCodeStub : public CodeStub {
enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
+enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
+
+enum ContinuationType { NORMAL_CONTINUATION, TAIL_CALL_CONTINUATION };
struct CodeStubInterfaceDescriptor {
CodeStubInterfaceDescriptor();
int register_param_count_;
- const Register* stack_parameter_count_;
+
+ Register stack_parameter_count_;
// if hint_stack_parameter_count_ > 0, the code stub can optimize the
// return sequence. Default value is -1, which means it is ignored.
int hint_stack_parameter_count_;
+ ContinuationType continuation_type_;
StubFunctionMode function_mode_;
Register* register_params_;
+
Address deoptimization_handler_;
+ HandlerArgumentsMode handler_arguments_mode_;
+
+ bool initialized() const { return register_param_count_ >= 0; }
+
+ bool HasTailCallContinuation() const {
+ return continuation_type_ == TAIL_CALL_CONTINUATION;
+ }
int environment_length() const {
- if (stack_parameter_count_ != NULL) {
- return register_param_count_ + 1;
- }
return register_param_count_;
}
- bool initialized() const { return register_param_count_ >= 0; }
-
void SetMissHandler(ExternalReference handler) {
miss_handler_ = handler;
has_miss_handler_ = true;
+ // Our miss handler infrastructure doesn't currently support
+ // variable stack parameter counts.
+ ASSERT(!stack_parameter_count_.is_valid());
}
ExternalReference miss_handler() {
@@ -309,18 +316,27 @@ struct CodeStubInterfaceDescriptor {
return has_miss_handler_;
}
+ Register GetParameterRegister(int index) {
+ return register_params_[index];
+ }
+
+ bool IsParameterCountRegister(int index) {
+ return GetParameterRegister(index).is(stack_parameter_count_);
+ }
+
+ int GetHandlerParameterCount() {
+ int params = environment_length();
+ if (handler_arguments_mode_ == PASS_ARGUMENTS) {
+ params += 1;
+ }
+ return params;
+ }
+
private:
ExternalReference miss_handler_;
bool has_miss_handler_;
};
-// A helper to make up for the fact that type Register is not fully
-// defined outside of the platform directories
-#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \
- ((index) == (descriptor)->register_param_count_) \
- ? *((descriptor)->stack_parameter_count_) \
- : (descriptor)->register_params_[(index)]
-
class HydrogenCodeStub : public CodeStub {
public:
@@ -402,9 +418,7 @@ enum StringAddFlags {
// Check right parameter.
STRING_ADD_CHECK_RIGHT = 1 << 1,
// Check both parameters.
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
- // Stub needs a frame before calling the runtime
- STRING_ADD_ERECT_FRAME = 1 << 2
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
};
} } // namespace v8::internal
@@ -464,6 +478,27 @@ class ToNumberStub: public HydrogenCodeStub {
};
+class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
+ public:
+ NumberToStringStub() {}
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kNumber = 0;
+
+ private:
+ virtual Major MajorKey() V8_OVERRIDE { return NumberToString; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
+};
+
+
class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
@@ -531,51 +566,6 @@ class FastNewBlockContextStub : public PlatformCodeStub {
int MinorKey() { return slots_; }
};
-class StoreGlobalStub : public HydrogenCodeStub {
- public:
- StoreGlobalStub(StrictModeFlag strict_mode, bool is_constant) {
- bit_field_ = StrictModeBits::encode(strict_mode) |
- IsConstantBits::encode(is_constant);
- }
-
- virtual Handle<Code> GenerateCode(Isolate* isolate);
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- virtual Code::Kind GetCodeKind() const { return Code::STORE_IC; }
- virtual InlineCacheState GetICState() { return MONOMORPHIC; }
- virtual Code::ExtraICState GetExtraICState() { return bit_field_; }
-
- bool is_constant() {
- return IsConstantBits::decode(bit_field_);
- }
- void set_is_constant(bool value) {
- bit_field_ = IsConstantBits::update(bit_field_, value);
- }
-
- Representation representation() {
- return Representation::FromKind(RepresentationBits::decode(bit_field_));
- }
- void set_representation(Representation r) {
- bit_field_ = RepresentationBits::update(bit_field_, r.kind());
- }
-
- private:
- virtual int NotMissMinorKey() { return GetExtraICState(); }
- Major MajorKey() { return StoreGlobal; }
-
- class StrictModeBits: public BitField<StrictModeFlag, 0, 1> {};
- class IsConstantBits: public BitField<bool, 1, 1> {};
- class RepresentationBits: public BitField<Representation::Kind, 2, 8> {};
-
- int bit_field_;
-
- DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
-};
-
-
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
// Maximum length of copied elements array.
@@ -682,8 +672,6 @@ class CreateAllocationSiteStub : public HydrogenCodeStub {
virtual Handle<Code> GenerateCode(Isolate* isolate);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
-
static void GenerateAheadOfTime(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
@@ -830,19 +818,12 @@ class FunctionPrototypeStub: public ICStub {
class StringLengthStub: public ICStub {
public:
- StringLengthStub(Code::Kind kind, bool support_wrapper)
- : ICStub(kind), support_wrapper_(support_wrapper) { }
+ explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
virtual void Generate(MacroAssembler* masm);
private:
STATIC_ASSERT(KindBits::kSize == 4);
- class WrapperModeBits: public BitField<bool, 4, 1> {};
- virtual CodeStub::Major MajorKey() { return StringLength; }
- virtual int MinorKey() {
- return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
- }
-
- bool support_wrapper_;
+ virtual CodeStub::Major MajorKey() { return StringLength; }
};
@@ -852,8 +833,8 @@ class StoreICStub: public ICStub {
: ICStub(kind), strict_mode_(strict_mode) { }
protected:
- virtual Code::ExtraICState GetExtraICState() {
- return strict_mode_;
+ virtual ExtraICState GetExtraICState() {
+ return StoreIC::ComputeExtraICState(strict_mode_);
}
private:
@@ -884,7 +865,6 @@ class HICStub: public HydrogenCodeStub {
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
protected:
- HICStub() { }
class KindBits: public BitField<Code::Kind, 0, 4> {};
virtual Code::Kind kind() const = 0;
};
@@ -892,18 +872,19 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub {
public:
- virtual Code::Kind GetCodeKind() const { return Code::STUB; }
+ virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
virtual int GetStubFlags() { return kind(); }
protected:
HandlerStub() : HICStub() { }
+ virtual int NotMissMinorKey() { return bit_field_; }
+ int bit_field_;
};
class LoadFieldStub: public HandlerStub {
public:
- LoadFieldStub(bool inobject, int index, Representation representation)
- : HandlerStub() {
+ LoadFieldStub(bool inobject, int index, Representation representation) {
Initialize(Code::LOAD_IC, inobject, index, representation);
}
@@ -937,7 +918,7 @@ class LoadFieldStub: public HandlerStub {
return UnboxedDoubleBits::decode(bit_field_);
}
- virtual Code::StubType GetStubType() { return Code::FIELD; }
+ virtual Code::StubType GetStubType() { return Code::FAST; }
protected:
LoadFieldStub() : HandlerStub() { }
@@ -959,9 +940,61 @@ class LoadFieldStub: public HandlerStub {
class IndexBits: public BitField<int, 5, 11> {};
class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
virtual CodeStub::Major MajorKey() { return LoadField; }
- virtual int NotMissMinorKey() { return bit_field_; }
+};
+
+
+class StoreGlobalStub : public HandlerStub {
+ public:
+ explicit StoreGlobalStub(bool is_constant) {
+ bit_field_ = IsConstantBits::encode(is_constant);
+ }
+
+ Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
+ Map* receiver_map,
+ PropertyCell* cell) {
+ Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
+ // Replace the placeholder cell and global object map with the actual global
+ // cell and receiver map.
+ Map* cell_map = isolate->heap()->global_property_cell_map();
+ code->ReplaceNthObject(1, cell_map, cell);
+ code->ReplaceNthObject(1, isolate->heap()->meta_map(), receiver_map);
+ return code;
+ }
+
+ virtual Code::Kind kind() const { return Code::STORE_IC; }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual ExtraICState GetExtraICState() { return bit_field_; }
+
+ bool is_constant() {
+ return IsConstantBits::decode(bit_field_);
+ }
+ void set_is_constant(bool value) {
+ bit_field_ = IsConstantBits::update(bit_field_, value);
+ }
+
+ Representation representation() {
+ return Representation::FromKind(RepresentationBits::decode(bit_field_));
+ }
+ void set_representation(Representation r) {
+ bit_field_ = RepresentationBits::update(bit_field_, r.kind());
+ }
+
+ private:
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
+ Major MajorKey() { return StoreGlobal; }
+
+ class IsConstantBits: public BitField<bool, 0, 1> {};
+ class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
int bit_field_;
+
+ DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
};
@@ -983,156 +1016,141 @@ class KeyedLoadFieldStub: public LoadFieldStub {
};
-class BinaryOpStub: public PlatformCodeStub {
+class KeyedArrayCallStub: public HICStub {
public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- platform_specific_bit_(false),
- left_type_(BinaryOpIC::UNINITIALIZED),
- right_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- encoded_right_arg_(false, encode_arg_value(1)) {
- Initialize();
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- BinaryOpIC::TypeInfo result_type,
- Maybe<int32_t> fixed_right_arg)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- platform_specific_bit_(PlatformSpecificBits::decode(key)),
- left_type_(left_type),
- right_type_(right_type),
- result_type_(result_type),
- encoded_right_arg_(fixed_right_arg.has_value,
- encode_arg_value(fixed_right_arg.value)) { }
-
- static void decode_types_from_minor_key(int minor_key,
- BinaryOpIC::TypeInfo* left_type,
- BinaryOpIC::TypeInfo* right_type,
- BinaryOpIC::TypeInfo* result_type) {
- *left_type =
- static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
- *right_type =
- static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
- *result_type =
- static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
- }
-
- static Token::Value decode_op_from_minor_key(int minor_key) {
- return static_cast<Token::Value>(OpBits::decode(minor_key));
- }
-
- static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
- return Maybe<int>(
- HasFixedRightArgBits::decode(minor_key),
- decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
- }
-
- int fixed_right_arg_value() const {
- return decode_arg_value(encoded_right_arg_.value);
- }
-
- static bool can_encode_arg_value(int32_t value) {
- return value > 0 &&
- IsPowerOf2(value) &&
- FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
- }
-
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
+ KeyedArrayCallStub(bool holey, int argc) : HICStub(), argc_(argc) {
+ bit_field_ = ContextualBits::encode(false) | HoleyBits::encode(holey);
+ }
+
+ virtual Code::Kind kind() const { return Code::KEYED_CALL_IC; }
+ virtual ExtraICState GetExtraICState() { return bit_field_; }
+
+ ElementsKind elements_kind() {
+ return HoleyBits::decode(bit_field_) ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+ }
+
+ int argc() { return argc_; }
+ virtual int GetStubFlags() { return argc(); }
+
+ static bool IsHoley(Handle<Code> code) {
+ ExtraICState state = code->extra_ic_state();
+ return HoleyBits::decode(state);
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
private:
- Token::Value op_;
- OverwriteMode mode_;
- bool platform_specific_bit_; // Indicates SSE3 on IA32.
+ virtual int NotMissMinorKey() {
+ return GetExtraICState() | ArgcBits::encode(argc_);
+ }
+
+ class ContextualBits: public BitField<bool, 0, 1> {};
+ STATIC_ASSERT(CallICBase::Contextual::kShift == ContextualBits::kShift);
+ STATIC_ASSERT(CallICBase::Contextual::kSize == ContextualBits::kSize);
+ class HoleyBits: public BitField<bool, 1, 1> {};
+ STATIC_ASSERT(Code::kArgumentsBits <= kStubMinorKeyBits - 2);
+ class ArgcBits: public BitField<int, 2, Code::kArgumentsBits> {};
+ virtual CodeStub::Major MajorKey() { return KeyedArrayCall; }
+ int bit_field_;
+ int argc_;
+};
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo left_type_;
- BinaryOpIC::TypeInfo right_type_;
- BinaryOpIC::TypeInfo result_type_;
- Maybe<int> encoded_right_arg_;
+class BinaryOpICStub V8_FINAL : public HydrogenCodeStub {
+ public:
+ BinaryOpICStub(Token::Value op, OverwriteMode mode)
+ : HydrogenCodeStub(UNINITIALIZED), state_(op, mode) {}
+
+ explicit BinaryOpICStub(const BinaryOpIC::State& state) : state_(state) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
- static int encode_arg_value(int32_t value) {
- ASSERT(can_encode_arg_value(value));
- return WhichPowerOf2(value);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+ return Code::BINARY_OP_IC;
}
- static int32_t decode_arg_value(int value) {
- return 1 << value;
+ virtual InlineCacheState GetICState() V8_OVERRIDE {
+ return state_.GetICState();
}
- virtual void PrintName(StringStream* stream);
+ virtual ExtraICState GetExtraICState() V8_OVERRIDE {
+ return state_.GetExtraICState();
+ }
- // Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
- // Note: We actually do not need 7 bits for the operation, just 4 bits to
- // encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class PlatformSpecificBits: public BitField<bool, 9, 1> {};
- class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
- class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
- class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
- class FixedRightArgValueBits: public BitField<int, 20, 5> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | PlatformSpecificBits::encode(platform_specific_bit_)
- | LeftTypeBits::encode(left_type_)
- | RightTypeBits::encode(right_type_)
- | ResultTypeBits::encode(result_type_)
- | HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
- | FixedRightArgValueBits::encode(encoded_right_arg_.value);
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
}
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
- // Platform-independent implementation.
- void Generate(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
+ const BinaryOpIC::State& state() const { return state_; }
- // Platform-independent signature, platform-specific implementation.
- void Initialize();
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
-
- // Entirely platform-specific methods are defined as static helper
- // functions in the <arch>/code-stubs-<arch>.cc files.
-
- virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
+ virtual void PrintState(StringStream* stream) V8_OVERRIDE;
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(Max(left_type_, right_type_));
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kLeft = 0;
+ static const int kRight = 1;
+
+ private:
+ static void GenerateAheadOfTime(Isolate* isolate,
+ const BinaryOpIC::State& state);
+
+ virtual Major MajorKey() V8_OVERRIDE { return BinaryOpIC; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return GetExtraICState(); }
+
+ BinaryOpIC::State state_;
+
+ DISALLOW_COPY_AND_ASSIGN(BinaryOpICStub);
+};
+
+
+// TODO(bmeurer): Rename to StringAddStub once the old StringAddStub is gone.
+class NewStringAddStub V8_FINAL : public HydrogenCodeStub {
+ public:
+ NewStringAddStub(StringAddFlags flags, PretenureFlag pretenure_flag)
+ : bit_field_(StringAddFlagsBits::encode(flags) |
+ PretenureFlagBits::encode(pretenure_flag)) {}
+
+ StringAddFlags flags() const {
+ return StringAddFlagsBits::decode(bit_field_);
}
- virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
+ PretenureFlag pretenure_flag() const {
+ return PretenureFlagBits::decode(bit_field_);
}
- friend class CodeGenerator;
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kLeft = 0;
+ static const int kRight = 1;
+
+ private:
+ class StringAddFlagsBits: public BitField<StringAddFlags, 0, 2> {};
+ class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
+ uint32_t bit_field_;
+
+ virtual Major MajorKey() V8_OVERRIDE { return NewStringAdd; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return bit_field_; }
+
+ virtual void PrintBaseName(StringStream* stream) V8_OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(NewStringAddStub);
};
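
The minor keys above pack several small enums into a single integer via BitField. Here is a minimal standalone sketch of that encode/decode pattern; the field layout and enum values are illustrative stand-ins, not V8's actual stub key definitions:

#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative reimplementation of the BitField helper.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

enum StringAddFlags { STRING_ADD_CHECK_NONE = 0, STRING_ADD_CHECK_BOTH = 1 };
enum PretenureFlag { NOT_TENURED = 0, TENURED = 1 };

typedef BitField<StringAddFlags, 0, 2> StringAddFlagsBits;  // bits 0..1
typedef BitField<PretenureFlag, 2, 1> PretenureFlagBits;    // bit 2

int main() {
  uint32_t bit_field = StringAddFlagsBits::encode(STRING_ADD_CHECK_BOTH) |
                       PretenureFlagBits::encode(TENURED);
  std::cout << StringAddFlagsBits::decode(bit_field) << " "
            << PretenureFlagBits::decode(bit_field) << "\n";  // 1 1
  return 0;
}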
@@ -1212,7 +1230,7 @@ class CompareNilICStub : public HydrogenCodeStub {
explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { }
- CompareNilICStub(Code::ExtraICState ic_state,
+ CompareNilICStub(ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
: HydrogenCodeStub(init_state),
nil_value_(NilValueField::decode(ic_state)),
@@ -1249,7 +1267,7 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual Handle<Code> GenerateCode(Isolate* isolate);
- virtual Code::ExtraICState GetExtraICState() {
+ virtual ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
TypesField::encode(state_.ToIntegral());
}
@@ -1315,9 +1333,13 @@ class CEntryStub : public PlatformCodeStub {
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
+ protected:
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+  }
+
private:
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
@@ -1662,27 +1684,27 @@ class StringCharAtGenerator {
};
-class AllowStubCallsScope {
+class KeyedLoadDictionaryElementStub : public HydrogenCodeStub {
public:
- AllowStubCallsScope(MacroAssembler* masm, bool allow)
- : masm_(masm), previous_allow_(masm->allow_stub_calls()) {
- masm_->set_allow_stub_calls(allow);
- }
- ~AllowStubCallsScope() {
- masm_->set_allow_stub_calls(previous_allow_);
- }
+ KeyedLoadDictionaryElementStub() {}
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
private:
- MacroAssembler* masm_;
- bool previous_allow_;
+ Major MajorKey() { return KeyedLoadElement; }
+ int NotMissMinorKey() { return DICTIONARY_ELEMENTS; }
- DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
};
-class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
+class KeyedLoadDictionaryElementPlatformStub : public PlatformCodeStub {
public:
- KeyedLoadDictionaryElementStub() {}
+ KeyedLoadDictionaryElementPlatformStub() {}
void Generate(MacroAssembler* masm);
@@ -1690,7 +1712,7 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
Major MajorKey() { return KeyedLoadElement; }
int MinorKey() { return DICTIONARY_ELEMENTS; }
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementPlatformStub);
};
@@ -1705,7 +1727,9 @@ class DoubleToIStub : public PlatformCodeStub {
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
- SkipFastPathBits::encode(skip_fastpath);
+ SkipFastPathBits::encode(skip_fastpath) |
+            SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
+                (CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1) : 0);
}
Register source() {
@@ -1734,6 +1758,11 @@ class DoubleToIStub : public PlatformCodeStub {
virtual bool SometimesSetsUpAFrame() { return false; }
+ protected:
+ virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE {
+ ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2));
+ }
+
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
@@ -1748,6 +1777,8 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
+ class SSEBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
@@ -1900,11 +1931,6 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
return ContextCheckModeBits::decode(bit_field_);
}
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE {
- // We only pre-generate stubs that verify correct context
- return context_mode() == CONTEXT_CHECK_REQUIRED;
- }
-
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
@@ -2001,7 +2027,6 @@ class InternalArrayConstructorStubBase : public HydrogenCodeStub {
kind_ = kind;
}
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
@@ -2146,7 +2171,7 @@ class ToBooleanStub: public HydrogenCodeStub {
explicit ToBooleanStub(Types types = Types())
: types_(types) { }
- explicit ToBooleanStub(Code::ExtraICState state)
+ explicit ToBooleanStub(ExtraICState state)
: types_(static_cast<byte>(state)) { }
bool UpdateStatus(Handle<Object> object);
@@ -2173,7 +2198,7 @@ class ToBooleanStub: public HydrogenCodeStub {
return ToBooleanStub(UNINITIALIZED).GetCode(isolate);
}
- virtual Code::ExtraICState GetExtraICState() {
+ virtual ExtraICState GetExtraICState() {
return types_.ToIntegral();
}
@@ -2265,8 +2290,6 @@ class StubFailureTrampolineStub : public PlatformCodeStub {
explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
: fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
-
static void GenerateAheadOfTime(Isolate* isolate);
private:
@@ -2288,6 +2311,25 @@ class StubFailureTrampolineStub : public PlatformCodeStub {
};
+class StubFailureTailCallTrampolineStub : public PlatformCodeStub {
+ public:
+ StubFailureTailCallTrampolineStub() : fp_registers_(CanUseFPRegisters()) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+ class FPRegisters: public BitField<bool, 0, 1> {};
+ Major MajorKey() { return StubFailureTailCallTrampoline; }
+ int MinorKey() { return FPRegisters::encode(fp_registers_); }
+
+ void Generate(MacroAssembler* masm);
+
+ bool fp_registers_;
+
+ DISALLOW_COPY_AND_ASSIGN(StubFailureTailCallTrampolineStub);
+};
+
+
class ProfileEntryHookStub : public PlatformCodeStub {
public:
explicit ProfileEntryHookStub() {}
diff --git a/chromium/v8/src/codegen.cc b/chromium/v8/src/codegen.cc
index d33c7f06bd4..28f7d6c099e 100644
--- a/chromium/v8/src/codegen.cc
+++ b/chromium/v8/src/codegen.cc
@@ -113,10 +113,12 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
masm->GetCode(&desc);
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
- false, is_crankshafted);
+ false, is_crankshafted,
+ info->prologue_offset());
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
- code->set_prologue_offset(info->prologue_offset());
+ isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
+ code->instruction_size());
return code;
}
@@ -132,10 +134,14 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::FUNCTION;
+
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ if (print_source) {
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
+ PrintF(tracing_scope.file(), "--- Raw source ---\n");
ConsStringIteratorOp op;
StringCharacterStream stream(String::cast(script->source()),
&op,
@@ -145,27 +151,36 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
int source_len =
function->end_position() - function->start_position() + 1;
for (int i = 0; i < source_len; i++) {
- if (stream.HasMore()) PrintF("%c", stream.GetNext());
+ if (stream.HasMore()) {
+ PrintF(tracing_scope.file(), "%c", stream.GetNext());
+ }
}
- PrintF("\n\n");
+ PrintF(tracing_scope.file(), "\n\n");
}
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
- PrintF("--- Unoptimized code ---\n");
+ PrintF(tracing_scope.file(), "--- Unoptimized code ---\n");
info->closure()->shared()->code()->Disassemble(
- *function->debug_name()->ToCString());
+ *function->debug_name()->ToCString(), tracing_scope.file());
}
- PrintF("--- Optimized code ---\n");
+ PrintF(tracing_scope.file(), "--- Optimized code ---\n");
} else {
- PrintF("--- Code ---\n");
+ PrintF(tracing_scope.file(), "--- Code ---\n");
+ }
+ if (print_source) {
+ PrintF(tracing_scope.file(),
+ "source_position = %d\n", function->start_position());
}
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
- code->Disassemble(CodeStub::MajorName(major_key, false));
+ code->Disassemble(CodeStub::MajorName(major_key, false),
+ tracing_scope.file());
} else {
- code->Disassemble(*function->debug_name()->ToCString());
+ code->Disassemble(*function->debug_name()->ToCString(),
+ tracing_scope.file());
}
+ PrintF(tracing_scope.file(), "--- End code ---\n");
}
#endif // ENABLE_DISASSEMBLER
}
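
All of the PrintF calls above now route through a CodeTracer::Scope. A rough sketch of that RAII idea, assuming the scope merely hands out a FILE* for the duration of one code object's trace (stdout here; the real tracer can redirect to a per-isolate file):

#include <cstdio>

class TraceScope {
 public:
  explicit TraceScope(FILE* stream) : stream_(stream) {}
  FILE* file() const { return stream_; }
 private:
  FILE* stream_;
};

int main() {
  TraceScope tracing_scope(stdout);  // stand-in for CodeTracer::Scope
  std::fprintf(tracing_scope.file(), "--- Code ---\n");
  std::fprintf(tracing_scope.file(), "--- End code ---\n");
  return 0;
}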
diff --git a/chromium/v8/src/codegen.h b/chromium/v8/src/codegen.h
index ea202969162..33672a2b341 100644
--- a/chromium/v8/src/codegen.h
+++ b/chromium/v8/src/codegen.h
@@ -112,6 +112,8 @@ class ElementsTransitionGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
+static const int kNumberDictionaryProbes = 4;
+
} } // namespace v8::internal
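
kNumberDictionaryProbes caps how many dictionary probes generated code attempts inline before bailing out to the runtime. A sketch of that fixed-probe idea against a generic open-addressed table; the probe sequence and table layout are stand-ins, not V8's NumberDictionary:

#include <cstdint>
#include <iostream>
#include <vector>

static const int kNumberDictionaryProbes = 4;

// Probe a power-of-two-sized table a fixed number of times.
bool ProbeFast(const std::vector<int64_t>& keys, uint32_t hash, int64_t key) {
  uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;
  uint32_t entry = hash & mask;
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    if (keys[entry] == key) return true;  // inline fast path hit
    entry = (entry + i + 1) & mask;       // next probe
  }
  return false;  // give up; caller takes the generic slow path
}

int main() {
  std::vector<int64_t> keys(8, -1);
  keys[3] = 42;
  std::cout << ProbeFast(keys, 3u, 42) << "\n";  // 1
  return 0;
}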
diff --git a/chromium/v8/src/compiler.cc b/chromium/v8/src/compiler.cc
index 1aa3776bde9..83f9ab2daff 100644
--- a/chromium/v8/src/compiler.cc
+++ b/chromium/v8/src/compiler.cc
@@ -59,7 +59,8 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ osr_pc_offset_(0),
+ parameter_count_(0) {
Initialize(script->GetIsolate(), BASE, zone);
}
@@ -70,7 +71,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ osr_pc_offset_(0),
+ parameter_count_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -83,7 +85,8 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ osr_pc_offset_(0),
+ parameter_count_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -94,7 +97,8 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()),
- osr_pc_offset_(0) {
+ osr_pc_offset_(0),
+ parameter_count_(0) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -112,7 +116,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
- prologue_offset_ = kPrologueOffsetNotSet;
+ prologue_offset_ = Code::kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
@@ -123,7 +127,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
mode_ = STUB;
return;
}
- mode_ = isolate->use_crankshaft() ? mode : NONOPT;
+ mode_ = mode;
abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
@@ -184,8 +188,12 @@ void CompilationInfo::RollbackDependencies() {
int CompilationInfo::num_parameters() const {
- ASSERT(!IsStub());
- return scope()->num_parameters();
+ if (IsStub()) {
+ ASSERT(parameter_count_ > 0);
+ return parameter_count_;
+ } else {
+ return scope()->num_parameters();
+ }
}
@@ -260,7 +268,7 @@ static bool AlwaysFullCompiler(Isolate* isolate) {
}
-void OptimizingCompiler::RecordOptimizationStats() {
+void RecompileJob::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
if (!function->IsOptimized()) {
// Concurrent recompilation and OSR may race. Increment only once.
@@ -300,23 +308,60 @@ void OptimizingCompiler::RecordOptimizationStats() {
// A return value of true indicates the compilation pipeline is still
// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
- OptimizingCompiler compiler(info);
- OptimizingCompiler::Status status = compiler.CreateGraph();
+ RecompileJob job(info);
+ RecompileJob::Status status = job.CreateGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- return status != OptimizingCompiler::FAILED;
+ if (status != RecompileJob::SUCCEEDED) {
+ return status != RecompileJob::FAILED;
}
- status = compiler.OptimizeGraph();
- if (status != OptimizingCompiler::SUCCEEDED) {
- status = compiler.AbortOptimization();
- return status != OptimizingCompiler::FAILED;
+ status = job.OptimizeGraph();
+ if (status != RecompileJob::SUCCEEDED) {
+ status = job.AbortOptimization();
+ return status != RecompileJob::FAILED;
}
- status = compiler.GenerateAndInstallCode();
- return status != OptimizingCompiler::FAILED;
+ status = job.GenerateAndInstallCode();
+ return status != RecompileJob::FAILED;
}
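
As the comment above notes, MakeCrankshaftCode reports whether the pipeline can continue, not whether optimization succeeded. A compilable sketch of that three-phase, status-driven protocol, with a stubbed-out job standing in for RecompileJob:

#include <iostream>

enum Status { FAILED, BAILED_OUT, SUCCEEDED };

// Stubbed-out job; the real phases build, optimize and emit code.
struct Job {
  Status CreateGraph() { return SUCCEEDED; }
  Status OptimizeGraph() { return SUCCEEDED; }
  Status GenerateAndInstallCode() { return SUCCEEDED; }
  Status AbortOptimization() { return BAILED_OUT; }
};

// True means the wider pipeline continues (possibly unoptimized),
// not that optimized code was produced.
bool MakeOptimizedCode(Job* job) {
  Status status = job->CreateGraph();
  if (status != SUCCEEDED) return status != FAILED;
  status = job->OptimizeGraph();
  if (status != SUCCEEDED) return job->AbortOptimization() != FAILED;
  return job->GenerateAndInstallCode() != FAILED;
}

int main() {
  Job job;
  std::cout << (MakeOptimizedCode(&job) ? "continue" : "fail") << "\n";
  return 0;
}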
-OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
+class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
+ public:
+ explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
+ : HOptimizedGraphBuilder(info) {
+ }
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ STATEMENT_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ virtual void Visit##type(type* node) V8_OVERRIDE { \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ MODULE_NODE_LIST(DEF_VISIT)
+ DECLARATION_NODE_LIST(DEF_VISIT)
+ AUXILIARY_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
+
+
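
The DEF_VISIT macros above generate a decorator: every overridden visit records the node's source position, then delegates to the base builder. The same shape written by hand, with hypothetical node and builder types:

#include <iostream>

struct Node { int position; };

class Builder {
 public:
  virtual ~Builder() {}
  virtual void VisitNode(Node* node) { std::cout << "visit\n"; }
};

// Decorator in the spirit of the DEF_VISIT-generated overrides:
// record the position, then delegate to the base visitor.
class BuilderWithPositions : public Builder {
 public:
  virtual void VisitNode(Node* node) {
    if (node->position != kNoPosition) SetSourcePosition(node->position);
    Builder::VisitNode(node);
  }
 private:
  static const int kNoPosition = -1;
  void SetSourcePosition(int pos) { std::cout << "position " << pos << "\n"; }
};

int main() {
  Node n = { 42 };
  BuilderWithPositions builder;
  builder.VisitNode(&n);
  return 0;
}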
+RecompileJob::Status RecompileJob::CreateGraph() {
ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
@@ -422,7 +467,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// Type-check the function.
AstTyper::Run(info());
- graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
+ graph_builder_ = FLAG_emit_opt_code_positions
+ ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
+ : new(info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@@ -455,7 +502,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
-OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
+RecompileJob::Status RecompileJob::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -466,7 +513,7 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
ASSERT(graph_ != NULL);
BailoutReason bailout_reason = kNoReason;
if (!graph_->Optimize(&bailout_reason)) {
- if (bailout_reason == kNoReason) graph_builder_->Bailout(bailout_reason);
+ if (bailout_reason != kNoReason) graph_builder_->Bailout(bailout_reason);
return SetLastStatus(BAILED_OUT);
} else {
chunk_ = LChunk::NewChunk(graph_);
@@ -478,7 +525,7 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
}
-OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
+RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
ASSERT(!info()->HasAbortedDueToDependencyChange());
DisallowCodeDependencyChange no_dependency_change;
@@ -558,6 +605,33 @@ static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
}
+// Sets the expected number of properties from the compiler's estimate.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+ int estimate) {
+ // See the comment in SetExpectedNofProperties.
+ if (shared->live_objects_may_exist()) return;
+
+ // If no properties are added in the constructor, they are more likely
+ // to be added later.
+ if (estimate == 0) estimate = 2;
+
+ // TODO(yangguo): check whether those heuristics are still up-to-date.
+ // We do not shrink objects that go into a snapshot (yet), so we adjust
+ // the estimate conservatively.
+ if (Serializer::enabled()) {
+ estimate += 2;
+ } else if (FLAG_clever_optimizations) {
+ // Inobject slack tracking will reclaim redundant inobject space later,
+ // so we can afford to adjust the estimate generously.
+ estimate += 8;
+ } else {
+ estimate += 3;
+ }
+
+ shared->set_expected_nof_properties(estimate);
+}
+
+
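The heuristic above can be read as a small pure function. A worked sketch with the serializer and optimization switches reduced to plain booleans:

#include <iostream>

// The estimate adjustment from the function above, for illustration.
int AdjustEstimate(int estimate, bool serializing, bool clever_optimizations) {
  if (estimate == 0) estimate = 2;       // properties likely added later
  if (serializing) return estimate + 2;  // snapshot objects are not shrunk
  return estimate + (clever_optimizations ? 8 : 3);
}

int main() {
  std::cout << AdjustEstimate(0, false, true) << "\n";  // 2 + 8 = 10
  std::cout << AdjustEstimate(5, true, false) << "\n";  // 5 + 2 = 7
  return 0;
}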
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
@@ -602,66 +676,70 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
}
}
- // Measure how long it takes to do the compilation; only take the
- // rest of the function into account to avoid overlap with the
- // parsing statistics.
- HistogramTimer* rate = info->is_eval()
- ? info->isolate()->counters()->compile_eval()
- : info->isolate()->counters()->compile();
- HistogramTimerScope timer(rate);
-
- // Compile the code.
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
- if (!MakeCode(info)) {
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return Handle<SharedFunctionInfo>::null();
- }
+ Handle<SharedFunctionInfo> result;
+ {
+ // Measure how long it takes to do the compilation; only take the
+ // rest of the function into account to avoid overlap with the
+ // parsing statistics.
+ HistogramTimer* rate = info->is_eval()
+ ? info->isolate()->counters()->compile_eval()
+ : info->isolate()->counters()->compile();
+ HistogramTimerScope timer(rate);
- // Allocate function.
- ASSERT(!info->code().is_null());
- Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- lit->is_generator(),
- info->code(),
- ScopeInfo::Create(info->scope(), info->zone()));
-
- ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
- if (script->name()->IsString()) {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- String::cast(script->name())));
- GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
- script,
- info->code(),
- info));
- } else {
- PROFILE(isolate, CodeCreateEvent(
- info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *info->code(),
- *result,
- info,
- isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
- }
+ // Compile the code.
+ if (!MakeCode(info)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return Handle<SharedFunctionInfo>::null();
+ }
+
+ // Allocate function.
+ ASSERT(!info->code().is_null());
+ result =
+ isolate->factory()->NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ lit->is_generator(),
+ info->code(),
+ ScopeInfo::Create(info->scope(), info->zone()));
-  // Hint to the runtime system, used when allocating space for the
-  // initial properties, by setting the expected number of properties
-  // for the instances of the function.
- SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
+ ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+ Compiler::SetFunctionInfo(result, lit, true, script);
- script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ if (script->name()->IsString()) {
+ PROFILE(isolate, CodeCreateEvent(
+ info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *info->code(),
+ *result,
+ info,
+ String::cast(script->name())));
+ GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
+ script,
+ info->code(),
+ info));
+ } else {
+ PROFILE(isolate, CodeCreateEvent(
+ info->is_eval()
+ ? Logger::EVAL_TAG
+ : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+ *info->code(),
+ *result,
+ info,
+ isolate->heap()->empty_string()));
+ GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
+ }
+
+      // Hint to the runtime system, used when allocating space for the
+      // initial properties, by setting the expected number of properties
+      // for the instances of the function.
+ SetExpectedNofPropertiesFromEstimate(result,
+ lit->expected_property_count());
+
+ script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+ }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
@@ -1035,16 +1113,15 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
info->SaveHandles();
if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
- OptimizingCompiler* compiler =
- new(info->zone()) OptimizingCompiler(*info);
- OptimizingCompiler::Status status = compiler->CreateGraph();
- if (status == OptimizingCompiler::SUCCEEDED) {
+ RecompileJob* job = new(info->zone()) RecompileJob(*info);
+ RecompileJob::Status status = job->CreateGraph();
+ if (status == RecompileJob::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
- isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ isolate->optimizing_compiler_thread()->QueueForOptimization(job);
ASSERT(!isolate->has_pending_exception());
return true;
- } else if (status == OptimizingCompiler::BAILED_OUT) {
+ } else if (status == RecompileJob::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
@@ -1057,9 +1134,8 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
}
-Handle<Code> Compiler::InstallOptimizedCode(
- OptimizingCompiler* optimizing_compiler) {
- SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
+ SmartPointer<CompilationInfo> info(job->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
@@ -1080,24 +1156,24 @@ Handle<Code> Compiler::InstallOptimizedCode(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
- OptimizingCompiler::Status status = optimizing_compiler->last_status();
+ RecompileJob::Status status = job->last_status();
if (info->HasAbortedDueToDependencyChange()) {
info->set_bailout_reason(kBailedOutDueToDependencyChange);
- status = optimizing_compiler->AbortOptimization();
- } else if (status != OptimizingCompiler::SUCCEEDED) {
+ status = job->AbortOptimization();
+ } else if (status != RecompileJob::SUCCEEDED) {
info->set_bailout_reason(kFailedBailedOutLastTime);
- status = optimizing_compiler->AbortOptimization();
+ status = job->AbortOptimization();
} else if (isolate->DebuggerHasBreakPoints()) {
info->set_bailout_reason(kDebuggerIsActive);
- status = optimizing_compiler->AbortOptimization();
+ status = job->AbortOptimization();
} else {
- status = optimizing_compiler->GenerateAndInstallCode();
- ASSERT(status == OptimizingCompiler::SUCCEEDED ||
- status == OptimizingCompiler::BAILED_OUT);
+ status = job->GenerateAndInstallCode();
+ ASSERT(status == RecompileJob::SUCCEEDED ||
+ status == RecompileJob::BAILED_OUT);
}
InstallCodeCommon(*info);
- if (status == OptimizingCompiler::SUCCEEDED) {
+ if (status == RecompileJob::SUCCEEDED) {
Handle<Code> code = info->code();
ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
@@ -1118,8 +1194,8 @@ Handle<Code> Compiler::InstallOptimizedCode(
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
ASSERT(!info->closure()->IsInRecompileQueue());
- return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
- : Handle<Code>::null();
+ return (status == RecompileJob::SUCCEEDED) ? info->code()
+ : Handle<Code>::null();
}
diff --git a/chromium/v8/src/compiler.h b/chromium/v8/src/compiler.h
index 8ceb61db9ce..080907e390c 100644
--- a/chromium/v8/src/compiler.h
+++ b/chromium/v8/src/compiler.h
@@ -35,8 +35,6 @@
namespace v8 {
namespace internal {
-static const int kPrologueOffsetNotSet = -1;
-
class ScriptDataImpl;
class HydrogenCodeStub;
@@ -86,6 +84,7 @@ class CompilationInfo {
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
+ uint32_t osr_pc_offset() const { return osr_pc_offset_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
@@ -99,6 +98,10 @@ class CompilationInfo {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
+ void set_parameter_count(int parameter_count) {
+ ASSERT(IsStub());
+ parameter_count_ = parameter_count;
+ }
void SetLanguageMode(LanguageMode language_mode) {
ASSERT(this->language_mode() == CLASSIC_MODE ||
this->language_mode() == language_mode ||
@@ -268,12 +271,12 @@ class CompilationInfo {
void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
int prologue_offset() const {
- ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
+ ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
return prologue_offset_;
}
void set_prologue_offset(int prologue_offset) {
- ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
+ ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
prologue_offset_ = prologue_offset;
}
@@ -299,12 +302,12 @@ class CompilationInfo {
}
void AbortDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
abort_due_to_dependency_ = true;
}
bool HasAbortedDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
return abort_due_to_dependency_;
}
@@ -443,6 +446,9 @@ class CompilationInfo {
// during graph optimization.
int opt_count_;
+ // Number of parameters used for compilation of stubs that require arguments.
+ int parameter_count_;
+
Handle<Foreign> object_wrapper_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
@@ -505,14 +511,15 @@ class LChunk;
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
-class OptimizingCompiler: public ZoneObject {
+class RecompileJob: public ZoneObject {
public:
- explicit OptimizingCompiler(CompilationInfo* info)
+ explicit RecompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
- last_status_(FAILED) { }
+ last_status_(FAILED),
+ awaiting_install_(false) { }
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
@@ -532,6 +539,13 @@ class OptimizingCompiler: public ZoneObject {
return SetLastStatus(BAILED_OUT);
}
+ void WaitForInstall() {
+ ASSERT(info_->is_osr());
+ awaiting_install_ = true;
+ }
+
+ bool IsWaitingForInstall() { return awaiting_install_; }
+
private:
CompilationInfo* info_;
HOptimizedGraphBuilder* graph_builder_;
@@ -541,6 +555,7 @@ class OptimizingCompiler: public ZoneObject {
TimeDelta time_taken_to_optimize_;
TimeDelta time_taken_to_codegen_;
Status last_status_;
+ bool awaiting_install_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
@@ -549,9 +564,8 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizingCompiler* compiler, TimeDelta* location)
- : compiler_(compiler),
- location_(location) {
+ Timer(RecompileJob* job, TimeDelta* location)
+ : job_(job), location_(location) {
ASSERT(location_ != NULL);
timer_.Start();
}
@@ -560,7 +574,7 @@ class OptimizingCompiler: public ZoneObject {
*location_ += timer_.Elapsed();
}
- OptimizingCompiler* compiler_;
+ RecompileJob* job_;
ElapsedTimer timer_;
TimeDelta* location_;
};
@@ -625,7 +639,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
- static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
+ static Handle<Code> InstallOptimizedCode(RecompileJob* job);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
diff --git a/chromium/v8/src/contexts.cc b/chromium/v8/src/contexts.cc
index 441ef9d9c32..710d30aa8ec 100644
--- a/chromium/v8/src/contexts.cc
+++ b/chromium/v8/src/contexts.cc
@@ -259,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsNativeContext());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
diff --git a/chromium/v8/src/contexts.h b/chromium/v8/src/contexts.h
index 189c215e639..1b857c30207 100644
--- a/chromium/v8/src/contexts.h
+++ b/chromium/v8/src/contexts.h
@@ -166,6 +166,7 @@ enum BindingFlags {
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
+ V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@@ -178,14 +179,12 @@ enum BindingFlags {
observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
observers_end_perform_splice) \
- V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
strict_mode_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
- V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map) \
- V(RANDOM_SEED_INDEX, ByteArray, random_seed)
+ V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -318,6 +317,7 @@ class Context: public FixedArray {
EMBEDDER_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
+ RUN_MICROTASKS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
@@ -327,12 +327,10 @@ class Context: public FixedArray {
OBSERVERS_ENQUEUE_SPLICE_INDEX,
OBSERVERS_BEGIN_SPLICE_INDEX,
OBSERVERS_END_SPLICE_INDEX,
- OBSERVERS_DELIVER_CHANGES_INDEX,
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
GENERATOR_RESULT_MAP_INDEX,
- RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
diff --git a/chromium/v8/src/conversions-inl.h b/chromium/v8/src/conversions-inl.h
index 2f0a399d1a4..7ba19ba0f1d 100644
--- a/chromium/v8/src/conversions-inl.h
+++ b/chromium/v8/src/conversions-inl.h
@@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
return JunkStringValue();
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
Vector<const char> buffer_vector(buffer, buffer_pos);
return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -692,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent--;
}
- ASSERT(buffer_pos < kBufferSize);
+ SLOW_ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
diff --git a/chromium/v8/src/conversions.cc b/chromium/v8/src/conversions.cc
index cdc42e34d9e..397f3c57fb8 100644
--- a/chromium/v8/src/conversions.cc
+++ b/chromium/v8/src/conversions.cc
@@ -31,6 +31,7 @@
#include "conversions-inl.h"
#include "dtoa.h"
+#include "list-inl.h"
#include "strtod.h"
#include "utils.h"
@@ -45,8 +46,11 @@ namespace internal {
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
- const char* end = str + StrLength(str);
- return InternalStringToDouble(unicode_cache, str, end, flags,
+ // We cast to const uint8_t* here to avoid instantiating the
+ // InternalStringToDouble() template for const char* as well.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
+ const uint8_t* end = start + StrLength(str);
+ return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
@@ -55,11 +59,15 @@ double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val) {
- const char* end = str.start() + str.length();
- return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+ // We cast to const uint8_t* here to avoid instantiating the
+ // InternalStringToDouble() template for const char* as well.
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
+ const uint8_t* end = start + str.length();
+ return InternalStringToDouble(unicode_cache, start, end, flags,
empty_string_val);
}
+
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,
@@ -393,8 +401,9 @@ char* DoubleToRadixCString(double value, int radix) {
// at least one digit.
int integer_pos = kBufferSize - 2;
do {
- integer_buffer[integer_pos--] =
- chars[static_cast<int>(fmod(integer_part, radix))];
+ double remainder = fmod(integer_part, radix);
+ integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
+ integer_part -= remainder;
integer_part /= radix;
} while (integer_part >= 1.0);
// Sanity check.
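
The fix above subtracts the remainder before dividing, so integer_part stays an exact multiple of the radix and fmod keeps producing correct digits. A standalone sketch of the corrected loop:

#include <cmath>
#include <iostream>
#include <string>

std::string IntegerToRadix(double integer_part, int radix) {
  static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
  std::string digits;
  do {
    double remainder = std::fmod(integer_part, radix);
    digits.insert(digits.begin(), chars[static_cast<int>(remainder)]);
    integer_part -= remainder;  // keep integer_part exact for the divide
    integer_part /= radix;
  } while (integer_part >= 1.0);
  return digits;
}

int main() {
  std::cout << IntegerToRadix(255.0, 16) << "\n";  // ff
  std::cout << IntegerToRadix(10.0, 2) << "\n";    // 1010
  return 0;
}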
diff --git a/chromium/v8/src/counters.h b/chromium/v8/src/counters.h
index 93911d72161..821c25f8cec 100644
--- a/chromium/v8/src/counters.h
+++ b/chromium/v8/src/counters.h
@@ -259,22 +259,51 @@ class HistogramTimer : public Histogram {
return Enabled() && timer_.IsStarted();
}
+ // TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
+#ifdef DEBUG
+ ElapsedTimer* timer() { return &timer_; }
+#endif
+
private:
ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
+// TODO(bmeurer): The ifdeffery is an ugly hack around the fact that the
+// Parser is currently reentrant (when it throws an error, we call back
+// into JavaScript and all bets are off), but ElapsedTimer is not
+// reentry-safe. Fix this properly and remove |allow_nesting|.
class HistogramTimerScope BASE_EMBEDDED {
public:
- explicit HistogramTimerScope(HistogramTimer* timer) :
- timer_(timer) {
+ explicit HistogramTimerScope(HistogramTimer* timer,
+ bool allow_nesting = false)
+#ifdef DEBUG
+ : timer_(timer),
+ skipped_timer_start_(false) {
+ if (timer_->timer()->IsStarted() && allow_nesting) {
+ skipped_timer_start_ = true;
+ } else {
+ timer_->Start();
+ }
+#else
+ : timer_(timer) {
timer_->Start();
+#endif
}
~HistogramTimerScope() {
+#ifdef DEBUG
+ if (!skipped_timer_start_) {
+ timer_->Stop();
+ }
+#else
timer_->Stop();
+#endif
}
private:
HistogramTimer* timer_;
+#ifdef DEBUG
+ bool skipped_timer_start_;
+#endif
};
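
A self-contained sketch of the nesting-tolerant scope above: when the underlying timer is already running and nesting is allowed, the scope skips the Start/Stop pair rather than corrupting the timer. The DEBUG-only bookkeeping is made unconditional here for clarity:

#include <iostream>

class Timer {
 public:
  Timer() : started_(false) {}
  void Start() { started_ = true; }
  void Stop() { started_ = false; }
  bool IsStarted() const { return started_; }
 private:
  bool started_;
};

class TimerScope {
 public:
  TimerScope(Timer* timer, bool allow_nesting)
      : timer_(timer), skipped_(timer->IsStarted() && allow_nesting) {
    if (!skipped_) timer_->Start();
  }
  ~TimerScope() {
    if (!skipped_) timer_->Stop();
  }
 private:
  Timer* timer_;
  bool skipped_;
};

int main() {
  Timer t;
  TimerScope outer(&t, false);
  { TimerScope inner(&t, true); }      // nested: skips Start/Stop
  std::cout << t.IsStarted() << "\n";  // outer scope still timing: 1
  return 0;
}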
diff --git a/chromium/v8/src/cpu-profiler.cc b/chromium/v8/src/cpu-profiler.cc
index e0f7aea18a8..b1af621cccc 100644
--- a/chromium/v8/src/cpu-profiler.cc
+++ b/chromium/v8/src/cpu-profiler.cc
@@ -64,14 +64,15 @@ void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
TickSampleEventRecord record(last_code_event_id_);
- TickSample* sample = &record.sample;
- sample->state = isolate->current_vm_state();
- sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
- for (StackTraceFrameIterator it(isolate);
- !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
- it.Advance()) {
- sample->stack[sample->frames_count++] = it.frame()->pc();
+ RegisterState regs;
+ StackFrameIterator it(isolate);
+ if (!it.done()) {
+ StackFrame* frame = it.frame();
+ regs.sp = frame->sp();
+ regs.fp = frame->fp();
+ regs.pc = frame->pc();
}
+ record.sample.Init(isolate, regs);
ticks_from_vm_buffer_.Enqueue(record);
}
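
AddCurrentStack now captures sp/fp/pc from the topmost frame and defers the stack walk to TickSample::Init. A minimal mock of that handoff; the types are stand-ins and the register values are placeholder addresses:

#include <iostream>

struct RegisterState { const void* pc; const void* sp; const void* fp; };

struct TickSample {
  void Init(const RegisterState& regs) {
    // The real Init walks the stack starting from regs.fp/regs.sp.
    std::cout << "sample at pc=" << regs.pc << "\n";
  }
};

int main() {
  int stack_slot = 0;
  RegisterState regs;
  regs.sp = &stack_slot;  // placeholder for frame->sp()
  regs.fp = &stack_slot;  // placeholder for frame->fp()
  regs.pc = &stack_slot;  // placeholder for frame->pc()
  TickSample sample;
  sample.Init(regs);
  return 0;
}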
@@ -260,7 +261,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line) {
+ Name* source, int line, int column) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -270,7 +271,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
profiles_->GetFunctionName(shared->DebugName()),
CodeEntry::kEmptyNamePrefix,
profiles_->GetName(source),
- line);
+ line,
+ column);
if (info) {
rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
}
@@ -435,8 +437,18 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ // VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor'
+ // : object allocated on the heap may not be aligned 64". We need to
+ // figure out if this is a legitimate warning or a compiler bug.
+ #pragma warning(push)
+ #pragma warning(disable:4316)
+#endif
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ #pragma warning(pop)
+#endif
is_profiling_ = true;
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
diff --git a/chromium/v8/src/cpu-profiler.h b/chromium/v8/src/cpu-profiler.h
index 8aba5426d5a..fcb9a67ddf7 100644
--- a/chromium/v8/src/cpu-profiler.h
+++ b/chromium/v8/src/cpu-profiler.h
@@ -238,7 +238,7 @@ class CpuProfiler : public CodeEventListener {
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line);
+ Name* source, int line, int column);
virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
virtual void CodeMovingGCEvent() {}
diff --git a/chromium/v8/src/d8-debug.cc b/chromium/v8/src/d8-debug.cc
index 602ae166bea..6c297d73969 100644
--- a/chromium/v8/src/d8-debug.cc
+++ b/chromium/v8/src/d8-debug.cc
@@ -30,8 +30,6 @@
#include "d8.h"
#include "d8-debug.h"
#include "debug-agent.h"
-#include "platform.h"
-#include "platform/socket.h"
namespace v8 {
@@ -65,7 +63,8 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
TryCatch try_catch;
// Get the toJSONProtocol function on the event and get the JSON format.
- Local<String> to_json_fun_name = String::New("toJSONProtocol");
+ Local<String> to_json_fun_name =
+ String::NewFromUtf8(isolate, "toJSONProtocol");
Handle<Object> event_data = event_details.GetEventData();
Local<Function> to_json_fun =
Local<Function>::Cast(event_data->Get(to_json_fun_name));
@@ -82,7 +81,7 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
Shell::ReportException(isolate, &try_catch);
return;
}
- String::Utf8Value str(details->Get(String::New("text")));
+ String::Utf8Value str(details->Get(String::NewFromUtf8(isolate, "text")));
if (str.length() == 0) {
// Empty string is used to signal not to process this event.
return;
@@ -90,7 +89,8 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
printf("%s\n", *str);
// Get the debug command processor.
- Local<String> fun_name = String::New("debugCommandProcessor");
+ Local<String> fun_name =
+ String::NewFromUtf8(isolate, "debugCommandProcessor");
Handle<Object> exec_state = event_details.GetExecutionState();
Local<Function> fun = Local<Function>::Cast(exec_state->Get(fun_name));
Local<Object> cmd_processor =
@@ -114,8 +114,8 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
TryCatch try_catch;
// Convert the debugger command to a JSON debugger request.
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(isolate, String::New(command));
+ Handle<Value> request = Shell::DebugCommandToJSONRequest(
+ isolate, String::NewFromUtf8(isolate, command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@@ -136,7 +136,7 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
// Invoke the JavaScript to convert the debug command line to a JSON
   // request, invoke the JSON request and convert the JSON response to a text
// representation.
- fun_name = String::New("processDebugRequest");
+ fun_name = String::NewFromUtf8(isolate, "processDebugRequest");
fun = Handle<Function>::Cast(cmd_processor->Get(fun_name));
args[0] = request;
Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
@@ -153,12 +153,14 @@ void HandleDebugEvent(const Debug::EventDetails& event_details) {
Shell::ReportException(isolate, &try_catch);
continue;
}
- String::Utf8Value text_str(response_details->Get(String::New("text")));
+ String::Utf8Value text_str(
+ response_details->Get(String::NewFromUtf8(isolate, "text")));
if (text_str.length() > 0) {
printf("%s\n", *text_str);
}
- running =
- response_details->Get(String::New("running"))->ToBoolean()->Value();
+ running = response_details->Get(String::NewFromUtf8(isolate, "running"))
+ ->ToBoolean()
+ ->Value();
}
}
@@ -275,15 +277,14 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
// Print the event details.
TryCatch try_catch;
- Handle<Object> details =
- Shell::DebugMessageDetails(isolate_,
- Handle<String>::Cast(String::New(message)));
+ Handle<Object> details = Shell::DebugMessageDetails(
+ isolate_, Handle<String>::Cast(String::NewFromUtf8(isolate_, message)));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
return;
}
- String::Utf8Value str(details->Get(String::New("text")));
+ String::Utf8Value str(details->Get(String::NewFromUtf8(isolate_, "text")));
if (str.length() == 0) {
// Empty string is used to signal not to process this event.
return;
@@ -294,7 +295,9 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
printf("???\n");
}
- bool is_running = details->Get(String::New("running"))->ToBoolean()->Value();
+ bool is_running = details->Get(String::NewFromUtf8(isolate_, "running"))
+ ->ToBoolean()
+ ->Value();
PrintPrompt(is_running);
}
@@ -305,8 +308,8 @@ void RemoteDebugger::HandleKeyboardCommand(char* command) {
// Convert the debugger command to a JSON debugger request.
TryCatch try_catch;
- Handle<Value> request =
- Shell::DebugCommandToJSONRequest(isolate_, String::New(command));
+ Handle<Value> request = Shell::DebugCommandToJSONRequest(
+ isolate_, String::NewFromUtf8(isolate_, command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
diff --git a/chromium/v8/src/d8-posix.cc b/chromium/v8/src/d8-posix.cc
index 424dbbb3930..25f79a4be6b 100644
--- a/chromium/v8/src/d8-posix.cc
+++ b/chromium/v8/src/d8-posix.cc
@@ -188,12 +188,12 @@ class ExecArgs {
ExecArgs() {
exec_args_[0] = NULL;
}
- bool Init(Handle<Value> arg0, Handle<Array> command_args) {
+ bool Init(Isolate* isolate, Handle<Value> arg0, Handle<Array> command_args) {
String::Utf8Value prog(arg0);
if (*prog == NULL) {
const char* message =
"os.system(): String conversion of program name failed";
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
int len = prog.length() + 3;
@@ -208,7 +208,7 @@ class ExecArgs {
exec_args_[i] = NULL; // Consistent state for destructor.
const char* message =
"os.system(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
int len = utf8_arg.length() + 1;
@@ -245,7 +245,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[3]->IsNumber()) {
*total_timeout = args[3]->Int32Value();
} else {
- ThrowException(String::New("system: Argument 4 must be a number"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "system: Argument 4 must be a number"));
return false;
}
}
@@ -253,7 +254,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
if (args[2]->IsNumber()) {
*read_timeout = args[2]->Int32Value();
} else {
- ThrowException(String::New("system: Argument 3 must be a number"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "system: Argument 3 must be a number"));
return false;
}
}
@@ -291,14 +293,14 @@ static void ExecSubprocess(int* exec_error_fds,
// Runs in the parent process. Checks that the child was able to exec (closing
 // the file descriptor), or reports an error if it failed.
-static bool ChildLaunchedOK(int* exec_error_fds) {
+static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
int bytes_read;
int err;
do {
bytes_read = read(exec_error_fds[kReadFD], &err, sizeof(err));
} while (bytes_read == -1 && errno == EINTR);
if (bytes_read != 0) {
- ThrowException(String::New(strerror(err)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(err)));
return false;
}
return true;
@@ -307,7 +309,8 @@ static bool ChildLaunchedOK(int* exec_error_fds) {
 // Accumulates the output from the child in a string handle. Returns the
 // accumulated output, or the thrown value if an exception was raised.
-static Handle<Value> GetStdout(int child_fd,
+static Handle<Value> GetStdout(Isolate* isolate,
+ int child_fd,
struct timeval& start_time,
int read_timeout,
int total_timeout) {
@@ -318,7 +321,8 @@ static Handle<Value> GetStdout(int child_fd,
char buffer[kStdoutReadBufferSize];
if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
- return ThrowException(String::New(strerror(errno)));
+ return isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(errno)));
}
int bytes_read;
@@ -333,7 +337,8 @@ static Handle<Value> GetStdout(int child_fd,
total_timeout,
start_time) ||
(TimeIsOut(start_time, total_timeout))) {
- return ThrowException(String::New("Timed out waiting for output"));
+ return isolate->ThrowException(
+ String::NewFromUtf8(isolate, "Timed out waiting for output"));
}
continue;
} else if (errno == EINTR) {
@@ -346,7 +351,8 @@ static Handle<Value> GetStdout(int child_fd,
int length = bytes_read == 0 ?
bytes_read + fullness :
LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
- Handle<String> addition = String::New(buffer, length);
+ Handle<String> addition =
+ String::NewFromUtf8(isolate, buffer, String::kNormalString, length);
accumulator = String::Concat(accumulator, addition);
fullness = bytes_read + fullness - length;
memcpy(buffer, buffer + length, fullness);
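
The read loop above only appends complete UTF-8 sequences, carrying a split multi-byte character over to the next read. An illustrative reimplementation of the LengthWithoutIncompleteUtf8 idea (not V8's actual code):

#include <cstdio>

// Trim len back to the last complete UTF-8 sequence in buffer.
int LengthWithoutIncompleteUtf8(const char* buffer, int len) {
  int i = len;
  // Walk back over trailing continuation bytes (10xxxxxx).
  while (i > 0 && (buffer[i - 1] & 0xC0) == 0x80) i--;
  if (i == 0) return len;  // nothing recognizable; keep everything
  unsigned char lead = static_cast<unsigned char>(buffer[i - 1]);
  int expected = (lead & 0x80) == 0x00 ? 1
               : (lead & 0xE0) == 0xC0 ? 2
               : (lead & 0xF0) == 0xE0 ? 3
               : (lead & 0xF8) == 0xF0 ? 4 : 1;
  // Keep everything if the trailing sequence is complete.
  return (len - i + 1) >= expected ? len : i - 1;
}

int main() {
  const char buf[] = { 'a', '\xE2', '\x82' };  // "a" + truncated U+20AC
  std::printf("%d\n", LengthWithoutIncompleteUtf8(buf, 3));  // 1
  return 0;
}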
@@ -375,7 +381,8 @@ static Handle<Value> GetStdout(int child_fd,
// Get exit status of child.
-static bool WaitForChild(int pid,
+static bool WaitForChild(Isolate* isolate,
+ int pid,
ZombieProtector& child_waiter,
struct timeval& start_time,
int read_timeout,
@@ -392,7 +399,8 @@ static bool WaitForChild(int pid,
if (useconds < 1000000) useconds <<= 1;
if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
(TimeIsOut(start_time, total_timeout))) {
- ThrowException(String::New("Timed out waiting for process to terminate"));
+ isolate->ThrowException(String::NewFromUtf8(
+ isolate, "Timed out waiting for process to terminate"));
kill(pid, SIGINT);
return false;
}
@@ -403,7 +411,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child killed by signal %d",
child_info.si_status);
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
@@ -412,7 +420,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child exited with status %d",
child_info.si_status);
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
@@ -427,7 +435,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child killed by signal %d",
WTERMSIG(child_status));
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
if (WEXITSTATUS(child_status) != 0) {
@@ -437,7 +445,7 @@ static bool WaitForChild(int pid,
sizeof(message),
"Child exited with status %d",
exit_status);
- ThrowException(String::New(message));
+ isolate->ThrowException(String::NewFromUtf8(isolate, message));
return false;
}
@@ -456,19 +464,22 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- ThrowException(String::New("system: Argument 2 must be an array"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "system: Argument 2 must be an array"));
return;
}
command_args = Handle<Array>::Cast(args[1]);
} else {
- command_args = Array::New(0);
+ command_args = Array::New(args.GetIsolate(), 0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- ThrowException(String::New("Too many arguments to system()"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "Too many arguments to system()"));
return;
}
if (args.Length() < 1) {
- ThrowException(String::New("Too few arguments to system()"));
+ args.GetIsolate()->ThrowException(String::NewFromUtf8(
+ args.GetIsolate(), "Too few arguments to system()"));
return;
}
@@ -476,18 +487,20 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
gettimeofday(&start_time, NULL);
ExecArgs exec_args;
- if (!exec_args.Init(args[0], command_args)) {
+ if (!exec_args.Init(args.GetIsolate(), args[0], command_args)) {
return;
}
int exec_error_fds[2];
int stdout_fds[2];
if (pipe(exec_error_fds) != 0) {
- ThrowException(String::New("pipe syscall failed."));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed."));
return;
}
if (pipe(stdout_fds) != 0) {
- ThrowException(String::New("pipe syscall failed."));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed."));
return;
}
@@ -504,9 +517,10 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
- if (!ChildLaunchedOK(exec_error_fds)) return;
+ if (!ChildLaunchedOK(args.GetIsolate(), exec_error_fds)) return;
- Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
+ Handle<Value> accumulator = GetStdout(args.GetIsolate(),
+ stdout_fds[kReadFD],
start_time,
read_timeout,
total_timeout);
@@ -516,7 +530,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- if (!WaitForChild(pid,
+ if (!WaitForChild(args.GetIsolate(),
+ pid,
child_waiter,
start_time,
read_timeout,
@@ -531,17 +546,20 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
if (chdir(*directory) != 0) {
- ThrowException(String::New(strerror(errno)));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), strerror(errno)));
return;
}
}
@@ -550,7 +568,8 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
if (args[0]->IsNumber()) {
@@ -560,50 +579,51 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
} else {
const char* message = "umask() argument must be numeric";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
}
-static bool CheckItsADirectory(char* directory) {
+static bool CheckItsADirectory(Isolate* isolate, char* directory) {
struct stat stat_buf;
int stat_result = stat(directory, &stat_buf);
if (stat_result != 0) {
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
}
if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
- ThrowException(String::New(strerror(EEXIST)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(EEXIST)));
return false;
}
// Returns true for success. Creates intermediate directories as needed. No
// error if the directory exists already.
-static bool mkdirp(char* directory, mode_t mask) {
+static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
int result = mkdir(directory, mask);
if (result == 0) return true;
if (errno == EEXIST) {
- return CheckItsADirectory(directory);
+ return CheckItsADirectory(isolate, directory);
} else if (errno == ENOENT) { // Intermediate path element is missing.
char* last_slash = strrchr(directory, '/');
if (last_slash == NULL) {
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
}
*last_slash = 0;
- if (!mkdirp(directory, mask)) return false;
+ if (!mkdirp(isolate, directory, mask)) return false;
*last_slash = '/';
result = mkdir(directory, mask);
if (result == 0) return true;
if (errno == EEXIST) {
- return CheckItsADirectory(directory);
+ return CheckItsADirectory(isolate, directory);
}
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
} else {
- ThrowException(String::New(strerror(errno)));
+ isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
return false;
}
}
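
A simplified sketch of the recursive mkdirp above, with the V8 exception machinery replaced by a boolean result. POSIX only, and the demo path in main is just an example:

#include <cerrno>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>

static bool IsDirectory(const char* path) {
  struct stat stat_buf;
  return stat(path, &stat_buf) == 0 && (stat_buf.st_mode & S_IFDIR) != 0;
}

// Create directory and any missing parents; true on success.
bool MkdirP(const std::string& directory, mode_t mask) {
  if (mkdir(directory.c_str(), mask) == 0) return true;
  if (errno == EEXIST) return IsDirectory(directory.c_str());
  if (errno != ENOENT) return false;             // unrecoverable error
  std::string::size_type slash = directory.rfind('/');
  if (slash == std::string::npos) return false;  // no parent to create
  if (!MkdirP(directory.substr(0, slash), mask)) return false;
  if (mkdir(directory.c_str(), mask) == 0) return true;
  return errno == EEXIST && IsDirectory(directory.c_str());
}

int main() {
  return MkdirP("/tmp/mkdirp-demo/a/b", 0777) ? 0 : 1;  // example path
}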
@@ -616,34 +636,39 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mask = args[1]->Int32Value();
} else {
const char* message = "mkdirp() second argument must be numeric";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
- mkdirp(*directory, mask);
+ mkdirp(args.GetIsolate(), *directory, mask);
}
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
rmdir(*directory);
@@ -653,7 +678,8 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value var(args[0]);
@@ -661,13 +687,15 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
setenv(*var, *value, 1);
@@ -677,29 +705,37 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
- ThrowException(String::New(message));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), message));
return;
}
unsetenv(*var);
}
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
- os_templ->Set(String::New("system"), FunctionTemplate::New(System));
- os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
- os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
- os_templ->Set(String::New("unsetenv"),
+void Shell::AddOSMethods(Isolate* isolate, Handle<ObjectTemplate> os_templ) {
+ os_templ->Set(String::NewFromUtf8(isolate, "system"),
+ FunctionTemplate::New(System));
+ os_templ->Set(String::NewFromUtf8(isolate, "chdir"),
+ FunctionTemplate::New(ChangeDirectory));
+ os_templ->Set(String::NewFromUtf8(isolate, "setenv"),
+ FunctionTemplate::New(SetEnvironment));
+ os_templ->Set(String::NewFromUtf8(isolate, "unsetenv"),
FunctionTemplate::New(UnsetEnvironment));
- os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
- os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
- os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));
+ os_templ->Set(String::NewFromUtf8(isolate, "umask"),
+ FunctionTemplate::New(SetUMask));
+ os_templ->Set(String::NewFromUtf8(isolate, "mkdirp"),
+ FunctionTemplate::New(MakeDirectory));
+ os_templ->Set(String::NewFromUtf8(isolate, "rmdir"),
+ FunctionTemplate::New(RemoveDirectory));
}
} // namespace v8
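
The d8-posix.cc hunks above are one mechanical migration: the deprecated
static entry points ThrowException() and String::New() become calls on an
explicit Isolate, which is threaded through helpers such as mkdirp() and
CheckItsADirectory(). A minimal sketch of the new pattern, using only API
that appears in this patch:

    // Before (implicit current isolate):
    //   ThrowException(String::New(message));
    // After (isolate passed explicitly):
    static void ThrowError(v8::Isolate* isolate, const char* message) {
      isolate->ThrowException(v8::String::NewFromUtf8(isolate, message));
    }

d8.cc below wraps these same two lines in its own Throw(isolate, message)
helper so call sites stay short.
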
diff --git a/chromium/v8/src/d8-readline.cc b/chromium/v8/src/d8-readline.cc
index 298518d72ab..15b13617279 100644
--- a/chromium/v8/src/d8-readline.cc
+++ b/chromium/v8/src/d8-readline.cc
@@ -109,12 +109,9 @@ Handle<String> ReadLineEditor::Prompt(const char* prompt) {
Unlocker unlock(Isolate::GetCurrent());
result = readline(prompt);
}
- if (result != NULL) {
- AddHistory(result);
- } else {
- return Handle<String>();
- }
- return String::New(result);
+ if (result == NULL) return Handle<String>();
+ AddHistory(result);
+ return String::NewFromUtf8(isolate_, result);
}
@@ -150,11 +147,16 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
static Persistent<Array> current_completions;
Isolate* isolate = read_line_editor.isolate_;
Locker lock(isolate);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Array> completions;
if (state == 0) {
- Local<String> full_text = String::New(rl_line_buffer, rl_point);
- completions = Shell::GetCompletions(isolate, String::New(text), full_text);
+ Local<String> full_text = String::NewFromUtf8(isolate,
+ rl_line_buffer,
+ String::kNormalString,
+ rl_point);
+ completions = Shell::GetCompletions(isolate,
+ String::NewFromUtf8(isolate, text),
+ full_text);
current_completions.Reset(isolate, completions);
current_index = 0;
} else {
@@ -167,8 +169,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
String::Utf8Value str(str_obj);
return strdup(*str);
} else {
- current_completions.Dispose(isolate);
- current_completions.Clear();
+ current_completions.Reset();
return NULL;
}
}
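
The CompletionGenerator hunk shows the other recurring migration in this
patch: the Dispose()/Clear() pair on Persistent handles collapses into a
single Reset(), and the two-argument Reset(isolate, local) re-points a
handle at a new value. A sketch of both forms, with 'cache' as a
hypothetical persistent field:

    v8::Persistent<v8::Array> cache;

    void Update(v8::Isolate* isolate, v8::Local<v8::Array> values) {
      cache.Reset(isolate, values);  // releases any old handle, stores the new one
    }

    void Drop() {
      cache.Reset();                 // was: cache.Dispose(); cache.Clear();
    }
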
diff --git a/chromium/v8/src/d8-windows.cc b/chromium/v8/src/d8-windows.cc
index eeb4735bbc2..edf5085d498 100644
--- a/chromium/v8/src/d8-windows.cc
+++ b/chromium/v8/src/d8-windows.cc
@@ -35,7 +35,7 @@
namespace v8 {
-void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
+void Shell::AddOSMethods(Isolate* isolate, Handle<ObjectTemplate> os_templ) {
}
diff --git a/chromium/v8/src/d8.cc b/chromium/v8/src/d8.cc
index da3d14de528..7c5df463d33 100644
--- a/chromium/v8/src/d8.cc
+++ b/chromium/v8/src/d8.cc
@@ -61,6 +61,7 @@
#ifndef V8_SHARED
#include "api.h"
#include "checks.h"
+#include "cpu.h"
#include "d8-debug.h"
#include "debug.h"
#include "natives.h"
@@ -79,8 +80,8 @@
namespace v8 {
-static Handle<Value> Throw(const char* message) {
- return ThrowException(String::New(message));
+static Handle<Value> Throw(Isolate* isolate, const char* message) {
+ return isolate->ThrowException(String::NewFromUtf8(isolate, message));
}
@@ -89,15 +90,15 @@ class PerIsolateData {
public:
explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) {
HandleScope scope(isolate);
- isolate->SetData(this);
+ isolate->SetData(0, this);
}
~PerIsolateData() {
- isolate_->SetData(NULL); // Not really needed, just to be sure...
+ isolate_->SetData(0, NULL); // Not really needed, just to be sure...
}
inline static PerIsolateData* Get(Isolate* isolate) {
- return reinterpret_cast<PerIsolateData*>(isolate->GetData());
+ return reinterpret_cast<PerIsolateData*>(isolate->GetData(0));
}
class RealmScope {
@@ -158,6 +159,7 @@ i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
i::Mutex Shell::context_mutex_;
+const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow();
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
@@ -242,7 +244,8 @@ bool Shell::ExecuteString(Isolate* isolate,
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
- Handle<Value> fun = global->Get(String::New("Stringify"));
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "Stringify"));
Handle<Value> argv[1] = { result };
Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
if (try_catch.HasCaught()) return true;
@@ -263,18 +266,18 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
data_->realms_ = new Persistent<Context>[1];
- data_->realms_[0].Reset(data_->isolate_, Context::GetEntered());
- data_->realm_shared_.Clear();
+ data_->realms_[0].Reset(data_->isolate_,
+ data_->isolate_->GetEnteredContext());
}
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive.
for (int i = 0; i < data_->realm_count_; ++i)
- data_->realms_[i].Dispose();
+ data_->realms_[i].Reset();
delete[] data_->realms_;
if (!data_->realm_shared_.IsEmpty())
- data_->realm_shared_.Dispose();
+ data_->realm_shared_.Reset();
}
@@ -286,11 +289,20 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
}
+#ifndef V8_SHARED
+// performance.now() returns a timestamp as a double, measured in milliseconds.
+void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
+ args.GetReturnValue().Set(delta.InMillisecondsF());
+}
+#endif // V8_SHARED
+
+
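
performance.now() is implemented as the monotonic distance from a tick
value captured at startup (kInitialTicks), so it keeps sub-millisecond
precision and is immune to wall-clock adjustments. The same pattern as a
stand-alone sketch; note that i::TimeTicks and i::TimeDelta are internal
V8 API, which is why the feature is compiled out under V8_SHARED:

    static const i::TimeTicks kStart = i::TimeTicks::HighResolutionNow();

    // Milliseconds elapsed since kStart, as a double.
    static double ElapsedMs() {
      i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kStart;
      return delta.InMillisecondsF();
    }
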
// Realm.current() returns the index of the currently active realm.
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- int index = data->RealmFind(Context::GetEntered());
+ int index = data->RealmFind(isolate->GetEnteredContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -301,7 +313,7 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsObject()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = data->RealmFind(args[0]->ToObject()->CreationContext());
@@ -315,12 +327,12 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
args.GetReturnValue().Set(
@@ -351,18 +363,17 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
- data->realms_[index].Dispose();
- data->realms_[index].Clear();
+ data->realms_[index].Reset();
}
@@ -371,12 +382,12 @@ void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
data->realm_switch_ = index;
@@ -388,12 +399,12 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
- Throw("Invalid argument");
+ Throw(args.GetIsolate(), "Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
- Throw("Invalid realm index");
+ Throw(args.GetIsolate(), "Invalid realm index");
return;
}
Handle<Script> script = Script::New(args[1]->ToString());
@@ -420,7 +431,6 @@ void Shell::RealmSharedSet(Local<String> property,
const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose();
data->realm_shared_.Reset(isolate, value);
}
@@ -460,12 +470,12 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
args.GetReturnValue().Set(source);
@@ -475,7 +485,7 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
static const int kBufferSize = 256;
char buffer[kBufferSize];
- Handle<String> accumulator = String::New("");
+ Handle<String> accumulator = String::NewFromUtf8(isolate, "");
int length;
while (true) {
// Continue reading if the line ends with an escape '\\' or the line has
@@ -491,12 +501,18 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
if (length == 0) {
return accumulator;
} else if (buffer[length-1] != '\n') {
- accumulator = String::Concat(accumulator, String::New(buffer, length));
+ accumulator = String::Concat(
+ accumulator,
+ String::NewFromUtf8(isolate, buffer, String::kNormalString, length));
} else if (length > 1 && buffer[length-2] == '\\') {
buffer[length-2] = '\n';
- accumulator = String::Concat(accumulator, String::New(buffer, length-1));
+ accumulator = String::Concat(
+ accumulator, String::NewFromUtf8(isolate, buffer,
+ String::kNormalString, length - 1));
} else {
- return String::Concat(accumulator, String::New(buffer, length-1));
+ return String::Concat(
+ accumulator, String::NewFromUtf8(isolate, buffer,
+ String::kNormalString, length - 1));
}
}
}
@@ -507,20 +523,20 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(args.GetIsolate());
String::Utf8Value file(args[i]);
if (*file == NULL) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
if (!ExecuteString(args.GetIsolate(),
source,
- String::New(*file),
+ String::NewFromUtf8(args.GetIsolate(), *file),
false,
true)) {
- Throw("Error executing file");
+ Throw(args.GetIsolate(), "Error executing file");
return;
}
}
@@ -535,7 +551,8 @@ void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(String::New(V8::GetVersion()));
+ args.GetReturnValue().Set(
+ String::NewFromUtf8(args.GetIsolate(), V8::GetVersion()));
}
@@ -543,7 +560,7 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
Handle<Context> utility_context;
- bool enter_context = !Context::InContext();
+ bool enter_context = !isolate->InContext();
if (enter_context) {
utility_context = Local<Context>::New(isolate, utility_context_);
utility_context->Enter();
@@ -598,7 +615,8 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(utility_context);
Handle<Object> global = utility_context->Global();
- Handle<Value> fun = global->Get(String::New("GetCompletions"));
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "GetCompletions"));
static const int kArgc = 3;
v8::Local<v8::Context> evaluation_context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
@@ -616,7 +634,8 @@ Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
- Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "DebugMessageDetails"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { message };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
@@ -631,7 +650,8 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
- Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
+ Handle<Value> fun =
+ global->Get(String::NewFromUtf8(isolate, "DebugCommandToJSONRequest"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { command };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
@@ -770,8 +790,8 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
- utility_context->Global()->Set(String::New("$debug"),
- Utils::ToLocal(js_debug));
+ utility_context->Global()->Set(String::NewFromUtf8(isolate, "$debug"),
+ Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(
reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -782,10 +802,12 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
i::Vector<const char> shell_source_name =
i::NativesCollection<i::D8>::GetScriptName(source_index);
- Handle<String> source = String::New(shell_source.start(),
- shell_source.length());
- Handle<String> name = String::New(shell_source_name.start(),
- shell_source_name.length());
+ Handle<String> source =
+ String::NewFromUtf8(isolate, shell_source.start(), String::kNormalString,
+ shell_source.length());
+ Handle<String> name =
+ String::NewFromUtf8(isolate, shell_source_name.start(),
+ String::kNormalString, shell_source_name.length());
Handle<Script> script = Script::Compile(source, name);
script->Run();
// Mark the d8 shell script as native to avoid it showing up as normal source
@@ -838,41 +860,55 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"), FunctionTemplate::New(Print));
- global_template->Set(String::New("write"), FunctionTemplate::New(Write));
- global_template->Set(String::New("read"), FunctionTemplate::New(Read));
- global_template->Set(String::New("readbuffer"),
+ global_template->Set(String::NewFromUtf8(isolate, "print"),
+ FunctionTemplate::New(Print));
+ global_template->Set(String::NewFromUtf8(isolate, "write"),
+ FunctionTemplate::New(Write));
+ global_template->Set(String::NewFromUtf8(isolate, "read"),
+ FunctionTemplate::New(Read));
+ global_template->Set(String::NewFromUtf8(isolate, "readbuffer"),
FunctionTemplate::New(ReadBuffer));
- global_template->Set(String::New("readline"),
+ global_template->Set(String::NewFromUtf8(isolate, "readline"),
FunctionTemplate::New(ReadLine));
- global_template->Set(String::New("load"), FunctionTemplate::New(Load));
- global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
- global_template->Set(String::New("version"), FunctionTemplate::New(Version));
+ global_template->Set(String::NewFromUtf8(isolate, "load"),
+ FunctionTemplate::New(Load));
+ global_template->Set(String::NewFromUtf8(isolate, "quit"),
+ FunctionTemplate::New(Quit));
+ global_template->Set(String::NewFromUtf8(isolate, "version"),
+ FunctionTemplate::New(Version));
// Bind the Realm object.
Handle<ObjectTemplate> realm_template = ObjectTemplate::New();
- realm_template->Set(String::New("current"),
+ realm_template->Set(String::NewFromUtf8(isolate, "current"),
FunctionTemplate::New(RealmCurrent));
- realm_template->Set(String::New("owner"),
+ realm_template->Set(String::NewFromUtf8(isolate, "owner"),
FunctionTemplate::New(RealmOwner));
- realm_template->Set(String::New("global"),
+ realm_template->Set(String::NewFromUtf8(isolate, "global"),
FunctionTemplate::New(RealmGlobal));
- realm_template->Set(String::New("create"),
+ realm_template->Set(String::NewFromUtf8(isolate, "create"),
FunctionTemplate::New(RealmCreate));
- realm_template->Set(String::New("dispose"),
+ realm_template->Set(String::NewFromUtf8(isolate, "dispose"),
FunctionTemplate::New(RealmDispose));
- realm_template->Set(String::New("switch"),
+ realm_template->Set(String::NewFromUtf8(isolate, "switch"),
FunctionTemplate::New(RealmSwitch));
- realm_template->Set(String::New("eval"),
+ realm_template->Set(String::NewFromUtf8(isolate, "eval"),
FunctionTemplate::New(RealmEval));
- realm_template->SetAccessor(String::New("shared"),
+ realm_template->SetAccessor(String::NewFromUtf8(isolate, "shared"),
RealmSharedGet, RealmSharedSet);
- global_template->Set(String::New("Realm"), realm_template);
+ global_template->Set(String::NewFromUtf8(isolate, "Realm"), realm_template);
+
+#ifndef V8_SHARED
+ Handle<ObjectTemplate> performance_template = ObjectTemplate::New();
+ performance_template->Set(String::NewFromUtf8(isolate, "now"),
+ FunctionTemplate::New(PerformanceNow));
+ global_template->Set(String::NewFromUtf8(isolate, "performance"),
+ performance_template);
+#endif // V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
- AddOSMethods(os_templ);
- global_template->Set(String::New("os"), os_templ);
+ AddOSMethods(isolate, os_templ);
+ global_template->Set(String::NewFromUtf8(isolate, "os"), os_templ);
#endif // V8_SHARED
return global_template;
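
The new performance object follows the same idiom used for Realm and os:
populate a child ObjectTemplate, then install it on the global template
under one property, so every context created from the template exposes the
whole namespace. Condensed:

    Handle<ObjectTemplate> global_template = ObjectTemplate::New();
    Handle<ObjectTemplate> performance_template = ObjectTemplate::New();
    performance_template->Set(String::NewFromUtf8(isolate, "now"),
                              FunctionTemplate::New(PerformanceNow));
    global_template->Set(String::NewFromUtf8(isolate, "performance"),
                         performance_template);
    // Scripts in any context built from global_template can call
    // performance.now().
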
@@ -939,15 +975,15 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
- factory->NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
+ factory->NewFixedArray(js_args.argc);
+ for (int j = 0; j < js_args.argc; j++) {
i::Handle<i::String> arg =
factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
factory->NewJSArrayWithElements(arguments_array);
- context->Global()->Set(String::New("arguments"),
+ context->Global()->Set(String::NewFromUtf8(isolate, "arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
return handle_scope.Close(context);
@@ -1070,7 +1106,7 @@ static void ReadBufferWeakCallback(v8::Isolate* isolate,
-static_cast<intptr_t>(byte_length));
delete[] data;
- array_buffer->Dispose();
+ array_buffer->Reset();
}
@@ -1079,7 +1115,7 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
- Throw("Error loading file");
+ Throw(args.GetIsolate(), "Error loading file");
return;
}
@@ -1087,10 +1123,10 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
uint8_t* data = reinterpret_cast<uint8_t*>(
ReadChars(args.GetIsolate(), *filename, &length));
if (data == NULL) {
- Throw("Error reading file");
+ Throw(args.GetIsolate(), "Error reading file");
return;
}
- Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(data, length);
+ Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(isolate, data, length);
v8::Persistent<v8::ArrayBuffer> weak_handle(isolate, buffer);
weak_handle.MakeWeak(data, ReadBufferWeakCallback);
weak_handle.MarkIndependent();
@@ -1128,7 +1164,8 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars, size);
+ Handle<String> result =
+ String::NewFromUtf8(isolate, chars, String::kNormalString, size);
delete[] chars;
return result;
}
@@ -1141,7 +1178,7 @@ void Shell::RunShell(Isolate* isolate) {
v8::Local<v8::Context>::New(isolate, evaluation_context_);
v8::Context::Scope context_scope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Handle<String> name = String::New("(d8)");
+ Handle<String> name = String::NewFromUtf8(isolate, "(d8)");
LineEditor* console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
console->Open(isolate);
@@ -1210,7 +1247,8 @@ void ShellThread::Run() {
Shell::Exit(1);
}
- Shell::ExecuteString(isolate_, str, String::New(filename), false, false);
+ Shell::ExecuteString(
+ isolate_, str, String::NewFromUtf8(isolate_, filename), false, false);
}
ptr = next_line;
@@ -1228,15 +1266,17 @@ SourceGroup::~SourceGroup() {
void SourceGroup::Execute(Isolate* isolate) {
+ bool exception_was_thrown = false;
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
HandleScope handle_scope(isolate);
- Handle<String> file_name = String::New("unnamed");
- Handle<String> source = String::New(argv_[i + 1]);
+ Handle<String> file_name = String::NewFromUtf8(isolate, "unnamed");
+ Handle<String> source = String::NewFromUtf8(isolate, argv_[i + 1]);
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
- Shell::Exit(1);
+ exception_was_thrown = true;
+ break;
}
++i;
} else if (arg[0] == '-') {
@@ -1244,17 +1284,21 @@ void SourceGroup::Execute(Isolate* isolate) {
} else {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope(isolate);
- Handle<String> file_name = String::New(arg);
+ Handle<String> file_name = String::NewFromUtf8(isolate, arg);
Handle<String> source = ReadFile(isolate, arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
Shell::Exit(1);
}
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
- Shell::Exit(1);
+ exception_was_thrown = true;
+ break;
}
}
}
+ if (exception_was_thrown != Shell::options.expected_to_throw) {
+ Shell::Exit(1);
+ }
}
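
Combined with the new --throws option parsed in SetOptions() below, this
inverts the failure handling: instead of exiting on the first uncaught
exception, Execute() records whether one was thrown and exits nonzero only
when the outcome disagrees with the expectation. A run such as

    d8 --throws some-test.js        (file name hypothetical)

now succeeds exactly when the script throws, which lets test harnesses
express must-throw regression tests without wrapper scripts.
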
@@ -1262,7 +1306,8 @@ Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
int size;
char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars, size);
+ Handle<String> result =
+ String::NewFromUtf8(isolate, chars, String::kNormalString, size);
delete[] chars;
return result;
}
@@ -1338,6 +1383,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
options.stress_deopt = true;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--mock-arraybuffer-allocator") == 0) {
+ options.mock_arraybuffer_allocator = true;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "--noalways-opt") == 0) {
// No support for stressing if we can't use --always-opt.
options.stress_opt = false;
@@ -1351,43 +1399,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--send-idle-notification") == 0) {
options.send_idle_notification = true;
argv[i] = NULL;
- } else if (strcmp(argv[i], "--preemption") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.use_preemption = true;
- argv[i] = NULL;
-#endif // V8_SHARED
- } else if (strcmp(argv[i], "--nopreemption") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.use_preemption = false;
- argv[i] = NULL;
-#endif // V8_SHARED
- } else if (strcmp(argv[i], "--preemption-interval") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- if (++i < argc) {
- argv[i-1] = NULL;
- char* end = NULL;
- options.preemption_interval = strtol(argv[i], &end, 10); // NOLINT
- if (options.preemption_interval <= 0
- || *end != '\0'
- || errno == ERANGE) {
- printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
- return false;
- }
- argv[i] = NULL;
- } else {
- printf("Missing value for --preemption-interval\n");
- return false;
- }
-#endif // V8_SHARED
} else if (strcmp(argv[i], "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
@@ -1413,6 +1424,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.dump_heap_constants = true;
argv[i] = NULL;
#endif
+ } else if (strcmp(argv[i], "--throws") == 0) {
+ options.expected_to_throw = true;
+ argv[i] = NULL;
}
#ifdef V8_SHARED
else if (strcmp(argv[i], "--dump-counters") == 0) {
@@ -1523,14 +1537,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
V8::IdleNotification(kLongIdlePauseInMs);
}
}
-
-#ifndef V8_SHARED
- // Start preemption if threads have been created and preemption is enabled.
- if (threads.length() > 0
- && options.use_preemption) {
- Locker::StartPreemption(options.preemption_interval);
- }
-#endif // V8_SHARED
}
#ifndef V8_SHARED
@@ -1543,11 +1549,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
thread->Join();
delete thread;
}
-
- if (threads.length() > 0 && options.use_preemption) {
- Locker lock(isolate);
- Locker::StopPreemption();
- }
#endif // V8_SHARED
return 0;
}
@@ -1559,6 +1560,8 @@ static void SetStandaloneFlagsViaCommandLine() {
- char **fake_argv = new char*[2];
+ char **fake_argv = new char*[3];
fake_argv[0] = NULL;
fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg");
+ fake_argv[2] = strdup("--redirect-code-traces-to=code.asm");
v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
free(fake_argv[1]);
+ free(fake_argv[2]);
delete[] fake_argv;
@@ -1643,18 +1645,43 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
};
+class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t) V8_OVERRIDE {
+ return malloc(0);
+ }
+ virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE {
+ return malloc(0);
+ }
+ virtual void Free(void*, size_t) V8_OVERRIDE {
+ }
+};
+
+
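
MockArrayBufferAllocator, selected by the new --mock-arraybuffer-allocator
flag, returns zero-byte allocations on purpose: tests can create enormous
ArrayBuffers without committing memory, as long as they never touch the
contents. For contrast, a functional allocator meeting the documented
contract (Allocate() must return zero-initialized memory) looks like this
sketch:

    class ZeroingAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) V8_OVERRIDE {
        return calloc(length, 1);   // contract: zero-initialized
      }
      virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE {
        return malloc(length);
      }
      virtual void Free(void* data, size_t) V8_OVERRIDE {
        free(data);
      }
    };
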
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU();
#ifndef V8_SHARED
i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
+ i::FLAG_redirect_code_traces_to = "code.asm";
#else
SetStandaloneFlagsViaCommandLine();
#endif
ShellArrayBufferAllocator array_buffer_allocator;
- v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+ MockArrayBufferAllocator mock_arraybuffer_allocator;
+ if (options.mock_arraybuffer_allocator) {
+ v8::V8::SetArrayBufferAllocator(&mock_arraybuffer_allocator);
+ } else {
+ v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+ }
int result = 0;
Isolate* isolate = Isolate::GetCurrent();
+#ifndef V8_SHARED
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaults(i::OS::TotalPhysicalMemory(),
+ i::CPU::NumberOfProcessorsOnline());
+ v8::SetResourceConstraints(isolate, &constraints);
+#endif
DumbLineEditor dumb_line_editor(isolate);
{
Initialize(isolate);
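
Main() now sizes the heap from the host machine before the first context
exists: ConfigureDefaults() derives limits from physical memory and
processor count, and SetResourceConstraints() applies them to the isolate.
The call sequence in isolation, with placeholder inputs (d8 itself queries
i::OS::TotalPhysicalMemory() and i::CPU::NumberOfProcessorsOnline()):

    v8::ResourceConstraints constraints;
    constraints.ConfigureDefaults(static_cast<uint64_t>(2) << 30,  // e.g. 2 GiB
                                  4);                              // e.g. 4 cores
    v8::SetResourceConstraints(isolate, &constraints);
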
diff --git a/chromium/v8/src/d8.gyp b/chromium/v8/src/d8.gyp
index 15d342dece7..097abc04652 100644
--- a/chromium/v8/src/d8.gyp
+++ b/chromium/v8/src/d8.gyp
@@ -31,7 +31,7 @@
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
@@ -81,13 +81,13 @@
}],
['v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ '<(icu_gyp_path):icudata',
],
}],
],
diff --git a/chromium/v8/src/d8.h b/chromium/v8/src/d8.h
index 1ae1bcfe6e7..39352000fd9 100644
--- a/chromium/v8/src/d8.h
+++ b/chromium/v8/src/d8.h
@@ -219,8 +219,6 @@ class ShellOptions {
public:
ShellOptions() :
#ifndef V8_SHARED
- use_preemption(true),
- preemption_interval(10),
num_parallel_files(0),
parallel_files(NULL),
#endif // V8_SHARED
@@ -232,6 +230,8 @@ class ShellOptions {
interactive_shell(false),
test_shell(false),
dump_heap_constants(false),
+ expected_to_throw(false),
+ mock_arraybuffer_allocator(false),
num_isolates(1),
isolate_sources(NULL) { }
@@ -243,8 +243,6 @@ class ShellOptions {
}
#ifndef V8_SHARED
- bool use_preemption;
- int preemption_interval;
int num_parallel_files;
char** parallel_files;
#endif // V8_SHARED
@@ -256,6 +254,8 @@ class ShellOptions {
bool interactive_shell;
bool test_shell;
bool dump_heap_constants;
+ bool expected_to_throw;
+ bool mock_arraybuffer_allocator;
int num_isolates;
SourceGroup* isolate_sources;
};
@@ -300,6 +300,8 @@ class Shell : public i::AllStatic {
Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
+
+ static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
#endif // V8_SHARED
static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -375,7 +377,8 @@ class Shell : public i::AllStatic {
static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void AddOSMethods(Handle<ObjectTemplate> os_template);
+ static void AddOSMethods(v8::Isolate* isolate,
+ Handle<ObjectTemplate> os_template);
static const char* kPrompt;
static ShellOptions options;
@@ -391,6 +394,7 @@ class Shell : public i::AllStatic {
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
static i::Mutex context_mutex_;
+ static const i::TimeTicks kInitialTicks;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
diff --git a/chromium/v8/src/d8.js b/chromium/v8/src/d8.js
index 3efea063787..35b61d54ee7 100644
--- a/chromium/v8/src/d8.js
+++ b/chromium/v8/src/d8.js
@@ -40,7 +40,7 @@ function log10(num) {
function ToInspectableObject(obj) {
if (!obj && typeof obj === 'object') {
- return void 0;
+ return UNDEFINED;
} else {
return Object(obj);
}
@@ -333,7 +333,7 @@ function DebugRequest(cmd_line) {
}
if ((cmd === undefined) || !cmd) {
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
return;
}
@@ -492,7 +492,7 @@ function DebugRequest(cmd_line) {
case 'trace':
case 'tr':
// Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
this.traceCommand_(args);
break;
@@ -500,7 +500,7 @@ function DebugRequest(cmd_line) {
case '?':
this.helpCommand_(args);
// Return undefined to indicate command handled internally (no JSON).
- this.request_ = void 0;
+ this.request_ = UNDEFINED;
break;
default:
@@ -2124,7 +2124,7 @@ function SimpleObjectToJSON_(object) {
var property_value_json;
switch (typeof property_value) {
case 'object':
- if (property_value === null) {
+ if (IS_NULL(property_value)) {
property_value_json = 'null';
} else if (typeof property_value.toJSONProtocol == 'function') {
property_value_json = property_value.toJSONProtocol(true);
@@ -2217,7 +2217,7 @@ function Stringify(x, depth) {
case "symbol":
return "Symbol(" + (x.name ? Stringify(x.name, depth) : "") + ")"
case "object":
- if (x === null) return "null";
+ if (IS_NULL(x)) return "null";
if (x.constructor && x.constructor.name === "Array") {
var elems = [];
for (var i = 0; i < x.length; ++i) {
@@ -2233,7 +2233,7 @@ function Stringify(x, depth) {
var props = [];
for (var name in x) {
var desc = Object.getOwnPropertyDescriptor(x, name);
- if (desc === void 0) continue;
+ if (IS_UNDEFINED(desc)) continue;
if ("value" in desc) {
props.push(name + ": " + Stringify(desc.value, depth - 1));
}
diff --git a/chromium/v8/src/date.js b/chromium/v8/src/date.js
index 62999e9de63..f3d4af244f9 100644
--- a/chromium/v8/src/date.js
+++ b/chromium/v8/src/date.js
@@ -41,7 +41,7 @@ function ThrowDateTypeError() {
}
-var timezone_cache_time = $NaN;
+var timezone_cache_time = NAN;
var timezone_cache_timezone;
function LocalTimezone(t) {
@@ -66,10 +66,10 @@ function UTC(time) {
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
- if (!$isFinite(hour)) return $NaN;
- if (!$isFinite(min)) return $NaN;
- if (!$isFinite(sec)) return $NaN;
- if (!$isFinite(ms)) return $NaN;
+ if (!$isFinite(hour)) return NAN;
+ if (!$isFinite(min)) return NAN;
+ if (!$isFinite(sec)) return NAN;
+ if (!$isFinite(ms)) return NAN;
return TO_INTEGER(hour) * msPerHour
+ TO_INTEGER(min) * msPerMinute
+ TO_INTEGER(sec) * msPerSecond
@@ -90,7 +90,7 @@ function TimeInYear(year) {
// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
function MakeDay(year, month, date) {
- if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
+ if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return NAN;
// Convert to integer and map -0 to 0.
year = TO_INTEGER_MAP_MINUS_ZERO(year);
@@ -99,7 +99,7 @@ function MakeDay(year, month, date) {
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth) {
- return $NaN;
+ return NAN;
}
// Now we rely on year and month being SMIs.
@@ -115,15 +115,15 @@ function MakeDate(day, time) {
// is no way that the time can be within range even after UTC
// conversion we return NaN immediately instead of relying on
// TimeClip to do it.
- if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
+ if ($abs(time) > MAX_TIME_BEFORE_UTC) return NAN;
return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
- if (!$isFinite(time)) return $NaN;
- if ($abs(time) > MAX_TIME_MS) return $NaN;
+ if (!$isFinite(time)) return NAN;
+ if ($abs(time) > MAX_TIME_MS) return NAN;
return TO_INTEGER(time);
}
@@ -132,7 +132,7 @@ function TimeClip(time) {
// strings over and over again.
var Date_cache = {
// Cached time value.
- time: $NaN,
+ time: 0,
// String input for which the cached time is valid.
string: null
};
@@ -269,7 +269,7 @@ var parse_buffer = $Array(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
var arr = %DateParseString(ToString(string), parse_buffer);
- if (IS_NULL(arr)) return $NaN;
+ if (IS_NULL(arr)) return NAN;
var day = MakeDay(arr[0], arr[1], arr[2]);
var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
@@ -302,8 +302,7 @@ function DateUTC(year, month, date, hours, minutes, seconds, ms) {
}
-// Mozilla-specific extension. Returns the number of milliseconds
-// elapsed since 1 January 1970 00:00:00 UTC.
+// ECMA 262 - 15.9.4.4
function DateNow() {
return %DateCurrentTime();
}
@@ -671,7 +670,7 @@ function DateGetYear() {
function DateSetYear(year) {
CHECK_DATE(this);
year = ToNumber(year);
- if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN);
+ if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var t = LOCAL_DATE_VALUE(this);
@@ -746,12 +745,12 @@ function DateToJSON(key) {
function ResetDateCache() {
// Reset the timezone cache:
- timezone_cache_time = $NaN;
+ timezone_cache_time = NAN;
timezone_cache_timezone = undefined;
// Reset the date cache:
cache = Date_cache;
- cache.time = $NaN;
+ cache.time = NAN;
cache.string = null;
}
@@ -762,7 +761,7 @@ function SetUpDate() {
%CheckIsBootstrapping();
%SetCode($Date, DateConstructor);
- %FunctionSetPrototype($Date, new $Date($NaN));
+ %FunctionSetPrototype($Date, new $Date(NAN));
// Set up non-enumerable properties of the Date object itself.
InstallFunctions($Date, DONT_ENUM, $Array(
diff --git a/chromium/v8/src/debug-debugger.js b/chromium/v8/src/debug-debugger.js
index 19209d4b95d..b159ae3b298 100644
--- a/chromium/v8/src/debug-debugger.js
+++ b/chromium/v8/src/debug-debugger.js
@@ -448,7 +448,7 @@ ScriptBreakPoint.prototype.set = function (script) {
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
- if (position === null) return;
+ if (IS_NULL(position)) return;
// Create a break point object and set the break point.
break_point = MakeBreakPoint(position, this);
@@ -2064,7 +2064,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
} else if ("value" in value_description) {
return value_description.value;
} else if (value_description.type == UNDEFINED_TYPE) {
- return void 0;
+ return UNDEFINED;
} else if (value_description.type == NULL_TYPE) {
return null;
} else {
diff --git a/chromium/v8/src/debug.cc b/chromium/v8/src/debug.cc
index 0496b8cb007..25be003f707 100644
--- a/chromium/v8/src/debug.cc
+++ b/chromium/v8/src/debug.cc
@@ -709,7 +709,7 @@ void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj->Dispose();
+ obj->Reset();
}
@@ -1793,10 +1793,14 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
// function.
- if (!holder.is_null() && holder->IsJSFunction() &&
- !JSFunction::cast(*holder)->IsBuiltin()) {
+ if (!holder.is_null() && holder->IsJSFunction()) {
Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- Debug::FloodWithOneShot(js_function);
+ if (!js_function->IsBuiltin()) {
+ Debug::FloodWithOneShot(js_function);
+ } else if (js_function->shared()->bound()) {
+ // Handle Function.prototype.bind
+ Debug::FloodBoundFunctionWithOneShot(js_function);
+ }
}
} else {
Debug::FloodWithOneShot(function);
@@ -2052,7 +2056,7 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
- if (FLAG_concurrent_recompilation) {
+ if (isolate_->concurrent_recompilation_enabled()) {
isolate_->optimizing_compiler_thread()->Flush();
}
@@ -2102,6 +2106,7 @@ void Debug::PrepareForBreakPoints() {
if (!shared->allows_lazy_compilation()) continue;
if (!shared->script()->IsScript()) continue;
+ if (function->IsBuiltin()) continue;
if (shared->code()->gc_metadata() == active_code_marker) continue;
Code::Kind kind = function->code()->kind();
@@ -3066,6 +3071,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
bool auto_continue) {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
HandleScope scope(isolate_);
if (!isolate_->debug()->Load()) return;
@@ -3126,13 +3132,12 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
{
v8::Local<v8::Object> api_exec_state =
v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
- v8::Local<v8::String> fun_name =
- v8::String::New("debugCommandProcessor");
+ v8::Local<v8::String> fun_name = v8::String::NewFromUtf8(
+ isolate, "debugCommandProcessor");
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name));
- v8::Handle<v8::Boolean> running =
- auto_continue ? v8::True() : v8::False();
+ v8::Handle<v8::Boolean> running = v8::Boolean::New(isolate, auto_continue);
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { running };
cmd_processor = v8::Local<v8::Object>::Cast(
@@ -3175,11 +3180,12 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::Local<v8::Function> fun;
v8::Local<v8::Value> request;
v8::TryCatch try_catch;
- fun_name = v8::String::New("processDebugRequest");
+ fun_name = v8::String::NewFromUtf8(isolate, "processDebugRequest");
fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
- request = v8::String::New(command.text().start(),
- command.text().length());
+ request = v8::String::NewFromTwoByte(isolate, command.text().start(),
+ v8::String::kNormalString,
+ command.text().length());
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { request };
v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
@@ -3191,7 +3197,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
if (!response_val->IsUndefined()) {
response = v8::Local<v8::String>::Cast(response_val);
} else {
- response = v8::String::New("");
+ response = v8::String::NewFromUtf8(isolate, "");
}
// Log the JSON request/response.
@@ -3201,7 +3207,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
}
// Get the running state.
- fun_name = v8::String::New("isRunning");
+ fun_name = v8::String::NewFromUtf8(isolate, "isRunning");
fun = v8::Local<v8::Function>::Cast(cmd_processor->Get(fun_name));
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { response };
@@ -3634,7 +3640,7 @@ v8::Handle<v8::Object> MessageImpl::GetEventData() const {
v8::Handle<v8::String> MessageImpl::GetJSON() const {
- v8::HandleScope scope(
+ v8::EscapableHandleScope scope(
reinterpret_cast<v8::Isolate*>(event_data_->GetIsolate()));
if (IsEvent()) {
@@ -3650,7 +3656,7 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
if (caught_exception || !json->IsString()) {
return v8::Handle<v8::String>();
}
- return scope.Close(v8::Utils::ToLocal(Handle<String>::cast(json)));
+ return scope.Escape(v8::Utils::ToLocal(Handle<String>::cast(json)));
} else {
return v8::Utils::ToLocal(response_json_);
}
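
GetJSON() moves from the old HandleScope::Close() to the new
EscapableHandleScope, whose Escape() is the sanctioned way to hand exactly
one local handle out to the caller's scope. The shape of the idiom, as a
sketch:

    v8::Local<v8::String> BuildJson(v8::Isolate* isolate) {
      v8::EscapableHandleScope scope(isolate);
      v8::Local<v8::String> json = v8::String::NewFromUtf8(isolate, "{}");
      return scope.Escape(json);  // promotes 'json' past this scope's teardown
    }
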
diff --git a/chromium/v8/src/debug.h b/chromium/v8/src/debug.h
index 2b5f43ab495..8e71ea67052 100644
--- a/chromium/v8/src/debug.h
+++ b/chromium/v8/src/debug.h
@@ -38,6 +38,7 @@
#include "frames-inl.h"
#include "hashmap.h"
#include "platform.h"
+#include "platform/socket.h"
#include "string-stream.h"
#include "v8threads.h"
diff --git a/chromium/v8/src/default-platform.cc b/chromium/v8/src/default-platform.cc
new file mode 100644
index 00000000000..ef3c4ebd450
--- /dev/null
+++ b/chromium/v8/src/default-platform.cc
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "default-platform.h"
+
+namespace v8 {
+namespace internal {
+
+
+DefaultPlatform::DefaultPlatform() {}
+
+
+DefaultPlatform::~DefaultPlatform() {}
+
+void DefaultPlatform::CallOnBackgroundThread(Task *task,
+ ExpectedRuntime expected_runtime) {
+ // TODO(jochen): implement.
+ task->Run();
+ delete task;
+}
+
+
+void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
+ // TODO(jochen): implement.
+ task->Run();
+ delete task;
+}
+
+
+} } // namespace v8::internal
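
default-platform.cc is the first stub behind the new v8::Platform
abstraction from include/v8-platform.h: embedders pass heap-allocated Task
objects and the platform decides where they run, taking ownership and
deleting each task after Run(). This stub runs everything inline; a real
embedder would queue the task on a worker thread instead, e.g. (LoggingTask
is a hypothetical example):

    class LoggingTask : public v8::Task {
     public:
      virtual void Run() V8_OVERRIDE { printf("background work\n"); }
    };

    void PostWork(v8::Platform* platform) {
      platform->CallOnBackgroundThread(new LoggingTask(),
                                       v8::Platform::kShortRunningTask);
    }
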
diff --git a/chromium/v8/src/marking-thread.h b/chromium/v8/src/default-platform.h
index 021cd5b48c7..fe1bf8e2d64 100644
--- a/chromium/v8/src/marking-thread.h
+++ b/chromium/v8/src/default-platform.h
@@ -25,42 +25,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_MARKING_THREAD_H_
-#define V8_MARKING_THREAD_H_
+#ifndef V8_DEFAULT_PLATFORM_H_
+#define V8_DEFAULT_PLATFORM_H_
-#include "atomicops.h"
-#include "flags.h"
-#include "platform.h"
-#include "v8utils.h"
-
-#include "spaces.h"
-
-#include "heap.h"
+#include "v8.h"
namespace v8 {
namespace internal {
-class MarkingThread : public Thread {
+class DefaultPlatform : public Platform {
public:
- explicit MarkingThread(Isolate* isolate);
- ~MarkingThread() {}
+ DefaultPlatform();
+ virtual ~DefaultPlatform();
- void Run();
- void Stop();
- void StartMarking();
- void WaitForMarkingThread();
+ // v8::Platform implementation.
+ virtual void CallOnBackgroundThread(
+ Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
+ virtual void CallOnForegroundThread(v8::Isolate *isolate,
+ Task *task) V8_OVERRIDE;
private:
- Isolate* isolate_;
- Heap* heap_;
- Semaphore start_marking_semaphore_;
- Semaphore end_marking_semaphore_;
- Semaphore stop_semaphore_;
- volatile AtomicWord stop_thread_;
- int id_;
- static Atomic32 id_counter_;
+ DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
};
+
} } // namespace v8::internal
-#endif // V8_MARKING_THREAD_H_
+
+#endif // V8_DEFAULT_PLATFORM_H_
diff --git a/chromium/v8/src/deoptimizer.cc b/chromium/v8/src/deoptimizer.cc
index c979a534d89..76f2fa9bd76 100644
--- a/chromium/v8/src/deoptimizer.cc
+++ b/chromium/v8/src/deoptimizer.cc
@@ -181,7 +181,8 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Always use the actual stack slots when calculating the fp to sp
// delta adding two for the function and context.
unsigned stack_slots = code->stack_slots();
- unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
+ unsigned fp_to_sp_delta = (stack_slots * kPointerSize) +
+ StandardFrameConstants::kFixedFrameSizeFromFp;
Deoptimizer* deoptimizer = new Deoptimizer(isolate,
function,
@@ -344,9 +345,11 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
if (FLAG_trace_deopt) {
- PrintF("[deoptimizer unlinked: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimizer unlinked: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(),
+ " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
}
};
@@ -409,7 +412,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
if (FLAG_trace_deopt) {
- PrintF("[deoptimize all code in all contexts]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
}
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
@@ -425,7 +429,8 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
if (FLAG_trace_deopt) {
- PrintF("[deoptimize marked code in all contexts]\n");
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
}
DisallowHeapAllocation no_allocation;
// For all contexts, deoptimize code already marked.
@@ -440,7 +445,8 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
if (FLAG_trace_deopt) {
- PrintF("[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
+ CodeTracer::Scope scope(object->GetHeap()->isolate()->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
reinterpret_cast<intptr_t>(object));
}
if (object->IsJSGlobalProxy()) {
@@ -541,7 +547,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
materialized_objects_(NULL),
materialization_value_index_(0),
materialization_object_index_(0),
- trace_(false) {
+ trace_scope_(NULL) {
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
// indicating an internal frame.
if (function->IsSmi()) {
@@ -571,7 +577,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
- trace_ = TraceEnabledFor(type, frame_type);
+ trace_scope_ = TraceEnabledFor(type, frame_type) ?
+ new CodeTracer::Scope(isolate->GetCodeTracer()) : NULL;
#ifdef DEBUG
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
@@ -604,9 +611,10 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
void Deoptimizer::PrintFunctionName() {
if (function_->IsJSFunction()) {
- function_->PrintName();
+ function_->PrintName(trace_scope_->file());
} else {
- PrintF("%s", Code::Kind2String(compiled_code_->kind()));
+ PrintF(trace_scope_->file(),
+ "%s", Code::Kind2String(compiled_code_->kind()));
}
}
@@ -614,6 +622,7 @@ void Deoptimizer::PrintFunctionName() {
Deoptimizer::~Deoptimizer() {
ASSERT(input_ == NULL && output_ == NULL);
ASSERT(disallow_heap_allocation_ == NULL);
+ delete trace_scope_;
}
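
The deoptimizer hunks above and below all repeat one rewrite: the trace_
flag becomes an owned CodeTracer::Scope, every PrintF() gains an explicit
FILE*, and deopt traces land wherever --redirect-code-traces-to points
(wired up in d8.cc above) instead of always on stdout. The recurring guard,
reduced to its skeleton (internal API, message text illustrative):

    if (trace_scope_ != NULL) {
      PrintF(trace_scope_->file(), "[deoptimizer note: id=%d]\n", some_id);
    }
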
@@ -681,13 +690,13 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
return data->PcAndState(i)->value();
}
}
- PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
- PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
+ PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
+ PrintF(stderr, "[method: %s]\n", *shared->DebugName()->ToCString());
// Print the source code if available.
HeapStringAllocator string_allocator;
StringStream stream(&string_allocator);
shared->SourceCodePrint(&stream, -1);
- PrintF("[source:\n%s\n]", *stream.ToCString());
+ PrintF(stderr, "[source:\n%s\n]", *stream.ToCString());
FATAL("unable to find pc offset during deoptimization");
return -1;
@@ -722,15 +731,19 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
ElapsedTimer timer;
- if (trace_) {
+ if (trace_scope_ != NULL) {
timer.Start();
- PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
+ PrintF(trace_scope_->file(),
+ "[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
PrintFunctionName();
- PrintF(" @%d, FP to SP delta: %d]\n", bailout_id_, fp_to_sp_delta_);
+ PrintF(trace_scope_->file(),
+ " @%d, FP to SP delta: %d]\n",
+ bailout_id_,
+ fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
- compiled_code_->PrintDeoptLocation(bailout_id_);
+ compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
}
}
@@ -803,15 +816,17 @@ void Deoptimizer::DoComputeOutputFrames() {
}
// Print some helpful diagnostic information.
- if (trace_) {
+ if (trace_scope_ != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
- PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
+ PrintF(trace_scope_->file(),
+ "[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function));
PrintFunctionName();
- PrintF(" @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
+ PrintF(trace_scope_->file(),
+ " @%d => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
bailout_id_,
node_id.ToInt(),
@@ -839,10 +854,11 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(), " translating ");
+ function->PrintName(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
@@ -875,7 +891,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
// is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(fp_reg.code()) - (2 * kPointerSize) -
+ top_address = input_->GetRegister(fp_reg.code()) -
+ StandardFrameConstants::kFixedFrameSizeFromFp -
height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
@@ -909,8 +926,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetCallerPc(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
@@ -932,8 +950,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
has_alignment_padding_ * kPointerSize) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -954,8 +973,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; context\n",
top_address + output_offset, output_offset, value);
}
@@ -968,8 +988,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; function\n",
top_address + output_offset, output_offset, value);
}
@@ -1017,8 +1038,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating arguments adaptor => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
@@ -1052,8 +1074,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
@@ -1064,8 +1087,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -1075,8 +1099,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context (adaptor sentinel)\n",
top_address + output_offset, output_offset, context);
}
@@ -1085,8 +1110,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function\n",
top_address + output_offset, output_offset, value);
}
@@ -1095,8 +1121,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
@@ -1120,8 +1147,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating construct stub => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize;
@@ -1163,8 +1191,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
@@ -1175,8 +1204,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -1185,8 +1215,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -1195,8 +1226,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
@@ -1205,8 +1237,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; code object\n",
top_address + output_offset, output_offset, value);
}
@@ -1215,8 +1248,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
@@ -1227,8 +1261,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; constructor function\n",
top_address + output_offset, output_offset, value);
}
@@ -1239,8 +1274,9 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
@@ -1264,18 +1300,19 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
unsigned height = 0;
unsigned height_in_bytes = height * kPointerSize;
const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (trace_) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating %s stub => height=%u\n", kind, height_in_bytes);
}
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // We need 1 stack entry for the return address and enough entries for the
+ // StackFrame::INTERNAL (FP, context, frame type and code object - see
// MacroAssembler::EnterFrame). For a setter stub frame we need one additional
// entry for the implicit return value, see
// StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = (kPCOnStackSize / kPointerSize) +
- (kFPOnStackSize / kPointerSize) + 3 +
- (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_entries =
+ (StandardFrameConstants::kFixedFrameSize / kPointerSize) + 1 +
+ (is_setter_stub_frame ? 1 : 0);
unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
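
The rewritten entry count is arithmetic-equivalent to the magic-number sum it replaces. A compile-time check under the constants sketched earlier (one pc slot, one fp slot, a four-slot fixed frame; the extra +1 is the code object that internal frames push):

    // Equivalence check; sizes assume kPCOnStackSize == kFPOnStackSize ==
    // kPointerSize and kFixedFrameSize == 4 * kPointerSize, as sketched.
    const unsigned kPtr = 8;
    const unsigned kPCOnStackSize = kPtr;
    const unsigned kFPOnStackSize = kPtr;
    const unsigned kFixedFrameSize = 4 * kPtr;

    static_assert(kPCOnStackSize / kPtr + kFPOnStackSize / kPtr + 3 ==
                  kFixedFrameSize / kPtr + 1,
                  "old magic-number sum == named-constant form (5 entries)");
    // A setter stub frame adds one further entry for the implicit return
    // value in both forms.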
@@ -1300,8 +1337,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
@@ -1312,8 +1350,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -1322,8 +1361,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -1332,8 +1372,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; function (%s sentinel)\n",
top_address + output_offset, output_offset, value, kind);
}
@@ -1346,8 +1387,9 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
Code* accessor_stub = isolate_->builtins()->builtin(name);
value = reinterpret_cast<intptr_t>(accessor_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; code object\n",
top_address + output_offset, output_offset, value);
}
@@ -1423,9 +1465,11 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
int input_frame_size = input_->GetFrameSize();
int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " translating %s => StubFailure%sTrampolineStub, height=%d\n",
CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ descriptor->HasTailCallContinuation() ? "TailCall" : "",
height_in_bytes);
}
@@ -1441,7 +1485,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// context and function slots.
Register fp_reg = StubFailureTrampolineFrame::fp_register();
intptr_t top_address = input_->GetRegister(fp_reg.code()) -
- (2 * kPointerSize) - height_in_bytes;
+ StandardFrameConstants::kFixedFrameSizeFromFp - height_in_bytes;
output_frame->SetTop(top_address);
// Read caller's PC (JSFunction continuation) from the input frame.
@@ -1449,8 +1493,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
intptr_t value = input_->GetFrameSlot(input_frame_offset);
output_frame->SetCallerPc(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_frame_offset, output_frame_offset, value);
}
@@ -1463,8 +1508,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
intptr_t frame_ptr = input_->GetRegister(fp_reg.code());
output_frame->SetRegister(fp_reg.code(), frame_ptr);
output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
top_address + output_frame_offset, output_frame_offset, value);
}
@@ -1476,8 +1522,10 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
output_frame->SetRegister(context_reg.code(), value);
output_frame_offset -= kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ ASSERT(reinterpret_cast<Object*>(value)->IsContext());
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context\n",
top_address + output_frame_offset, output_frame_offset, value);
}
@@ -1487,14 +1535,16 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
value = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function (stub failure sentinel)\n",
top_address + output_frame_offset, output_frame_offset, value);
}
- intptr_t caller_arg_count = 0;
- bool arg_count_known = descriptor->stack_parameter_count_ == NULL;
+ intptr_t caller_arg_count = descriptor->HasTailCallContinuation()
+ ? compiled_code_->arguments_count() + 1 : 0;
+ bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
output_frame_offset -= kPointerSize;
@@ -1509,8 +1559,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(args_arguments_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.arguments %s\n",
top_address + args_arguments_offset, args_arguments_offset, value,
arg_count_known ? "" : "(the hole)");
@@ -1520,8 +1571,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
int length_frame_offset = output_frame_offset;
value = arg_count_known ? caller_arg_count : the_hole;
output_frame->SetFrameSlot(length_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.length %s\n",
top_address + length_frame_offset, length_frame_offset, value,
arg_count_known ? "" : "(the hole)");
@@ -1531,34 +1583,54 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
(output_frame_size - output_frame_offset) + kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args*\n",
top_address + output_frame_offset, output_frame_offset, value);
}
// Copy the register parameters to the failure frame.
+ int arguments_length_offset = -1;
for (int i = 0; i < descriptor->register_param_count_; ++i) {
output_frame_offset -= kPointerSize;
DoTranslateCommand(iterator, 0, output_frame_offset);
+
+ if (!arg_count_known && descriptor->IsParameterCountRegister(i)) {
+ arguments_length_offset = output_frame_offset;
+ }
}
+ ASSERT(0 == output_frame_offset);
+
if (!arg_count_known) {
- DoTranslateCommand(iterator, 0, length_frame_offset,
- TRANSLATED_VALUE_IS_NATIVE);
- caller_arg_count = output_frame->GetFrameSlot(length_frame_offset);
+ ASSERT(arguments_length_offset >= 0);
+ // We know it's a smi because 1) the code stub guarantees the stack
+ // parameter count is in smi range, and 2) the DoTranslateCommand in the
+ // parameter loop above translated that to a tagged value.
+ Smi* smi_caller_arg_count = reinterpret_cast<Smi*>(
+ output_frame->GetFrameSlot(arguments_length_offset));
+ caller_arg_count = smi_caller_arg_count->value();
+ output_frame->SetFrameSlot(length_frame_offset, caller_arg_count);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; args.length\n",
+ top_address + length_frame_offset, length_frame_offset,
+ caller_arg_count);
+ }
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
output_frame->SetFrameSlot(args_arguments_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; args.arguments\n",
- top_address + args_arguments_offset, args_arguments_offset, value);
+ top_address + args_arguments_offset, args_arguments_offset,
+ value);
}
}
- ASSERT(0 == output_frame_offset);
-
// Copy the double registers from the input into the output frame.
CopyDoubleRegisters(output_frame);
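
When the stack parameter count is not statically known, the count now comes out of the already-translated register-parameter slot, read back as a tagged Smi, instead of a second DoTranslateCommand with a native value type. A sketch of the smi round-trip this relies on; tag layout is architecture-specific in V8, so the one-bit tag below is illustrative:

    #include <cassert>
    #include <cstdint>

    // Illustrative smi encoding: payload shifted left one, low tag bit zero.
    const int kSmiTagSize = 1;

    intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }

    int SmiToInt(intptr_t tagged) {
      return static_cast<int>(tagged >> kSmiTagSize);
    }

    int main() {
      // The translation loop stored a tagged count into the frame slot...
      intptr_t frame_slot = SmiFromInt(3);
      // ...so the deoptimizer can recover it with an untag, as the new code
      // does via reinterpret_cast<Smi*>(slot)->value().
      assert(SmiToInt(frame_slot) == 3);
      return 0;
    }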
@@ -1567,15 +1639,18 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// Compute this frame's PC, state, and continuation.
Code* trampoline = NULL;
- StubFunctionMode function_mode = descriptor->function_mode_;
- StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
- isolate_);
+ if (descriptor->HasTailCallContinuation()) {
+ StubFailureTailCallTrampolineStub().FindCodeInCache(&trampoline, isolate_);
+ } else {
+ StubFunctionMode function_mode = descriptor->function_mode_;
+ StubFailureTrampolineStub(function_mode).FindCodeInCache(&trampoline,
+ isolate_);
+ }
ASSERT(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ Code* notify_failure = NotifyStubFailureBuiltin();
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_failure->entry()));
}
@@ -1614,12 +1689,16 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
}
} else {
// Dispatch on the instance type of the object to be materialized.
- Handle<Map> map = Handle<Map>::cast(MaterializeNextValue());
+    // We also need to make sure that the representations of all fields
+    // in the given object are general enough to hold a tagged value.
+ Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
+ Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged());
switch (map->instance_type()) {
case HEAP_NUMBER_TYPE: {
- Handle<HeapNumber> number =
- Handle<HeapNumber>::cast(MaterializeNextValue());
- materialized_objects_->Add(number);
+ // Reuse the HeapNumber value directly as it is already properly
+ // tagged and skip materializing the HeapNumber explicitly.
+ Handle<Object> object = MaterializeNextValue();
+ materialized_objects_->Add(object);
materialization_value_index_ += kDoubleSize / kPointerSize - 1;
break;
}
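
Materialization writes whatever tagged values the translation produced into the new object's fields, so a map that still records a narrower representation (smi or double) for some field would have its invariants broken. Generalizing every field to tagged first rules that out; in miniature, with simplified representations standing in for V8's Representation class:

    // Simplified sketch of why fields are generalized before
    // materialization.
    enum Representation { kSmi, kDouble, kTagged };

    // kTagged subsumes the others: any heap value may be stored there. The
    // deoptimizer cannot promise a smi or unboxed double for each field, so
    // every field's representation is widened before the object is built.
    Representation GeneralizeForMaterialization(Representation r) {
      (void)r;          // whatever it was...
      return kTagged;   // ...tagged always holds the materialized value
    }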
@@ -1650,7 +1729,8 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
break;
}
default:
- PrintF("[couldn't handle instance type %d]\n", map->instance_type());
+ PrintF(stderr,
+ "[couldn't handle instance type %d]\n", map->instance_type());
UNREACHABLE();
}
}
@@ -1693,29 +1773,37 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// output frames are used to materialize arguments objects later on they need
// to already contain valid heap numbers.
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+ HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (trace_) {
- PrintF("Materialized a new heap number %p [%e] in slot %p\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address());
+ d.destination());
}
- Memory::Object_at(d.slot_address()) = *num;
+ Memory::Object_at(d.destination()) = *num;
}
// Materialize all heap numbers required for arguments/captured objects.
- for (int i = 0; i < values.length(); i++) {
- if (!values.at(i)->IsTheHole()) continue;
- double double_value = deferred_objects_double_values_[i];
- Handle<Object> num = isolate_->factory()->NewNumber(double_value);
- if (trace_) {
- PrintF("Materialized a new heap number %p [%e] for object\n",
- reinterpret_cast<void*>(*num), double_value);
+ for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
+ HeapNumberMaterializationDescriptor<int> d =
+ deferred_objects_double_values_[i];
+ Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new heap number %p [%e] for object at %d\n",
+ reinterpret_cast<void*>(*num),
+ d.value(),
+ d.destination());
}
- values.Set(i, num);
+ ASSERT(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), num);
}
+ // Play it safe and clear all object double values before we continue.
+ deferred_objects_double_values_.Clear();
+
// Materialize arguments/captured objects.
if (!deferred_objects_.is_empty()) {
List<Handle<Object> > materialized_objects(deferred_objects_.length());
@@ -1732,19 +1820,21 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// the materialized object into the frame slot.
Handle<Object> object = MaterializeNextHeapObject();
Memory::Object_at(descriptor.slot_address()) = *object;
- if (trace_) {
+ if (trace_scope_ != NULL) {
if (descriptor.is_arguments()) {
- PrintF("Materialized %sarguments object of length %d for %p: ",
+ PrintF(trace_scope_->file(),
+ "Materialized %sarguments object of length %d for %p: ",
ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
Handle<JSObject>::cast(object)->elements()->length(),
reinterpret_cast<void*>(descriptor.slot_address()));
} else {
- PrintF("Materialized captured object of size %d for %p: ",
+ PrintF(trace_scope_->file(),
+ "Materialized captured object of size %d for %p: ",
Handle<HeapObject>::cast(object)->Size(),
reinterpret_cast<void*>(descriptor.slot_address()));
}
- object->ShortPrint();
- PrintF("\n");
+ object->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(), "\n");
}
}
@@ -1765,23 +1855,24 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_bottom = parameters_top + parameters_size;
Address expressions_bottom = expressions_top + expressions_size;
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
- HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+ HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i];
    // Check that the heap number to materialize actually belongs to the
    // frame being extracted.
- Address slot = d.slot_address();
+ Address slot = d.destination();
if (parameters_top <= slot && slot < parameters_bottom) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
int index = (info->parameters_count() - 1) -
static_cast<int>(slot - parameters_top) / kPointerSize;
- if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materializing a new heap number %p [%e] in slot %p"
"for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address(),
+ d.destination(),
index);
}
@@ -1792,12 +1883,13 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
int index = info->expression_count() - 1 -
static_cast<int>(slot - expressions_top) / kPointerSize;
- if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p"
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materializing a new heap number %p [%e] in slot %p"
"for expression slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
- d.slot_address(),
+ d.destination(),
index);
}
@@ -1808,10 +1900,8 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
#endif
-static const char* TraceValueType(bool is_smi, bool is_native = false) {
- if (is_native) {
- return "native";
- } else if (is_smi) {
+static const char* TraceValueType(bool is_smi) {
+ if (is_smi) {
return "smi";
}
@@ -1842,14 +1932,18 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::REGISTER: {
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("0x%08" V8PRIxPTR " ; %s ", input_value,
+ PrintF(trace_scope_->file(),
+ "0x%08" V8PRIxPTR " ; %s ", input_value,
converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ "\n");
}
AddObjectTaggedValue(input_value);
return;
@@ -1859,11 +1953,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
bool is_smi = Smi::IsValid(value);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; %s (%s)\n", value,
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; %s (%s)\n", value,
converter.NameOfCPURegister(input_reg),
TraceValueType(is_smi));
}
@@ -1882,11 +1978,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; uint %s (%s)\n", value,
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; uint %s (%s)\n", value,
converter.NameOfCPURegister(input_reg),
TraceValueType(is_smi));
}
@@ -1904,11 +2002,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%e ; %s\n", value,
+ PrintF(trace_scope_->file(),
+ "%e ; %s\n", value,
DoubleRegister::AllocationIndexToString(input_reg));
}
AddObjectDoubleValue(value);
@@ -1919,13 +2019,17 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ PrintF(trace_scope_->file(),
+ "0x%08" V8PRIxPTR " ; [sp + %d] ", input_value, input_offset);
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ "\n");
}
AddObjectTaggedValue(input_value);
return;
@@ -1936,11 +2040,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = Smi::IsValid(value);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; [sp + %d] (%s)\n",
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; [sp + %d] (%s)\n",
value, input_offset, TraceValueType(is_smi));
}
if (is_smi) {
@@ -1960,11 +2066,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
+ PrintF(trace_scope_->file(),
+ "%" V8PRIdPTR " ; [sp + %d] (uint %s)\n",
value, input_offset, TraceValueType(is_smi));
}
if (is_smi) {
@@ -1982,11 +2090,13 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- PrintF("%e ; [sp + %d]\n", value, input_offset);
+ PrintF(trace_scope_->file(),
+ "%e ; [sp + %d]\n", value, input_offset);
}
AddObjectDoubleValue(value);
return;
@@ -1994,12 +2104,14 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
- if (trace_) {
- PrintF(" object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- literal->ShortPrint();
- PrintF(" ; literal\n");
+ literal->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; literal\n");
}
intptr_t value = reinterpret_cast<intptr_t>(literal);
AddObjectTaggedValue(value);
@@ -2008,12 +2120,14 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::DUPLICATED_OBJECT: {
int object_index = iterator->Next();
- if (trace_) {
- PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; duplicate of object #%d\n", object_index);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; duplicate of object #%d\n", object_index);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2028,12 +2142,14 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::CAPTURED_OBJECT: {
int length = iterator->Next();
bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
- if (trace_) {
- PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
reinterpret_cast<intptr_t>(object_slot),
field_index);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; object (length = %d, is_args = %d)\n", length, is_args);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2054,13 +2170,11 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset,
- DeoptimizerTranslatedValueType value_type) {
+ int frame_index,
+ unsigned output_offset) {
disasm::NameConverter converter;
// A GC-safe temporary placeholder that we can put in the output frame.
const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
- bool is_native = value_type == TRANSLATED_VALUE_IS_NATIVE;
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
@@ -2079,15 +2193,17 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::REGISTER: {
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
- if (trace_) {
+ if (trace_scope_ != NULL) {
PrintF(
+ trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
output_[frame_index]->GetTop() + output_offset,
output_offset,
input_value,
converter.NameOfCPURegister(input_reg));
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(), "\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -2096,27 +2212,24 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_REGISTER: {
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- Smi::IsValid(value);
- if (trace_) {
+ bool is_smi = Smi::IsValid(value);
+ if (trace_scope_ != NULL) {
PrintF(
+ trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2127,28 +2240,25 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_REGISTER: {
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
+ bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
+ if (trace_scope_ != NULL) {
PrintF(
+ trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
" ; uint %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2159,8 +2269,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
@@ -2177,15 +2288,18 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
+ PrintF(trace_scope_->file(),
+ "[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
+ reinterpret_cast<Object*>(input_value)->ShortPrint(
+ trace_scope_->file());
+ PrintF(trace_scope_->file(), "\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -2195,27 +2309,25 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- Smi::IsValid(value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
+ bool is_smi = Smi::IsValid(value);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
+ PrintF(trace_scope_->file(),
+ "[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<int32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2228,27 +2340,25 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
- bool is_smi = (value_type == TRANSLATED_VALUE_IS_TAGGED) &&
- (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": ",
+ bool is_smi = value <= static_cast<uintptr_t>(Smi::kMaxValue);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
+ PrintF(trace_scope_->file(),
+ "[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
output_offset,
value,
input_offset,
- TraceValueType(is_smi, is_native));
+ TraceValueType(is_smi));
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
- } else if (value_type == TRANSLATED_VALUE_IS_NATIVE) {
- output_[frame_index]->SetFrameSlot(output_offset, value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
- ASSERT(value_type == TRANSLATED_VALUE_IS_TAGGED);
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
@@ -2260,8 +2370,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
@@ -2276,12 +2387,13 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- literal->ShortPrint();
- PrintF(" ; literal\n");
+ literal->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(), " ; literal\n");
}
intptr_t value = reinterpret_cast<intptr_t>(literal);
output_[frame_index]->SetFrameSlot(output_offset, value);
@@ -2290,12 +2402,14 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DUPLICATED_OBJECT: {
int object_index = iterator->Next();
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; duplicate of object #%d\n", object_index);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; duplicate of object #%d\n", object_index);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2311,12 +2425,14 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::CAPTURED_OBJECT: {
int length = iterator->Next();
bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
- isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
+ isolate_->heap()->arguments_marker()->ShortPrint(trace_scope_->file());
+ PrintF(trace_scope_->file(),
+ " ; object (length = %d, is_args = %d)\n", length, is_args);
}
// Use the materialization marker value as a sentinel and fill in
// the object after the deoptimized frame is built.
@@ -2337,90 +2453,12 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
-void Deoptimizer::PatchInterruptCode(Isolate* isolate,
- Code* unoptimized) {
- DisallowHeapAllocation no_gc;
- Code* replacement_code =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-
- // Iterate over the back edge table and patch every interrupt
- // call to an unconditional call to the replacement code.
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
- ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()));
- PatchInterruptCodeAt(unoptimized,
- back_edges.pc(),
- replacement_code);
- }
- }
-
- unoptimized->set_back_edges_patched_for_osr(true);
- ASSERT(Deoptimizer::VerifyInterruptCode(
- isolate, unoptimized, loop_nesting_level));
-}
-
-
-void Deoptimizer::RevertInterruptCode(Isolate* isolate,
- Code* unoptimized) {
- DisallowHeapAllocation no_gc;
- Code* interrupt_code =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
-
- // Iterate over the back edge table and revert the patched interrupt calls.
- ASSERT(unoptimized->back_edges_patched_for_osr());
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
- ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()));
- RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
- }
- }
-
- unoptimized->set_back_edges_patched_for_osr(false);
- unoptimized->set_allow_osr_at_loop_nesting_level(0);
- // Assert that none of the back edges are patched anymore.
- ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
-}
-
-
-#ifdef DEBUG
-bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
- Code* unoptimized,
- int loop_nesting_level) {
- DisallowHeapAllocation no_gc;
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- uint32_t loop_depth = back_edges.loop_depth();
- CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
- // Assert that all back edges for shallower loops (and only those)
- // have already been patched.
- CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
- GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()) != NOT_PATCHED);
- }
- return true;
-}
-#endif // DEBUG
-
-
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
// The fp-to-sp delta already takes the context and the function
- // into account so we have to avoid double counting them (-2).
- unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
+ // into account so we have to avoid double counting them.
+ unsigned result = fixed_size + fp_to_sp_delta_ -
+ StandardFrameConstants::kFixedFrameSizeFromFp;
#ifdef DEBUG
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
@@ -2484,18 +2522,19 @@ void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
void Deoptimizer::AddObjectTaggedValue(intptr_t value) {
deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value));
- deferred_objects_double_values_.Add(isolate()->heap()->nan_value()->value());
}
void Deoptimizer::AddObjectDoubleValue(double value) {
deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
- deferred_objects_double_values_.Add(value);
+ HeapNumberMaterializationDescriptor<int> value_desc(
+ deferred_objects_tagged_values_.length() - 1, value);
+ deferred_objects_double_values_.Add(value_desc);
}
void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
- HeapNumberMaterializationDescriptor value_desc(
+ HeapNumberMaterializationDescriptor<Address> value_desc(
reinterpret_cast<Address>(slot_address), value);
deferred_heap_numbers_.Add(value_desc);
}
@@ -2814,46 +2853,11 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Translation::StringFor(Opcode opcode) {
+#define TRANSLATION_OPCODE_CASE(item) case item: return #item;
switch (opcode) {
- case BEGIN:
- return "BEGIN";
- case JS_FRAME:
- return "JS_FRAME";
- case ARGUMENTS_ADAPTOR_FRAME:
- return "ARGUMENTS_ADAPTOR_FRAME";
- case CONSTRUCT_STUB_FRAME:
- return "CONSTRUCT_STUB_FRAME";
- case GETTER_STUB_FRAME:
- return "GETTER_STUB_FRAME";
- case SETTER_STUB_FRAME:
- return "SETTER_STUB_FRAME";
- case COMPILED_STUB_FRAME:
- return "COMPILED_STUB_FRAME";
- case REGISTER:
- return "REGISTER";
- case INT32_REGISTER:
- return "INT32_REGISTER";
- case UINT32_REGISTER:
- return "UINT32_REGISTER";
- case DOUBLE_REGISTER:
- return "DOUBLE_REGISTER";
- case STACK_SLOT:
- return "STACK_SLOT";
- case INT32_STACK_SLOT:
- return "INT32_STACK_SLOT";
- case UINT32_STACK_SLOT:
- return "UINT32_STACK_SLOT";
- case DOUBLE_STACK_SLOT:
- return "DOUBLE_STACK_SLOT";
- case LITERAL:
- return "LITERAL";
- case DUPLICATED_OBJECT:
- return "DUPLICATED_OBJECT";
- case ARGUMENTS_OBJECT:
- return "ARGUMENTS_OBJECT";
- case CAPTURED_OBJECT:
- return "CAPTURED_OBJECT";
+ TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
}
+#undef TRANSLATION_OPCODE_CASE
UNREACHABLE();
return "";
}
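
StringFor and the Opcode enum in deoptimizer.h below now both expand the single TRANSLATION_OPCODE_LIST X-macro, so an opcode added to the list automatically gets an enum value and a printable name. The pattern in miniature, with a made-up list:

    #include <cstdio>

    // X-macro sketch: one list, two expansions.
    #define COLOR_LIST(V) \
      V(RED)              \
      V(GREEN)            \
      V(BLUE)

    #define DECLARE_ENUM(item) item,
    enum Color {
      COLOR_LIST(DECLARE_ENUM)
      LAST = BLUE  // sentinel, mirroring Translation::LAST
    };
    #undef DECLARE_ENUM

    #define CASE_TO_STRING(item) case item: return #item;
    const char* ColorName(Color c) {
      switch (c) {
        COLOR_LIST(CASE_TO_STRING)
      }
      return "";
    }
    #undef CASE_TO_STRING

    int main() {
      std::printf("%s\n", ColorName(GREEN));  // prints GREEN
      return 0;
    }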
diff --git a/chromium/v8/src/deoptimizer.h b/chromium/v8/src/deoptimizer.h
index 7ee5908f762..f518546018b 100644
--- a/chromium/v8/src/deoptimizer.h
+++ b/chromium/v8/src/deoptimizer.h
@@ -60,17 +60,18 @@ class FrameDescription;
class TranslationIterator;
class DeoptimizedFrameInfo;
+template<typename T>
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
- HeapNumberMaterializationDescriptor(Address slot_address, double val)
- : slot_address_(slot_address), val_(val) { }
+ HeapNumberMaterializationDescriptor(T destination, double value)
+ : destination_(destination), value_(value) { }
- Address slot_address() const { return slot_address_; }
- double value() const { return val_; }
+ T destination() const { return destination_; }
+ double value() const { return value_; }
private:
- Address slot_address_;
- double val_;
+ T destination_;
+ double value_;
};
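
Templating the descriptor on its destination type lets one record serve both deferral lists: frame-slot heap numbers are keyed by an Address to patch, while object-field heap numbers are keyed by an int index into the deferred tagged-values list, resolved only after the owning object exists. A usage sketch:

    #include <cstdint>

    typedef uint8_t* Address;  // stand-in for V8's Address typedef

    template<typename T>
    class HeapNumberMaterializationDescriptor {
     public:
      HeapNumberMaterializationDescriptor(T destination, double value)
          : destination_(destination), value_(value) { }
      T destination() const { return destination_; }
      double value() const { return value_; }
     private:
      T destination_;
      double value_;
    };

    int main() {
      intptr_t slot = 0;
      // Deferred frame-slot number: patch this address once allocated.
      HeapNumberMaterializationDescriptor<Address> by_slot(
          reinterpret_cast<Address>(&slot), 0.5);
      // Deferred object-field number: fill tagged-values entry #3 later.
      HeapNumberMaterializationDescriptor<int> by_index(3, 0.5);
      return by_slot.value() == by_index.value() ? 0 : 1;
    }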
@@ -131,11 +132,6 @@ class Deoptimizer : public Malloced {
DEBUGGER
};
- enum InterruptPatchState {
- NOT_PATCHED,
- PATCHED_FOR_OSR
- };
-
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -213,39 +209,6 @@ class Deoptimizer : public Malloced {
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
- // Patch all interrupts with allowed loop depth in the unoptimized code to
- // unconditionally call replacement_code.
- static void PatchInterruptCode(Isolate* isolate,
- Code* unoptimized_code);
-
- // Patch the interrupt at the instruction before pc_after in
- // the unoptimized code to unconditionally call replacement_code.
- static void PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code);
-
- // Change all patched interrupts patched in the unoptimized code
- // back to normal interrupts.
- static void RevertInterruptCode(Isolate* isolate,
- Code* unoptimized_code);
-
- // Change patched interrupt in the unoptimized code
- // back to a normal interrupt.
- static void RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code);
-
-#ifdef DEBUG
- static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after);
-
- // Verify that all back edges of a certain loop depth are patched.
- static bool VerifyInterruptCode(Isolate* isolate,
- Code* unoptimized_code,
- int loop_nesting_level);
-#endif // DEBUG
-
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
@@ -370,15 +333,9 @@ class Deoptimizer : public Malloced {
int object_index,
int field_index);
- enum DeoptimizerTranslatedValueType {
- TRANSLATED_VALUE_IS_NATIVE,
- TRANSLATED_VALUE_IS_TAGGED
- };
-
void DoTranslateCommand(TranslationIterator* iterator,
- int frame_index,
- unsigned output_offset,
- DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);
+ int frame_index,
+ unsigned output_offset);
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@@ -449,6 +406,10 @@ class Deoptimizer : public Malloced {
// at the dynamic alignment state slot inside the frame.
bool HasAlignmentPadding(JSFunction* function);
+ // Select the version of NotifyStubFailure builtin that either saves or
+ // doesn't save the double registers depending on CPU features.
+ Code* NotifyStubFailureBuiltin();
+
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
@@ -469,9 +430,10 @@ class Deoptimizer : public Malloced {
// Deferred values to be materialized.
List<Object*> deferred_objects_tagged_values_;
- List<double> deferred_objects_double_values_;
+ List<HeapNumberMaterializationDescriptor<int> >
+ deferred_objects_double_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
- List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
+ List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
// Output frame information. Only used during heap object materialization.
List<Handle<JSFunction> > jsframe_functions_;
@@ -487,7 +449,7 @@ class Deoptimizer : public Malloced {
DisallowHeapAllocation* disallow_heap_allocation_;
#endif // DEBUG
- bool trace_;
+ CodeTracer::Scope* trace_scope_;
static const int table_entry_size_;
@@ -542,7 +504,15 @@ class FrameDescription {
void SetCallerFp(unsigned offset, intptr_t value);
intptr_t GetRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(registers_));
+#if DEBUG
+ // This convoluted ASSERT is needed to work around a gcc problem that
+ // improperly detects an array bounds overflow in optimized debug builds
+ // when using a plain ASSERT.
+ if (n >= ARRAY_SIZE(registers_)) {
+ ASSERT(false);
+ return 0;
+ }
+#endif
return registers_[n];
}
@@ -717,29 +687,36 @@ class TranslationIterator BASE_EMBEDDED {
};
+#define TRANSLATION_OPCODE_LIST(V) \
+ V(BEGIN) \
+ V(JS_FRAME) \
+ V(CONSTRUCT_STUB_FRAME) \
+ V(GETTER_STUB_FRAME) \
+ V(SETTER_STUB_FRAME) \
+ V(ARGUMENTS_ADAPTOR_FRAME) \
+ V(COMPILED_STUB_FRAME) \
+ V(DUPLICATED_OBJECT) \
+ V(ARGUMENTS_OBJECT) \
+ V(CAPTURED_OBJECT) \
+ V(REGISTER) \
+ V(INT32_REGISTER) \
+ V(UINT32_REGISTER) \
+ V(DOUBLE_REGISTER) \
+ V(STACK_SLOT) \
+ V(INT32_STACK_SLOT) \
+ V(UINT32_STACK_SLOT) \
+ V(DOUBLE_STACK_SLOT) \
+ V(LITERAL)
+
+
class Translation BASE_EMBEDDED {
public:
+#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
enum Opcode {
- BEGIN,
- JS_FRAME,
- CONSTRUCT_STUB_FRAME,
- GETTER_STUB_FRAME,
- SETTER_STUB_FRAME,
- ARGUMENTS_ADAPTOR_FRAME,
- COMPILED_STUB_FRAME,
- DUPLICATED_OBJECT,
- ARGUMENTS_OBJECT,
- CAPTURED_OBJECT,
- REGISTER,
- INT32_REGISTER,
- UINT32_REGISTER,
- DOUBLE_REGISTER,
- STACK_SLOT,
- INT32_STACK_SLOT,
- UINT32_STACK_SLOT,
- DOUBLE_STACK_SLOT,
- LITERAL
+ TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM)
+ LAST = LITERAL
};
+#undef DECLARE_TRANSLATION_OPCODE_ENUM
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
Zone* zone)
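The hand-written Opcode enum above is replaced by an X-macro list so that the same set of names can be expanded in several places (enum values, debug name strings) without drifting apart. A minimal sketch of the pattern, where COLOR_LIST, DECLARE_ENUM and kColorNames are illustrative names rather than anything from this patch:

#define COLOR_LIST(V) \
  V(RED)              \
  V(GREEN)            \
  V(BLUE)

// Expand once to declare the enum...
#define DECLARE_ENUM(item) item,
enum Color { COLOR_LIST(DECLARE_ENUM) LAST_COLOR = BLUE };
#undef DECLARE_ENUM

// ...and once more to build a matching name table for tracing.
#define DECLARE_NAME(item) #item,
static const char* const kColorNames[] = { COLOR_LIST(DECLARE_NAME) };
#undef DECLARE_NAME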
diff --git a/chromium/v8/src/disassembler.cc b/chromium/v8/src/disassembler.cc
index dd620fb3452..69737ed89f8 100644
--- a/chromium/v8/src/disassembler.cc
+++ b/chromium/v8/src/disassembler.cc
@@ -227,7 +227,7 @@ static int DecodeIt(Isolate* isolate,
out.AddFormatted(" ;; object: %s", *obj_name);
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
const char* reference_name =
- ref_encoder.NameOfAddress(*relocinfo.target_reference_address());
+ ref_encoder.NameOfAddress(relocinfo.target_reference());
out.AddFormatted(" ;; external reference (%s)", reference_name);
} else if (RelocInfo::IsCodeTarget(rmode)) {
out.AddFormatted(" ;; code:");
@@ -250,7 +250,7 @@ static int DecodeIt(Isolate* isolate,
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}
- } else if (kind == Code::STUB) {
+ } else if (kind == Code::STUB || kind == Code::HANDLER) {
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
Object* obj = heap->code_stubs()->SlowReverseLookup(code);
diff --git a/chromium/v8/src/elements-kind.cc b/chromium/v8/src/elements-kind.cc
index 8129051a627..689c2205696 100644
--- a/chromium/v8/src/elements-kind.cc
+++ b/chromium/v8/src/elements-kind.cc
@@ -98,6 +98,14 @@ struct InitializeFastElementsKindSequence {
fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[4] = FAST_ELEMENTS;
fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
+
+ // Verify that kFastElementsKindPackedToHoley is correct.
+ STATIC_ASSERT(FAST_SMI_ELEMENTS + kFastElementsKindPackedToHoley ==
+ FAST_HOLEY_SMI_ELEMENTS);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS + kFastElementsKindPackedToHoley ==
+ FAST_HOLEY_DOUBLE_ELEMENTS);
+ STATIC_ASSERT(FAST_ELEMENTS + kFastElementsKindPackedToHoley ==
+ FAST_HOLEY_ELEMENTS);
}
};
diff --git a/chromium/v8/src/elements-kind.h b/chromium/v8/src/elements-kind.h
index 69b40578180..51a690272f7 100644
--- a/chromium/v8/src/elements-kind.h
+++ b/chromium/v8/src/elements-kind.h
@@ -77,6 +77,10 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
+// The number to add to a packed elements kind to reach a holey elements kind.
+const int kFastElementsKindPackedToHoley =
+ FAST_HOLEY_SMI_ELEMENTS - FAST_SMI_ELEMENTS;
+
int ElementsKindToShiftSize(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
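The new constant is added to a packed ElementsKind to obtain its holey counterpart, which appears to rely on the packed/holey kinds being laid out as adjacent pairs in the enum; the STATIC_ASSERTs added in elements-kind.cc check every pair. A hedged sketch of a helper built on it (GetHoleyElementsKind is an illustrative name, not part of this patch):

static ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
  // Relies on the pairwise enum layout verified by the STATIC_ASSERTs.
  return static_cast<ElementsKind>(packed_kind +
                                   kFastElementsKindPackedToHoley);
}
// GetHoleyElementsKind(FAST_SMI_ELEMENTS) yields FAST_HOLEY_SMI_ELEMENTS.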
diff --git a/chromium/v8/src/elements.cc b/chromium/v8/src/elements.cc
index 89621cb3694..0b745c4505f 100644
--- a/chromium/v8/src/elements.cc
+++ b/chromium/v8/src/elements.cc
@@ -792,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len0; i++) {
ASSERT(!to->get(i)->IsTheHole());
diff --git a/chromium/v8/src/execution.cc b/chromium/v8/src/execution.cc
index 979641a9de5..c0e9a64fbb0 100644
--- a/chromium/v8/src/execution.cc
+++ b/chromium/v8/src/execution.cc
@@ -354,6 +354,20 @@ Handle<Object> Execution::TryGetConstructorDelegate(
}
+void Execution::RunMicrotasks(Isolate* isolate) {
+ ASSERT(isolate->microtask_pending());
+ bool threw = false;
+ Execution::Call(
+ isolate,
+ isolate->run_microtasks(),
+ isolate->factory()->undefined_value(),
+ 0,
+ NULL,
+ &threw);
+ ASSERT(!threw);
+}
+
+
bool StackGuard::IsStackOverflow() {
ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -814,8 +828,6 @@ static Object* RuntimePreempt(Isolate* isolate) {
// Clear the preempt request flag.
isolate->stack_guard()->Continue(PREEMPT);
- ContextSwitcher::PreemptionReceived();
-
#ifdef ENABLE_DEBUGGER_SUPPORT
if (isolate->debug()->InDebugger()) {
// If currently in the debugger don't do any actual preemption but record
@@ -951,7 +963,7 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
Deoptimizer::DeoptimizeAll(isolate);
}
if (stack_guard->IsInstallCodeRequest()) {
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(isolate->concurrent_recompilation_enabled());
stack_guard->Continue(INSTALL_CODE);
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
diff --git a/chromium/v8/src/execution.h b/chromium/v8/src/execution.h
index 371ea309d6c..eda416c0da5 100644
--- a/chromium/v8/src/execution.h
+++ b/chromium/v8/src/execution.h
@@ -171,6 +171,8 @@ class Execution : public AllStatic {
static Handle<Object> TryGetConstructorDelegate(Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception);
+
+ static void RunMicrotasks(Isolate* isolate);
};
diff --git a/chromium/v8/src/extensions/externalize-string-extension.cc b/chromium/v8/src/extensions/externalize-string-extension.cc
index 5fd821b9c07..edc7dd8052c 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.cc
+++ b/chromium/v8/src/extensions/externalize-string-extension.cc
@@ -60,9 +60,9 @@ const char* const ExternalizeStringExtension::kSource =
"native function externalizeString();"
"native function isAsciiString();";
-
-v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
- v8::Handle<v8::String> str) {
+v8::Handle<v8::FunctionTemplate>
+ExternalizeStringExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Handle<v8::String> str) {
if (strcmp(*v8::String::Utf8Value(str), "externalizeString") == 0) {
return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
} else {
@@ -75,7 +75,8 @@ v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
void ExternalizeStringExtension::Externalize(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"First parameter to externalizeString() must be a string."));
return;
}
@@ -84,7 +85,8 @@ void ExternalizeStringExtension::Externalize(
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue();
} else {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"Second parameter to externalizeString() must be a boolean."));
return;
}
@@ -92,7 +94,8 @@ void ExternalizeStringExtension::Externalize(
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (string->IsExternalString()) {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"externalizeString() can't externalize twice."));
return;
}
@@ -120,7 +123,8 @@ void ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
- v8::ThrowException(v8::String::New("externalizeString() failed."));
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(), "externalizeString() failed."));
return;
}
}
@@ -129,7 +133,8 @@ void ExternalizeStringExtension::Externalize(
void ExternalizeStringExtension::IsAscii(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- v8::ThrowException(v8::String::New(
+ args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
+ args.GetIsolate(),
"isAsciiString() requires a single string argument."));
return;
}
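Every call site in this file follows the same migration: the process-global v8::ThrowException and v8::String::New are replaced by their isolate-scoped equivalents. A minimal sketch of the pattern as a helper (ThrowError is an illustrative name, not something this patch adds):

static void ThrowError(const v8::FunctionCallbackInfo<v8::Value>& args,
                       const char* message) {
  // The isolate travels with the callback arguments instead of being
  // looked up through a process-wide "current isolate".
  v8::Isolate* isolate = args.GetIsolate();
  isolate->ThrowException(v8::String::NewFromUtf8(isolate, message));
}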
diff --git a/chromium/v8/src/extensions/externalize-string-extension.h b/chromium/v8/src/extensions/externalize-string-extension.h
index ecbc1cf447c..3d1e438f7f2 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.h
+++ b/chromium/v8/src/extensions/externalize-string-extension.h
@@ -36,7 +36,8 @@ namespace internal {
class ExternalizeStringExtension : public v8::Extension {
public:
ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/chromium/v8/src/extensions/free-buffer-extension.cc b/chromium/v8/src/extensions/free-buffer-extension.cc
new file mode 100644
index 00000000000..5cf2b68146c
--- /dev/null
+++ b/chromium/v8/src/extensions/free-buffer-extension.cc
@@ -0,0 +1,60 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "free-buffer-extension.h"
+#include "platform.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+v8::Handle<v8::FunctionTemplate> FreeBufferExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(FreeBufferExtension::FreeBuffer);
+}
+
+
+void FreeBufferExtension::FreeBuffer(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Handle<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>();
+ v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize();
+ V8::ArrayBufferAllocator()->Free(contents.Data(), contents.ByteLength());
+}
+
+
+void FreeBufferExtension::Register() {
+ static char buffer[100];
+ Vector<char> temp_vector(buffer, sizeof(buffer));
+ OS::SNPrintF(temp_vector, "native function freeBuffer();");
+
+ static FreeBufferExtension buffer_free_extension(buffer);
+ static v8::DeclareExtension declaration(&buffer_free_extension);
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/extensions/free-buffer-extension.h b/chromium/v8/src/extensions/free-buffer-extension.h
new file mode 100644
index 00000000000..22d466f61e0
--- /dev/null
+++ b/chromium/v8/src/extensions/free-buffer-extension.h
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
+#define V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class FreeBufferExtension : public v8::Extension {
+ public:
+ explicit FreeBufferExtension(const char* source)
+ : v8::Extension("v8/free-buffer", source) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
+ v8::Handle<v8::String> name);
+ static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Register();
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
diff --git a/chromium/v8/src/extensions/gc-extension.cc b/chromium/v8/src/extensions/gc-extension.cc
index 308879115fc..b8442c1bf85 100644
--- a/chromium/v8/src/extensions/gc-extension.cc
+++ b/chromium/v8/src/extensions/gc-extension.cc
@@ -32,7 +32,8 @@ namespace v8 {
namespace internal {
-v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
+v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> str) {
return v8::FunctionTemplate::New(GCExtension::GC);
}
diff --git a/chromium/v8/src/extensions/gc-extension.h b/chromium/v8/src/extensions/gc-extension.h
index e412b92a4d9..8c25e7d84d5 100644
--- a/chromium/v8/src/extensions/gc-extension.h
+++ b/chromium/v8/src/extensions/gc-extension.h
@@ -36,7 +36,8 @@ namespace internal {
class GCExtension : public v8::Extension {
public:
explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
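Each extension in this patch is updated to the new GetNativeFunctionTemplate virtual, which receives the isolate as an explicit argument instead of fetching it from a process-wide current isolate. A minimal sketch of an extension against the new signature (HelloExtension and Hello are illustrative names, not from this patch):

class HelloExtension : public v8::Extension {
 public:
  HelloExtension() : v8::Extension("v8/hello", "native function hello();") {}
  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
      v8::Isolate* isolate, v8::Handle<v8::String> name) {
    return v8::FunctionTemplate::New(Hello);
  }
  static void Hello(const v8::FunctionCallbackInfo<v8::Value>& args) {
    args.GetReturnValue().Set(
        v8::String::NewFromUtf8(args.GetIsolate(), "hello"));
  }
};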
diff --git a/chromium/v8/src/extensions/statistics-extension.cc b/chromium/v8/src/extensions/statistics-extension.cc
index 651d99d4526..92d152d0de7 100644
--- a/chromium/v8/src/extensions/statistics-extension.cc
+++ b/chromium/v8/src/extensions/statistics-extension.cc
@@ -34,26 +34,38 @@ const char* const StatisticsExtension::kSource =
"native function getV8Statistics();";
-v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
+v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> str) {
ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
}
-static void AddCounter(v8::Local<v8::Object> object,
+static void AddCounter(v8::Isolate* isolate,
+ v8::Local<v8::Object> object,
StatsCounter* counter,
const char* name) {
if (counter->Enabled()) {
- object->Set(v8::String::New(name),
+ object->Set(v8::String::NewFromUtf8(isolate, name),
v8::Number::New(*counter->GetInternalPointer()));
}
}
-static void AddNumber(v8::Local<v8::Object> object,
+static void AddNumber(v8::Isolate* isolate,
+ v8::Local<v8::Object> object,
intptr_t value,
const char* name) {
- object->Set(v8::String::New(name),
+ object->Set(v8::String::NewFromUtf8(isolate, name),
+ v8::Number::New(static_cast<double>(value)));
+}
+
+
+static void AddNumber64(v8::Isolate* isolate,
+ v8::Local<v8::Object> object,
+ int64_t value,
+ const char* name) {
+ object->Set(v8::String::NewFromUtf8(isolate, name),
v8::Number::New(static_cast<double>(value)));
}
@@ -73,80 +85,88 @@ void StatisticsExtension::GetCounters(
v8::Local<v8::Object> result = v8::Object::New();
#define ADD_COUNTER(name, caption) \
- AddCounter(result, counters->name(), #name);
+ AddCounter(args.GetIsolate(), result, counters->name(), #name);
STATS_COUNTER_LIST_1(ADD_COUNTER)
STATS_COUNTER_LIST_2(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_##name(), "count_of_" #name); \
- AddCounter(result, counters->size_of_##name(), "size_of_" #name);
+#define ADD_COUNTER(name) \
+ AddCounter(args.GetIsolate(), result, counters->count_of_##name(), \
+ "count_of_" #name); \
+ AddCounter(args.GetIsolate(), result, counters->size_of_##name(), \
+ "size_of_" #name);
INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_CODE_TYPE_##name(), \
- "count_of_CODE_TYPE_" #name); \
- AddCounter(result, counters->size_of_CODE_TYPE_##name(), \
+#define ADD_COUNTER(name) \
+ AddCounter(args.GetIsolate(), result, counters->count_of_CODE_TYPE_##name(), \
+ "count_of_CODE_TYPE_" #name); \
+ AddCounter(args.GetIsolate(), result, counters->size_of_CODE_TYPE_##name(), \
"size_of_CODE_TYPE_" #name);
CODE_KIND_LIST(ADD_COUNTER)
#undef ADD_COUNTER
-#define ADD_COUNTER(name) \
- AddCounter(result, counters->count_of_FIXED_ARRAY_##name(), \
- "count_of_FIXED_ARRAY_" #name); \
- AddCounter(result, counters->size_of_FIXED_ARRAY_##name(), \
+#define ADD_COUNTER(name) \
+ AddCounter(args.GetIsolate(), result, \
+ counters->count_of_FIXED_ARRAY_##name(), \
+ "count_of_FIXED_ARRAY_" #name); \
+ AddCounter(args.GetIsolate(), result, \
+ counters->size_of_FIXED_ARRAY_##name(), \
"size_of_FIXED_ARRAY_" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
- AddNumber(result, isolate->memory_allocator()->Size(),
+ AddNumber(args.GetIsolate(), result, isolate->memory_allocator()->Size(),
"total_committed_bytes");
- AddNumber(result, heap->new_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->new_space()->Size(),
"new_space_live_bytes");
- AddNumber(result, heap->new_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->new_space()->Available(),
"new_space_available_bytes");
- AddNumber(result, heap->new_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->new_space()->CommittedMemory(),
"new_space_commited_bytes");
- AddNumber(result, heap->old_pointer_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->old_pointer_space()->Size(),
"old_pointer_space_live_bytes");
- AddNumber(result, heap->old_pointer_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->old_pointer_space()->Available(),
"old_pointer_space_available_bytes");
- AddNumber(result, heap->old_pointer_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result,
+ heap->old_pointer_space()->CommittedMemory(),
"old_pointer_space_commited_bytes");
- AddNumber(result, heap->old_data_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->old_data_space()->Size(),
"old_data_space_live_bytes");
- AddNumber(result, heap->old_data_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->old_data_space()->Available(),
"old_data_space_available_bytes");
- AddNumber(result, heap->old_data_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result,
+ heap->old_data_space()->CommittedMemory(),
"old_data_space_commited_bytes");
- AddNumber(result, heap->code_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->code_space()->Size(),
"code_space_live_bytes");
- AddNumber(result, heap->code_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->code_space()->Available(),
"code_space_available_bytes");
- AddNumber(result, heap->code_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->code_space()->CommittedMemory(),
"code_space_commited_bytes");
- AddNumber(result, heap->cell_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->cell_space()->Size(),
"cell_space_live_bytes");
- AddNumber(result, heap->cell_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->cell_space()->Available(),
"cell_space_available_bytes");
- AddNumber(result, heap->cell_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->cell_space()->CommittedMemory(),
"cell_space_commited_bytes");
- AddNumber(result, heap->property_cell_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->property_cell_space()->Size(),
"property_cell_space_live_bytes");
- AddNumber(result, heap->property_cell_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->property_cell_space()->Available(),
"property_cell_space_available_bytes");
- AddNumber(result, heap->property_cell_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result,
+ heap->property_cell_space()->CommittedMemory(),
"property_cell_space_commited_bytes");
- AddNumber(result, heap->lo_space()->Size(),
+ AddNumber(args.GetIsolate(), result, heap->lo_space()->Size(),
"lo_space_live_bytes");
- AddNumber(result, heap->lo_space()->Available(),
+ AddNumber(args.GetIsolate(), result, heap->lo_space()->Available(),
"lo_space_available_bytes");
- AddNumber(result, heap->lo_space()->CommittedMemory(),
+ AddNumber(args.GetIsolate(), result, heap->lo_space()->CommittedMemory(),
"lo_space_commited_bytes");
- AddNumber(result, heap->amount_of_external_allocated_memory(),
- "amount_of_external_allocated_memory");
+ AddNumber64(args.GetIsolate(), result,
+ heap->amount_of_external_allocated_memory(),
+ "amount_of_external_allocated_memory");
args.GetReturnValue().Set(result);
}
diff --git a/chromium/v8/src/extensions/statistics-extension.h b/chromium/v8/src/extensions/statistics-extension.h
index bfd9c4134e2..f05e7689e73 100644
--- a/chromium/v8/src/extensions/statistics-extension.h
+++ b/chromium/v8/src/extensions/statistics-extension.h
@@ -36,7 +36,8 @@ namespace internal {
class StatisticsExtension : public v8::Extension {
public:
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate,
v8::Handle<v8::String> name);
static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
diff --git a/chromium/v8/src/factory.cc b/chromium/v8/src/factory.cc
index acbaf3c862c..483e6a632a3 100644
--- a/chromium/v8/src/factory.cc
+++ b/chromium/v8/src/factory.cc
@@ -79,6 +79,21 @@ Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
}
+Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
+ int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
+ number_of_int32_entries > 0);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries),
+ ConstantPoolArray);
+}
+
+
Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
@@ -117,15 +132,30 @@ Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
}
-Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
+Handle<ObjectHashTable> Factory::NewObjectHashTable(
+ int at_least_space_for,
+ MinimumCapacity capacity_option) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
ObjectHashTable::Allocate(isolate()->heap(),
- at_least_space_for),
+ at_least_space_for,
+ capacity_option),
ObjectHashTable);
}
+Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ WeakHashTable::Allocate(isolate()->heap(),
+ at_least_space_for,
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ TENURED),
+ WeakHashTable);
+}
+
+
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack) {
ASSERT(0 <= number_of_descriptors);
@@ -280,8 +310,7 @@ Handle<String> ConcatStringContent(Handle<StringType> result,
Handle<String> Factory::NewFlatConcatString(Handle<String> first,
Handle<String> second) {
int total_length = first->length() + second->length();
- if (first->IsOneByteRepresentationUnderneath() &&
- second->IsOneByteRepresentationUnderneath()) {
+ if (first->IsOneByteRepresentation() && second->IsOneByteRepresentation()) {
return ConcatStringContent<uint8_t>(
NewRawOneByteString(total_length), first, second);
} else {
@@ -336,6 +365,14 @@ Handle<Symbol> Factory::NewSymbol() {
}
+Handle<Symbol> Factory::NewPrivateSymbol() {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocatePrivateSymbol(),
+ Symbol);
+}
+
+
Handle<Context> Factory::NewNativeContext() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -414,6 +451,15 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
}
+Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
+ int aliased_context_slot) {
+ Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
+ NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE));
+ entry->set_aliased_context_slot(aliased_context_slot);
+ return entry;
+}
+
+
Handle<DeclaredAccessorDescriptor> Factory::NewDeclaredAccessorDescriptor() {
return Handle<DeclaredAccessorDescriptor>::cast(
NewStruct(DECLARED_ACCESSOR_DESCRIPTOR_TYPE));
@@ -511,15 +557,22 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
}
-Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
- AllowDeferredHandleDereference convert_to_cell;
+Handle<PropertyCell> Factory::NewPropertyCellWithHole() {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocatePropertyCell(*value),
+ isolate()->heap()->AllocatePropertyCell(),
PropertyCell);
}
+Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
+ AllowDeferredHandleDereference convert_to_cell;
+ Handle<PropertyCell> cell = NewPropertyCellWithHole();
+ PropertyCell::SetValueInferType(cell, value);
+ return cell;
+}
+
+
Handle<AllocationSite> Factory::NewAllocationSite() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -539,10 +592,32 @@ Handle<Map> Factory::NewMap(InstanceType type,
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFunctionPrototype(*function),
- JSObject);
+ // Make sure to use globals from the function's context, since the function
+ // can be from a different context.
+ Handle<Context> native_context(function->context()->native_context());
+ Handle<Map> new_map;
+ if (function->shared()->is_generator()) {
+ // Generator prototypes can share maps since they don't have "constructor"
+ // properties.
+ new_map = handle(native_context->generator_object_prototype_map());
+ } else {
+ // Each function prototype gets a fresh map to avoid unwanted sharing of
+ // maps between prototypes of different constructors.
+ Handle<JSFunction> object_function(native_context->object_function());
+ ASSERT(object_function->has_initial_map());
+ new_map = Map::Copy(handle(object_function->initial_map()));
+ }
+
+ Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
+
+ if (!function->shared()->is_generator()) {
+ JSObject::SetLocalPropertyIgnoreAttributes(prototype,
+ constructor_string(),
+ function,
+ DONT_ENUM);
+ }
+
+ return prototype;
}
@@ -560,11 +635,12 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
int instance_size_delta = extra_inobject_properties * kPointerSize;
int max_instance_size_delta =
JSObject::kMaxInstanceSize - copy->instance_size();
- if (instance_size_delta > max_instance_size_delta) {
+ int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2;
+ if (extra_inobject_properties > max_extra_properties) {
// If the instance size overflows, we allocate as many properties
// as we can as inobject properties.
instance_size_delta = max_instance_size_delta;
- extra_inobject_properties = max_instance_size_delta >> kPointerSizeLog2;
+ extra_inobject_properties = max_extra_properties;
}
// Adjust the map with the extra inobject properties.
int inobject_properties =
@@ -598,8 +674,11 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
- int new_length) {
- CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
+ int new_length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ array->CopySize(new_length, pretenure),
+ FixedArray);
}
@@ -609,6 +688,12 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
}
+Handle<ConstantPoolArray> Factory::CopyConstantPoolArray(
+ Handle<ConstantPoolArray> array) {
+ CALL_HEAP_FUNCTION(isolate(), array->Copy(), ConstantPoolArray);
+}
+
+
Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Map> function_map,
@@ -769,7 +854,7 @@ Handle<Object> Factory::NewError(const char* maker,
const char* message,
Vector< Handle<Object> > args) {
// Instantiate a closeable HandleScope for EscapeFrom.
- v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
+ v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
Handle<FixedArray> array = NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
array->set(i, *args[i]);
@@ -972,10 +1057,12 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_ref,
bool immovable,
- bool crankshafted) {
+ bool crankshafted,
+ int prologue_offset) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->CreateCode(
- desc, flags, self_ref, immovable, crankshafted),
+ desc, flags, self_ref, immovable, crankshafted,
+ prologue_offset),
Code);
}
@@ -1002,6 +1089,7 @@ Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
+ JSFunction::EnsureHasInitialMap(constructor);
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
@@ -1016,14 +1104,79 @@ Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
}
-Handle<GlobalObject> Factory::NewGlobalObject(
- Handle<JSFunction> constructor) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateGlobalObject(*constructor),
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
+}
+
+
+static Handle<GlobalObject> NewGlobalObjectFromMap(Isolate* isolate,
+ Handle<Map> map) {
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->Allocate(*map, OLD_POINTER_SPACE),
GlobalObject);
}
+Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
+ ASSERT(constructor->has_initial_map());
+ Handle<Map> map(constructor->initial_map());
+ ASSERT(map->is_dictionary_map());
+
+ // Make sure no field properties are described in the initial map.
+ // This guarantees us that normalizing the properties does not
+ // require us to change property values to PropertyCells.
+ ASSERT(map->NextFreePropertyIndex() == 0);
+
+ // Make sure we don't have a ton of pre-allocated slots in the
+ // global objects. They will be unused once we normalize the object.
+ ASSERT(map->unused_property_fields() == 0);
+ ASSERT(map->inobject_properties() == 0);
+
+ // Initial size of the backing store to avoid resize of the storage during
+ // bootstrapping. The size differs between the JS global object ad the
+ // builtins object.
+ int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
+
+ // Allocate a dictionary object for backing storage.
+ int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
+ Handle<NameDictionary> dictionary = NewNameDictionary(at_least_space_for);
+
+ // The global object might be created from an object template with accessors.
+ // Fill these accessors into the dictionary.
+ Handle<DescriptorArray> descs(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
+ PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
+ Handle<Name> name(descs->GetKey(i));
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate());
+ Handle<PropertyCell> cell = NewPropertyCell(value);
+ NameDictionaryAdd(dictionary, name, cell, d);
+ }
+
+ // Allocate the global object and initialize it with the backing store.
+ Handle<GlobalObject> global = NewGlobalObjectFromMap(isolate(), map);
+ isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map);
+
+ // Create a new map for the global object.
+ Handle<Map> new_map = Map::CopyDropDescriptors(map);
+ new_map->set_dictionary_map(true);
+
+ // Set up the global object as a normalized object.
+ global->set_map(*new_map);
+ global->set_properties(*dictionary);
+
+ // Make sure result is a global object with properties in dictionary.
+ ASSERT(global->IsGlobalObject() && !global->HasFastProperties());
+ return global;
+}
+
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
PretenureFlag pretenure,
@@ -1083,13 +1236,16 @@ void Factory::SetContent(Handle<JSArray> array,
}
-void Factory::EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode) {
- CALL_HEAP_FUNCTION_VOID(
+Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
+ Handle<JSFunction> function) {
+ ASSERT(function->shared()->is_generator());
+ JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> map(function->initial_map());
+ ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
+ CALL_HEAP_FUNCTION(
isolate(),
- array->EnsureCanContainElements(*elements, length, mode));
+ isolate()->heap()->AllocateJSObjectFromMap(*map),
+ JSGeneratorObject);
}
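All of the new factory methods funnel through CALL_HEAP_FUNCTION, which bridges the raw MaybeObject* allocators and the Handle-based API. Roughly, and leaving out the out-of-memory and retry-limit handling of the real macro, it behaves like this hedged sketch (CallHeapFunction is an illustrative stand-in, not the actual definition):

template <typename T, typename AllocFn>
static Handle<T> CallHeapFunction(Isolate* isolate, AllocFn allocate) {
  MaybeObject* maybe = allocate();
  Object* obj = NULL;
  if (!maybe->ToObject(&obj)) {
    // The allocator asked for a GC; collect and try once more.
    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "allocation retry");
    maybe = allocate();
    if (!maybe->ToObject(&obj)) return Handle<T>();
  }
  return Handle<T>(T::cast(obj), isolate);
}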
diff --git a/chromium/v8/src/factory.h b/chromium/v8/src/factory.h
index 1bdf474337c..92086d4b304 100644
--- a/chromium/v8/src/factory.h
+++ b/chromium/v8/src/factory.h
@@ -59,6 +59,11 @@ class Factory {
int size,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<ConstantPoolArray> NewConstantPoolArray(
+ int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries);
+
Handle<SeededNumberDictionary> NewSeededNumberDictionary(
int at_least_space_for);
@@ -69,7 +74,11 @@ class Factory {
Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
- Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
+ Handle<ObjectHashTable> NewObjectHashTable(
+ int at_least_space_for,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
+
+ Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for);
Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
int slack = 0);
@@ -177,6 +186,7 @@ class Factory {
// Create a symbol.
Handle<Symbol> NewSymbol();
+ Handle<Symbol> NewPrivateSymbol();
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -214,6 +224,9 @@ class Factory {
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
+ Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
+ int aliased_context_slot);
+
Handle<DeclaredAccessorDescriptor> NewDeclaredAccessorDescriptor();
Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo();
@@ -241,6 +254,8 @@ class Factory {
Handle<Cell> NewCell(Handle<Object> value);
+ Handle<PropertyCell> NewPropertyCellWithHole();
+
Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
Handle<AllocationSite> NewAllocationSite();
@@ -265,11 +280,15 @@ class Factory {
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
- int new_length);
+ int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
+ Handle<ConstantPoolArray> CopyConstantPoolArray(
+ Handle<ConstantPoolArray> array);
+
// Numbers (e.g. literals) are pretenured by the parser.
Handle<Object> NewNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
@@ -295,7 +314,7 @@ class Factory {
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
- // Global objects are pretenured.
+ // Global objects are pretenured and initialized based on a constructor.
Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
@@ -328,10 +347,7 @@ class Factory {
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
- void EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode);
+ Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
Handle<JSArrayBuffer> NewJSArrayBuffer();
@@ -372,7 +388,8 @@ class Factory {
Code::Flags flags,
Handle<Object> self_reference,
bool immovable = false,
- bool crankshafted = false);
+ bool crankshafted = false,
+ int prologue_offset = Code::kPrologueOffsetNotSet);
Handle<Code> CopyCode(Handle<Code> code);
@@ -462,7 +479,15 @@ class Factory {
&isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR_ACCESSOR
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ inline Handle<Map> name##_map() { \
+ return Handle<Map>(BitCast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+ }
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
#define STRING_ACCESSOR(name, str) \
inline Handle<String> name() { \
@@ -567,101 +592,6 @@ Handle<Object> Factory::NewNumberFromSize(size_t value,
}
-// Used to "safely" transition from pointer-based runtime code to Handle-based
-// runtime code. When a GC happens during the called Handle-based code, a
-// failure object is returned to the pointer-based code to cause it abort and
-// re-trigger a gc of it's own. Since this double-gc will cause the Handle-based
-// code to be called twice, it must be idempotent.
-class IdempotentPointerToHandleCodeTrampoline {
- public:
- explicit IdempotentPointerToHandleCodeTrampoline(Isolate* isolate)
- : isolate_(isolate) {}
-
- template<typename R>
- MUST_USE_RESULT MaybeObject* Call(R (*function)()) {
- int collections = isolate_->heap()->gc_count();
- (*function)();
- return (collections == isolate_->heap()->gc_count())
- ? isolate_->heap()->true_value()
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(R (*function)()) {
- int collections = isolate_->heap()->gc_count();
- Object* result = (*function)();
- return (collections == isolate_->heap()->gc_count())
- ? result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1>
- MUST_USE_RESULT MaybeObject* Call(R (*function)(P1), P1 p1) {
- int collections = isolate_->heap()->gc_count();
- (*function)(p1);
- return (collections == isolate_->heap()->gc_count())
- ? isolate_->heap()->true_value()
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(
- R (*function)(P1),
- P1 p1) {
- int collections = isolate_->heap()->gc_count();
- Object* result = (*function)(p1);
- return (collections == isolate_->heap()->gc_count())
- ? result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1, typename P2>
- MUST_USE_RESULT MaybeObject* Call(
- R (*function)(P1, P2),
- P1 p1,
- P2 p2) {
- int collections = isolate_->heap()->gc_count();
- (*function)(p1, p2);
- return (collections == isolate_->heap()->gc_count())
- ? isolate_->heap()->true_value()
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1, typename P2>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(
- R (*function)(P1, P2),
- P1 p1,
- P2 p2) {
- int collections = isolate_->heap()->gc_count();
- Object* result = (*function)(p1, p2);
- return (collections == isolate_->heap()->gc_count())
- ? result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- template<typename R, typename P1, typename P2, typename P3, typename P4,
- typename P5, typename P6, typename P7>
- MUST_USE_RESULT MaybeObject* CallWithReturnValue(
- R (*function)(P1, P2, P3, P4, P5, P6, P7),
- P1 p1,
- P2 p2,
- P3 p3,
- P4 p4,
- P5 p5,
- P6 p6,
- P7 p7) {
- int collections = isolate_->heap()->gc_count();
- Handle<Object> result = (*function)(p1, p2, p3, p4, p5, p6, p7);
- return (collections == isolate_->heap()->gc_count())
- ? *result
- : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
- }
-
- private:
- Isolate* isolate_;
-};
-
-
} } // namespace v8::internal
#endif // V8_FACTORY_H_
diff --git a/chromium/v8/src/flag-definitions.h b/chromium/v8/src/flag-definitions.h
index 08cd8304e4d..405a351562e 100644
--- a/chromium/v8/src/flag-definitions.h
+++ b/chromium/v8/src/flag-definitions.h
@@ -90,44 +90,34 @@
#define DEFINE_implication(whenflag, thenflag)
#endif
+#define COMMA ,
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
-#define JSARGUMENTS_INIT {{}}
struct JSArguments {
public:
- inline int argc() const {
- return static_cast<int>(storage_[0]);
- }
- inline const char** argv() const {
- return reinterpret_cast<const char**>(storage_[1]);
- }
inline const char*& operator[] (int idx) const {
- return argv()[idx];
- }
- inline JSArguments& operator=(JSArguments args) {
- set_argc(args.argc());
- set_argv(args.argv());
- return *this;
+ return argv[idx];
}
static JSArguments Create(int argc, const char** argv) {
JSArguments args;
- args.set_argc(argc);
- args.set_argv(argv);
+ args.argc = argc;
+ args.argv = argv;
return args;
}
-private:
- void set_argc(int argc) {
- storage_[0] = argc;
- }
- void set_argv(const char** argv) {
- storage_[1] = reinterpret_cast<AtomicWord>(argv);
+ int argc;
+ const char** argv;
+};
+
+struct MaybeBoolFlag {
+ static MaybeBoolFlag Create(bool has_value, bool value) {
+ MaybeBoolFlag flag;
+ flag.has_value = has_value;
+ flag.value = value;
+ return flag;
}
-public:
- // Contains argc and argv. Unfortunately we have to store these two fields
- // into a single one to avoid making the initialization macro (which would be
- // "{ 0, NULL }") contain a coma.
- AtomicWord storage_[2];
+ bool has_value;
+ bool value;
};
#endif
@@ -148,10 +138,13 @@ public:
#endif
#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_maybe_bool(nam, cmt) FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, \
+ { false COMMA false }, cmt)
#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+#define DEFINE_args(nam, cmt) FLAG(ARGS, JSArguments, nam, \
+ { 0 COMMA NULL }, cmt)
#define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
#define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
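The default value is pasted into FLAG(...) as a single macro argument, so a brace initializer such as { false, false } would be split at its comma into two arguments. The COMMA macro defined above hides the comma until after argument substitution. In miniature (DECLARE and my_flag are illustrative names):

#define COMMA ,
#define DECLARE(type, name, default_value) type name = default_value;

// DECLARE(MaybeBoolFlag, my_flag, { false, false })    // four arguments: error
DECLARE(MaybeBoolFlag, my_flag, { false COMMA false })  // three arguments: OK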
@@ -178,26 +171,24 @@ DEFINE_bool(harmony_modules, false,
"enable harmony modules (implies block scoping)")
DEFINE_bool(harmony_symbols, false,
"enable harmony symbols (a.k.a. private names)")
+DEFINE_bool(harmony_promises, false, "enable harmony promises")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
-DEFINE_bool(harmony_typed_arrays, true,
- "enable harmony typed arrays")
-DEFINE_bool(harmony_array_buffer, true,
- "enable harmony array buffer")
-DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
"enable harmony numeric literals (0o77, 0b11)")
DEFINE_bool(harmony_strings, false, "enable harmony string")
DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
+DEFINE_bool(harmony_maths, false, "enable harmony math functions")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_symbols)
+DEFINE_implication(harmony, harmony_promises)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
@@ -206,20 +197,26 @@ DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony, harmony_strings)
DEFINE_implication(harmony, harmony_arrays)
+DEFINE_implication(harmony, harmony_maths)
+DEFINE_implication(harmony_promises, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
-// TODO[dslomov] add harmony => harmony_typed_arrays
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(clever_optimizations,
- true,
+DEFINE_bool(compiled_keyed_dictionary_loads, true,
+ "use optimizing compiler to generate keyed dictionary load stubs")
+DEFINE_bool(clever_optimizations, true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenuring, true, "allocate objects in old space")
// TODO(hpayer): We will remove this flag as soon as we have pretenuring
// support for specific allocation sites.
DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
+DEFINE_bool(allocation_site_pretenuring, false,
+ "pretenure with allocation sites")
+DEFINE_bool(trace_pretenuring, false,
+ "trace pretenuring decisions of HAllocate instructions")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
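The DEFINE_implication entries above are not mere documentation: flag-definitions.h is included multiple times under different FLAG_MODE_* settings, and in the implication pass each entry expands to a runtime statement that forces the implied flag on. Roughly, sketched from the surrounding machinery, so details may differ:

// Under the implication mode the macro becomes executable code:
#define DEFINE_implication(whenflag, thenflag) \
  if (FLAG_##whenflag) FLAG_##thenflag = true;

// so DEFINE_implication(harmony, harmony_promises) expands to:
//   if (FLAG_harmony) FLAG_harmony_promises = true;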
@@ -229,6 +226,11 @@ DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
+// Flags for optimization types.
+DEFINE_bool(optimize_for_size, false,
+ "Enables optimizations which favor memory size over execution "
+ "speed.")
+
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_bool(string_slices, true, "use string slices")
@@ -251,16 +253,17 @@ DEFINE_int(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
- true,
+DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,
"crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
+DEFINE_bool(trace_check_elimination, false, "trace check elimination phase")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter")
DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
+DEFINE_bool(trace_load_elimination, false, "trace load elimination")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
@@ -274,11 +277,9 @@ DEFINE_bool(trace_migration, false, "trace object migration")
DEFINE_bool(trace_generalization, false, "trace map generalization")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
-DEFINE_int(deopt_every_n_times,
- 0,
+DEFINE_int(deopt_every_n_times, 0,
"deoptimize every n times a deopt point is passed")
-DEFINE_int(deopt_every_n_garbage_collections,
- 0,
+DEFINE_int(deopt_every_n_garbage_collections, 0,
"deoptimize every n garbage collections")
DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
@@ -295,11 +296,12 @@ DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
+DEFINE_bool(load_elimination, true, "use load elimination")
+DEFINE_bool(check_elimination, false, "use check elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
-DEFINE_bool(unreachable_code_elimination, false,
- "eliminate unreachable code (hidden behind soft deopts)")
+DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code")
DEFINE_bool(track_allocation_sites, true,
"Use allocation site info to reduce transitions")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
@@ -316,6 +318,8 @@ DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
+DEFINE_int(escape_analysis_iterations, 2,
+ "maximum number of escape analysis fix-point iterations")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
@@ -331,13 +335,18 @@ DEFINE_int(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_int(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
+DEFINE_bool(block_concurrent_recompilation, false,
+ "block queued jobs until released")
DEFINE_bool(concurrent_osr, false,
"concurrent on-stack replacement")
+DEFINE_implication(concurrent_osr, concurrent_recompilation)
DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
"deoptimize the optimized code if the layout of the maps changes.")
+DEFINE_bool(new_string_add, false, "enable new string addition")
+
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
@@ -403,9 +412,11 @@ DEFINE_bool(enable_vldr_imm, false,
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
+#ifdef ADDRESS_SANITIZER
+DEFINE_bool(expose_free_buffer, false, "expose freeBuffer extension")
+#endif
DEFINE_bool(expose_gc, false, "expose gc extension")
-DEFINE_string(expose_gc_as,
- NULL,
+DEFINE_string(expose_gc_as, NULL,
"expose gc extension under the specified name")
DEFINE_implication(expose_gc_as, expose_gc)
DEFINE_bool(expose_externalize_string, false,
@@ -426,8 +437,7 @@ DEFINE_bool(stack_trace_on_abort, true,
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(mask_constants_with_cookie,
- true,
+DEFINE_bool(mask_constants_with_cookie, true,
"use random jit cookie to mask large constants")
// codegen.cc
@@ -504,6 +514,9 @@ DEFINE_bool(trace_gc_ignore_scavenger, false,
"do not print trace line after scavenger collection")
DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
+DEFINE_bool(print_max_heap_committed, false,
+ "print statistics of the maximum memory committed for the heap "
+ "in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
@@ -515,6 +528,8 @@ DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
"make maps embedded in optimized code weak")
+DEFINE_bool(weak_embedded_objects_in_optimized_code, true,
+ "make objects embedded in optimized code weak")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
DEFINE_bool(flush_code_incrementally, true,
@@ -533,18 +548,21 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 0,
"number of parallel and concurrent sweeping threads")
-DEFINE_bool(parallel_marking, false, "enable parallel marking")
-DEFINE_int(marking_threads, 0, "number of parallel marking threads")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
+
+// heap-snapshot-generator.cc
+DEFINE_bool(heap_profiler_trace_objects, false,
+ "Dump heap object allocations/movements/size_updates")
+
+
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
-DEFINE_bool(js_accessor_ics, false, "create ics for js accessors")
// macro-assembler-ia32.cc
DEFINE_bool(native_code_counters, false,
@@ -590,26 +608,23 @@ DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
-DEFINE_bool(preallocate_message_memory, false,
- "preallocate some memory to build stack traces.")
-DEFINE_bool(randomize_hashes,
- true,
+DEFINE_bool(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
-DEFINE_int(hash_seed,
- 0,
+DEFINE_int(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
-// v8.cc
-DEFINE_bool(preemption, false,
- "activate a 100ms timer that switches between V8 threads")
+// snapshot-common.cc
+DEFINE_bool(profile_deserialization, false,
+ "Print the time it takes to deserialize the snapshot.")
// Regexp
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
+DEFINE_maybe_bool(testing_maybe_bool_flag, "testing_maybe_bool_flag")
DEFINE_int(testing_int_flag, 13, "testing_int_flag")
DEFINE_float(testing_float_flag, 2.5, "float-flag")
DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
@@ -626,6 +641,10 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
" the snapshot (mksnapshot only)")
+// code-stubs-hydrogen.cc
+DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
+ "Print the time it takes to lazily compile hydrogen code stubs.")
+
//
// Dev shell flags
//
@@ -642,7 +661,7 @@ DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSARGUMENTS_INIT,
+DEFINE_args(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
#if defined(WEBOS__)
@@ -686,8 +705,10 @@ DEFINE_bool(stress_compaction, false,
#endif
// checks.cc
+#ifdef ENABLE_SLOW_ASSERTS
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
+#endif
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
@@ -724,8 +745,7 @@ DEFINE_bool(print_interface_details, false, "print interface inference details")
DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
// objects.cc
-DEFINE_bool(trace_normalization,
- false,
+DEFINE_bool(trace_normalization, false,
"prints when objects are turned into dictionaries.")
// runtime.cc
@@ -739,12 +759,10 @@ DEFINE_bool(collect_heap_spill_statistics, false,
DEFINE_bool(trace_isolates, false, "trace isolate state changes")
// Regexp
-DEFINE_bool(regexp_possessive_quantifier,
- false,
+DEFINE_bool(regexp_possessive_quantifier, false,
"enable possessive quantifier syntax for testing")
DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
-DEFINE_bool(trace_regexp_assembler,
- false,
+DEFINE_bool(trace_regexp_assembler, false,
"trace regexp macro assembler calls.")
//
@@ -773,7 +791,12 @@ DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
+DEFINE_bool(perf_basic_prof, false,
+ "Enable perf linux profiler (basic support).")
+DEFINE_bool(perf_jit_prof, false,
+ "Enable perf linux profiler (experimental annotate support).")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
@@ -782,6 +805,12 @@ DEFINE_bool(log_timer_events, false,
DEFINE_implication(log_timer_events, log_internal_timer_events)
DEFINE_implication(log_internal_timer_events, prof)
+DEFINE_bool(redirect_code_traces, false,
+ "output deopt information and disassembly into file "
+ "code-<pid>-<isolate id>.asm")
+DEFINE_string(redirect_code_traces_to, NULL,
+ "output deopt information and disassembly into the given file")
+
//
// Disassembler only flags
//
@@ -795,16 +824,18 @@ DEFINE_implication(log_internal_timer_events, prof)
// elements.cc
DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
+DEFINE_bool(trace_creation_allocation_sites, false,
+ "trace the creation of allocation sites")
+
// code-stubs.cc
DEFINE_bool(print_code_stubs, false, "print code stubs")
-DEFINE_bool(test_secondary_stub_cache,
- false,
+DEFINE_bool(test_secondary_stub_cache, false,
"test secondary stub cache by disabling the primary one")
-DEFINE_bool(test_primary_stub_cache,
- false,
+DEFINE_bool(test_primary_stub_cache, false,
"test primary stub cache by disabling the secondary one")
+
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(print_code, false, "print generated code")
DEFINE_bool(print_opt_code, false, "print optimized code")
@@ -812,8 +843,19 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
"printing optimized code based on it")
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+DEFINE_bool(emit_opt_code_positions, false,
+ "annotate optimize code with source code positions")
#ifdef ENABLE_DISASSEMBLER
+DEFINE_bool(sodium, false, "print generated code output suitable for use with "
+ "the Sodium code viewer")
+
+DEFINE_implication(sodium, print_code_stubs)
+DEFINE_implication(sodium, print_code)
+DEFINE_implication(sodium, print_opt_code)
+DEFINE_implication(sodium, emit_opt_code_positions)
+DEFINE_implication(sodium, code_comments)
+
DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
DEFINE_implication(print_all_code, print_code)
DEFINE_implication(print_all_code, print_opt_code)
@@ -827,6 +869,16 @@ DEFINE_implication(print_all_code, trace_codegen)
#endif
#endif
+//
+// Read-only flags
+//
+#undef FLAG
+#define FLAG FLAG_READONLY
+
+// assembler-arm.h
+DEFINE_bool(enable_ool_constant_pool, false,
+ "enable use of out-of-line constant pools (ARM only)")
+
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
@@ -834,6 +886,7 @@ DEFINE_implication(print_all_code, trace_codegen)
#undef FLAG_ALIAS
#undef DEFINE_bool
+#undef DEFINE_maybe_bool
#undef DEFINE_int
#undef DEFINE_string
#undef DEFINE_float
@@ -850,3 +903,5 @@ DEFINE_implication(print_all_code, trace_codegen)
#undef FLAG_MODE_DEFINE_DEFAULTS
#undef FLAG_MODE_META
#undef FLAG_MODE_DEFINE_IMPLICATIONS
+
+#undef COMMA
diff --git a/chromium/v8/src/flags.cc b/chromium/v8/src/flags.cc
index 4e18cc8c808..0c36aed3320 100644
--- a/chromium/v8/src/flags.cc
+++ b/chromium/v8/src/flags.cc
@@ -55,7 +55,8 @@ namespace {
// to the actual flag, default value, comment, etc. This is designed to be POD
// initialized so as to avoid requiring static constructors.
struct Flag {
- enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
+ enum FlagType { TYPE_BOOL, TYPE_MAYBE_BOOL, TYPE_INT, TYPE_FLOAT,
+ TYPE_STRING, TYPE_ARGS };
FlagType type_; // What type of flag, bool, int, or string.
const char* name_; // Name of the flag, ex "my_flag".
@@ -75,6 +76,11 @@ struct Flag {
return reinterpret_cast<bool*>(valptr_);
}
+ MaybeBoolFlag* maybe_bool_variable() const {
+ ASSERT(type_ == TYPE_MAYBE_BOOL);
+ return reinterpret_cast<MaybeBoolFlag*>(valptr_);
+ }
+
int* int_variable() const {
ASSERT(type_ == TYPE_INT);
return reinterpret_cast<int*>(valptr_);
@@ -133,6 +139,8 @@ struct Flag {
switch (type_) {
case TYPE_BOOL:
return *bool_variable() == bool_default();
+ case TYPE_MAYBE_BOOL:
+ return maybe_bool_variable()->has_value == false;
case TYPE_INT:
return *int_variable() == int_default();
case TYPE_FLOAT:
@@ -145,7 +153,7 @@ struct Flag {
return strcmp(str1, str2) == 0;
}
case TYPE_ARGS:
- return args_variable()->argc() == 0;
+ return args_variable()->argc == 0;
}
UNREACHABLE();
return true;
@@ -157,6 +165,9 @@ struct Flag {
case TYPE_BOOL:
*bool_variable() = bool_default();
break;
+ case TYPE_MAYBE_BOOL:
+ *maybe_bool_variable() = MaybeBoolFlag::Create(false, false);
+ break;
case TYPE_INT:
*int_variable() = int_default();
break;
@@ -186,6 +197,7 @@ const size_t num_flags = sizeof(flags) / sizeof(*flags);
static const char* Type2String(Flag::FlagType type) {
switch (type) {
case Flag::TYPE_BOOL: return "bool";
+ case Flag::TYPE_MAYBE_BOOL: return "maybe_bool";
case Flag::TYPE_INT: return "int";
case Flag::TYPE_FLOAT: return "float";
case Flag::TYPE_STRING: return "string";
@@ -203,6 +215,11 @@ static SmartArrayPointer<const char> ToString(Flag* flag) {
case Flag::TYPE_BOOL:
buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ buffer.Add("%s", flag->maybe_bool_variable()->has_value
+ ? (flag->maybe_bool_variable()->value ? "true" : "false")
+ : "unset");
+ break;
case Flag::TYPE_INT:
buffer.Add("%d", *flag->int_variable());
break;
@@ -216,9 +233,9 @@ static SmartArrayPointer<const char> ToString(Flag* flag) {
}
case Flag::TYPE_ARGS: {
JSArguments args = *flag->args_variable();
- if (args.argc() > 0) {
+ if (args.argc > 0) {
buffer.Add("%s", args[0]);
- for (int i = 1; i < args.argc(); i++) {
+ for (int i = 1; i < args.argc; i++) {
buffer.Add(" %s", args[i]);
}
}
@@ -260,7 +277,7 @@ List<const char*>* FlagList::argv() {
buffer.Add("--%s", args_flag->name());
args->Add(buffer.ToCString().Detach());
JSArguments jsargs = *args_flag->args_variable();
- for (int j = 0; j < jsargs.argc(); j++) {
+ for (int j = 0; j < jsargs.argc; j++) {
args->Add(StrDup(jsargs[j]));
}
}
@@ -380,6 +397,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// if we still need a flag value, use the next argument if available
if (flag->type() != Flag::TYPE_BOOL &&
+ flag->type() != Flag::TYPE_MAYBE_BOOL &&
flag->type() != Flag::TYPE_ARGS &&
value == NULL) {
if (i < *argc) {
@@ -399,6 +417,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
case Flag::TYPE_BOOL:
*flag->bool_variable() = !is_bool;
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ *flag->maybe_bool_variable() = MaybeBoolFlag::Create(true, !is_bool);
+ break;
case Flag::TYPE_INT:
*flag->int_variable() = strtol(value, &endp, 10); // NOLINT
break;
@@ -425,8 +446,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
}
// handle errors
- if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
- (flag->type() != Flag::TYPE_BOOL && is_bool) ||
+ bool is_bool_type = flag->type() == Flag::TYPE_BOOL ||
+ flag->type() == Flag::TYPE_MAYBE_BOOL;
+ if ((is_bool_type && value != NULL) || (!is_bool_type && is_bool) ||
*endp != '\0') {
PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
@@ -549,6 +571,7 @@ void FlagList::PrintHelp() {
}
+// static
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
#include "flag-definitions.h"
diff --git a/chromium/v8/src/frames.cc b/chromium/v8/src/frames.cc
index 167277f7996..9549c2db653 100644
--- a/chromium/v8/src/frames.cc
+++ b/chromium/v8/src/frames.cc
@@ -38,8 +38,6 @@
#include "string-stream.h"
#include "vm-state-inl.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -986,14 +984,16 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// to construct a stack trace, the receiver is always in a stack slot.
opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::STACK_SLOT ||
- opcode == Translation::LITERAL);
+ opcode == Translation::LITERAL ||
+ opcode == Translation::CAPTURED_OBJECT ||
+ opcode == Translation::DUPLICATED_OBJECT);
int index = it.Next();
// Get the correct receiver in the optimized frame.
Object* receiver = NULL;
if (opcode == Translation::LITERAL) {
receiver = data->LiteralArray()->get(index);
- } else {
+ } else if (opcode == Translation::STACK_SLOT) {
// Positive index means the value is spilled to the locals
// area. Negative means it is stored in the incoming parameter
// area.
@@ -1009,6 +1009,12 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
? this->receiver()
: this->GetParameter(parameter_index);
}
+ } else {
+ // TODO(3029): Materializing a captured object (or duplicated
+ // object) is hard, so we return undefined for now. This breaks the
+ // produced stack trace, as constructor frames aren't marked as
+ // such anymore.
+ receiver = isolate()->heap()->undefined_value();
}
Code* code = function->shared()->code();
@@ -1401,6 +1407,11 @@ Code* StubFailureTrampolineFrame::unchecked_code() const {
return trampoline;
}
+ StubFailureTailCallTrampolineStub().FindCodeInCache(&trampoline, isolate());
+ if (trampoline->contains(pc())) {
+ return trampoline;
+ }
+
UNREACHABLE();
return NULL;
}
diff --git a/chromium/v8/src/frames.h b/chromium/v8/src/frames.h
index 2bbbd98ac07..230144d6800 100644
--- a/chromium/v8/src/frames.h
+++ b/chromium/v8/src/frames.h
@@ -170,14 +170,15 @@ class StandardFrameConstants : public AllStatic {
// context and function.
// StandardFrame::IterateExpressions assumes that kContextOffset is the last
// object pointer.
- static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
- 2 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kFPOnStackSize;
- static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+ static const int kFixedFrameSizeFromFp = 2 * kPointerSize;
+ static const int kFixedFrameSize = kPCOnStackSize + kFPOnStackSize +
+ kFixedFrameSizeFromFp;
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kContextOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kFPOnStackSize;
+ static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
};
@@ -922,6 +923,13 @@ class StackFrameLocator BASE_EMBEDDED {
};
+// Used to specify the type of prologue to generate.
+enum PrologueFrameMode {
+ BUILD_FUNCTION_FRAME,
+ BUILD_STUB_FRAME
+};
+
+
// Reads all frames on the current stack and copies them into the current
// zone memory.
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
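
As a sanity check on the reshuffled frame constants, here is an illustrative recomputation for a 64-bit target where the pointer, saved-FP and saved-PC stack slots are all 8 bytes (an assumption of this sketch; the real values come from globals.h):

  constexpr int kPointerSize = 8;  // assumed 64-bit target
  constexpr int kFPOnStackSize = kPointerSize;
  constexpr int kPCOnStackSize = kPointerSize;
  constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize;  // context + marker
  constexpr int kFixedFrameSize =
      kPCOnStackSize + kFPOnStackSize + kFixedFrameSizeFromFp;
  static_assert(kFixedFrameSize == 32,
                "saved pc + saved fp + context + marker");
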
diff --git a/chromium/v8/src/full-codegen.cc b/chromium/v8/src/full-codegen.cc
index 91a51731a58..483d1e378d4 100644
--- a/chromium/v8/src/full-codegen.cc
+++ b/chromium/v8/src/full-codegen.cc
@@ -193,12 +193,16 @@ void BreakableStatementChecker::VisitDebuggerStatement(
}
+void BreakableStatementChecker::VisitCaseClause(CaseClause* clause) {
+}
+
+
void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void BreakableStatementChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
+void BreakableStatementChecker::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
}
@@ -341,8 +345,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_handler_table(*cgen.handler_table());
#ifdef ENABLE_DEBUGGER_SUPPORT
- code->set_has_debug_break_slots(
- info->isolate()->debugger()->IsDebuggerActive());
code->set_compiled_optimizable(info->IsOptimizable());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
@@ -826,7 +828,7 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ CodeGenerator::RecordPositions(masm_, stmt->position());
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
@@ -836,7 +838,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
// breakable. For breakable statements the actual recording of the
// position will be postponed to the breakable code (typically an IC).
bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->statement_pos(), !checker.is_breakable());
+ masm_, stmt->position(), !checker.is_breakable());
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
@@ -844,15 +846,15 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
}
}
#else
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+ CodeGenerator::RecordPositions(masm_, stmt->position());
#endif
}
-void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
+void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, pos);
+ CodeGenerator::RecordPositions(masm_, expr->position());
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
@@ -866,7 +868,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
// statement positions this is used for e.g. the condition expression of
// a do while loop.
bool position_recorded = CodeGenerator::RecordPositions(
- masm_, pos, !checker.is_breakable());
+ masm_, expr->position(), !checker.is_breakable());
// If the position recording did record a new position generate a debug
// break slot to make the statement breakable.
if (position_recorded) {
@@ -1293,7 +1295,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// possible to break on the condition.
__ bind(loop_statement.continue_label());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
- SetExpressionPosition(stmt->cond(), stmt->condition_position());
+ SetExpressionPosition(stmt->cond());
VisitForControl(stmt->cond(),
&book_keeping,
loop_statement.break_label(),
@@ -1515,6 +1517,11 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
+void FullCodeGenerator::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
@@ -1522,8 +1529,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
__ bind(&true_case);
- SetExpressionPosition(expr->then_expression(),
- expr->then_expression_position());
+ SetExpressionPosition(expr->then_expression());
if (context()->IsTest()) {
const TestContext* for_test = TestContext::cast(context());
VisitForControl(expr->then_expression(),
@@ -1537,8 +1543,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
- SetExpressionPosition(expr->else_expression(),
- expr->else_expression_position());
+ SetExpressionPosition(expr->else_expression());
VisitInDuplicateContext(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
@@ -1567,10 +1572,34 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
- EmitNewClosure(expr->shared_function_info(), false);
+void FullCodeGenerator::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ NativeFunctionLiteral");
+
+ // Compute the function template for the native function.
+ Handle<String> name = expr->name();
+ v8::Handle<v8::FunctionTemplate> fun_template =
+ expr->extension()->GetNativeFunctionTemplate(
+ reinterpret_cast<v8::Isolate*>(isolate()), v8::Utils::ToLocal(name));
+ ASSERT(!fun_template.IsEmpty());
+
+ // Instantiate the function and create a shared function info from it.
+ Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
+ const int literals = fun->NumberOfLiterals();
+ Handle<Code> code = Handle<Code>(fun->shared()->code());
+ Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
+ bool is_generator = false;
+ Handle<SharedFunctionInfo> shared =
+ isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator,
+ code, Handle<ScopeInfo>(fun->shared()->scope_info()));
+ shared->set_construct_stub(*construct_stub);
+
+ // Copy the function data to the shared function info.
+ shared->set_function_data(fun->shared()->function_data());
+ int parameters = fun->shared()->formal_parameter_count();
+ shared->set_formal_parameter_count(parameters);
+
+ EmitNewClosure(shared, false);
}
@@ -1615,6 +1644,100 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
}
+void BackEdgeTable::Patch(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
+ // Iterate over the back edge table and patch every interrupt
+ // call to an unconditional call to the replacement code.
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
+ ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(true);
+ ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+}
+
+
+void BackEdgeTable::Revert(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
+ // Iterate over the back edge table and revert the patched interrupt calls.
+ ASSERT(unoptimized->back_edges_patched_for_osr());
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
+ ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(false);
+ unoptimized->set_allow_osr_at_loop_nesting_level(0);
+ // Assert that none of the back edges are patched anymore.
+ ASSERT(Verify(isolate, unoptimized, -1));
+}
+
+
+void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = info->isolate();
+ Code* code = info->shared_info()->code();
+ Address pc = code->instruction_start() + info->osr_pc_offset();
+ ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc));
+ Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
+ PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
+}
+
+
+void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = info->isolate();
+ Code* code = info->shared_info()->code();
+ Address pc = code->instruction_start() + info->osr_pc_offset();
+ if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
+ Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
+ }
+}
+
+
+#ifdef DEBUG
+bool BackEdgeTable::Verify(Isolate* isolate,
+ Code* unoptimized,
+ int loop_nesting_level) {
+ DisallowHeapAllocation no_gc;
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ uint32_t loop_depth = back_edges.loop_depth(i);
+ CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
+ // Assert that all back edges for shallower loops (and only those)
+ // have already been patched.
+ CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
+ GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)) != INTERRUPT);
+ }
+ return true;
+}
+#endif // DEBUG
+
+
#undef __
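
Taken together, Patch/Revert/AddStackCheck/RemoveStackCheck walk each back edge through a small state machine. A standalone sketch of the per-edge transitions (illustrative only; the real transitions happen by patching machine code, not by mutating an enum):

  enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

  // Patch: interrupt checks at the OSR loop depth become OSR entries.
  BackEdgeState AfterPatch(BackEdgeState s) {
    return s == INTERRUPT ? ON_STACK_REPLACEMENT : s;
  }
  // AddStackCheck: the OSR entry gains a preceding stack check.
  BackEdgeState AfterAddStackCheck(BackEdgeState s) {
    return s == ON_STACK_REPLACEMENT ? OSR_AFTER_STACK_CHECK : s;
  }
  // RemoveStackCheck: drop the stack check, back to a plain OSR entry.
  BackEdgeState AfterRemoveStackCheck(BackEdgeState s) {
    return s == OSR_AFTER_STACK_CHECK ? ON_STACK_REPLACEMENT : s;
  }
  // Revert: every patched edge becomes an interrupt check again.
  BackEdgeState AfterRevert(BackEdgeState) {
    return INTERRUPT;
  }
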
diff --git a/chromium/v8/src/full-codegen.h b/chromium/v8/src/full-codegen.h
index 5580cb3e86c..11d5341ecab 100644
--- a/chromium/v8/src/full-codegen.h
+++ b/chromium/v8/src/full-codegen.h
@@ -139,65 +139,6 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
- class BackEdgeTableIterator {
- public:
- explicit BackEdgeTableIterator(Code* unoptimized,
- DisallowHeapAllocation* required) {
- ASSERT(unoptimized->kind() == Code::FUNCTION);
- instruction_start_ = unoptimized->instruction_start();
- cursor_ = instruction_start_ + unoptimized->back_edge_table_offset();
- ASSERT(cursor_ < instruction_start_ + unoptimized->instruction_size());
- table_length_ = Memory::uint32_at(cursor_);
- cursor_ += kTableLengthSize;
- end_ = cursor_ + table_length_ * kEntrySize;
- }
-
- bool Done() { return cursor_ >= end_; }
-
- void Next() {
- ASSERT(!Done());
- cursor_ += kEntrySize;
- }
-
- BailoutId ast_id() {
- ASSERT(!Done());
- return BailoutId(static_cast<int>(
- Memory::uint32_at(cursor_ + kAstIdOffset)));
- }
-
- uint32_t loop_depth() {
- ASSERT(!Done());
- return Memory::uint32_at(cursor_ + kLoopDepthOffset);
- }
-
- uint32_t pc_offset() {
- ASSERT(!Done());
- return Memory::uint32_at(cursor_ + kPcOffsetOffset);
- }
-
- Address pc() {
- ASSERT(!Done());
- return instruction_start_ + pc_offset();
- }
-
- uint32_t table_length() { return table_length_; }
-
- private:
- static const int kTableLengthSize = kIntSize;
- static const int kAstIdOffset = 0 * kIntSize;
- static const int kPcOffsetOffset = 1 * kIntSize;
- static const int kLoopDepthOffset = 2 * kIntSize;
- static const int kEntrySize = 3 * kIntSize;
-
- Address cursor_;
- Address end_;
- Address instruction_start_;
- uint32_t table_length_;
-
- DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator);
- };
-
-
private:
class Breakable;
class Iteration;
@@ -556,11 +497,6 @@ class FullCodeGenerator: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask);
-
// Platform-specific code for resuming generators.
void EmitGeneratorResume(Expression *generator,
Expression *value,
@@ -635,7 +571,7 @@ class FullCodeGenerator: public AstVisitor {
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
- void SetExpressionPosition(Expression* expr, int pos);
+ void SetExpressionPosition(Expression* expr);
void SetStatementPosition(int pos);
void SetSourcePosition(int pos);
@@ -940,6 +876,93 @@ class AccessorTable: public TemplateHashMap<Literal,
};
+class BackEdgeTable {
+ public:
+ BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
+ ASSERT(code->kind() == Code::FUNCTION);
+ instruction_start_ = code->instruction_start();
+ Address table_address = instruction_start_ + code->back_edge_table_offset();
+ length_ = Memory::uint32_at(table_address);
+ start_ = table_address + kTableLengthSize;
+ }
+
+ uint32_t length() { return length_; }
+
+ BailoutId ast_id(uint32_t index) {
+ return BailoutId(static_cast<int>(
+ Memory::uint32_at(entry_at(index) + kAstIdOffset)));
+ }
+
+ uint32_t loop_depth(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kLoopDepthOffset);
+ }
+
+ uint32_t pc_offset(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kPcOffsetOffset);
+ }
+
+ Address pc(uint32_t index) {
+ return instruction_start_ + pc_offset(index);
+ }
+
+ enum BackEdgeState {
+ INTERRUPT,
+ ON_STACK_REPLACEMENT,
+ OSR_AFTER_STACK_CHECK
+ };
+
+ // Patch all interrupts at the allowed loop depth in the unoptimized code to
+ // unconditionally call replacement_code.
+ static void Patch(Isolate* isolate,
+ Code* unoptimized_code);
+
+ // Patch the back edge to the target state, given the correct replacement code.
+ static void PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code);
+
+ // Change all patched back edges back to normal interrupts.
+ static void Revert(Isolate* isolate,
+ Code* unoptimized_code);
+
+ // Change a back edge patched for on-stack replacement to perform a
+ // stack check first.
+ static void AddStackCheck(CompilationInfo* info);
+
+ // Remove the stack check, if present, and replace it with on-stack replacement.
+ static void RemoveStackCheck(CompilationInfo* info);
+
+ // Return the current patch state of the back edge.
+ static BackEdgeState GetBackEdgeState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
+
+#ifdef DEBUG
+ // Verify that all back edges of a certain loop depth are patched.
+ static bool Verify(Isolate* isolate,
+ Code* unoptimized_code,
+ int loop_nesting_level);
+#endif // DEBUG
+
+ private:
+ Address entry_at(uint32_t index) {
+ ASSERT(index < length_);
+ return start_ + index * kEntrySize;
+ }
+
+ static const int kTableLengthSize = kIntSize;
+ static const int kAstIdOffset = 0 * kIntSize;
+ static const int kPcOffsetOffset = 1 * kIntSize;
+ static const int kLoopDepthOffset = 2 * kIntSize;
+ static const int kEntrySize = 3 * kIntSize;
+
+ Address start_;
+ Address instruction_start_;
+ uint32_t length_;
+};
+
+
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
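
The offsets at the bottom of BackEdgeTable pin down the in-memory layout: a 32-bit length word followed by packed {ast_id, pc_offset, loop_depth} triples, each field kIntSize wide. A self-contained reader for that layout, assuming the usual 4-byte int (illustrative; V8 itself goes through Memory::uint32_at):

  #include <cstdint>
  #include <cstring>

  struct BackEdgeEntry {
    uint32_t ast_id;
    uint32_t pc_offset;
    uint32_t loop_depth;
  };

  static uint32_t ReadU32(const uint8_t* p) {
    uint32_t v;
    std::memcpy(&v, p, sizeof(v));  // unaligned-safe load
    return v;
  }

  // table points at the uint32 length word; index must be < that length.
  static BackEdgeEntry ReadEntry(const uint8_t* table, uint32_t index) {
    const uint8_t* entry = table + sizeof(uint32_t)           // kTableLengthSize
                           + index * 3 * sizeof(uint32_t);    // kEntrySize
    BackEdgeEntry e;
    e.ast_id     = ReadU32(entry + 0);   // kAstIdOffset
    e.pc_offset  = ReadU32(entry + 4);   // kPcOffsetOffset
    e.loop_depth = ReadU32(entry + 8);   // kLoopDepthOffset
    return e;
  }
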
diff --git a/chromium/v8/src/func-name-inferrer.cc b/chromium/v8/src/func-name-inferrer.cc
index 84d3bf06b87..5409a4e1800 100644
--- a/chromium/v8/src/func-name-inferrer.cc
+++ b/chromium/v8/src/func-name-inferrer.cc
@@ -62,7 +62,7 @@ void FuncNameInferrer::PushLiteralName(Handle<String> name) {
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->result_string()->Equals(*name)) {
+ if (IsOpen() && !isolate()->heap()->dot_result_string()->Equals(*name)) {
names_stack_.Add(Name(name, kVariableName), zone());
}
}
diff --git a/chromium/v8/src/global-handles.cc b/chromium/v8/src/global-handles.cc
index 1a98e49ff37..2ebe1c0088f 100644
--- a/chromium/v8/src/global-handles.cc
+++ b/chromium/v8/src/global-handles.cc
@@ -79,7 +79,7 @@ class GlobalHandles::Node {
Internals::kNodeIsPartiallyDependentShift);
}
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
~Node() {
// TODO(1428): if it's a weak handle we should have invoked its callback.
// Zap the values for eager trapping.
diff --git a/chromium/v8/src/globals.h b/chromium/v8/src/globals.h
index 1977e68c82e..2f526a83bbe 100644
--- a/chromium/v8/src/globals.h
+++ b/chromium/v8/src/globals.h
@@ -187,8 +187,13 @@ typedef byte* Address;
# define V8_INTPTR_C(x) (x ## LL)
# define V8_PTR_PREFIX "I64"
#elif V8_HOST_ARCH_64_BIT
-# define V8_UINT64_C(x) (x ## UL)
-# define V8_INT64_C(x) (x ## L)
+# if V8_OS_MACOSX
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# else
+# define V8_UINT64_C(x) (x ## UL)
+# define V8_INT64_C(x) (x ## L)
+# endif
# define V8_INTPTR_C(x) (x ## L)
# define V8_PTR_PREFIX "l"
#else
@@ -208,13 +213,12 @@ typedef byte* Address;
#define V8PRIuPTR V8_PTR_PREFIX "u"
// Fix for Mac OS X defining uintptr_t as "unsigned long":
-#if defined(__APPLE__) && defined(__MACH__)
+#if V8_OS_MACOSX
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#endif
-#if (defined(__APPLE__) && defined(__MACH__)) || \
- defined(__FreeBSD__) || defined(__OpenBSD__)
+#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__)
#define USING_BSD_ABI
#endif
@@ -226,12 +230,22 @@ const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;
+const int kMaxInt8 = (1 << 7) - 1;
+const int kMinInt8 = -(1 << 7);
+const int kMaxUInt8 = (1 << 8) - 1;
+const int kMinUInt8 = 0;
+const int kMaxInt16 = (1 << 15) - 1;
+const int kMinInt16 = -(1 << 15);
+const int kMaxUInt16 = (1 << 16) - 1;
+const int kMinUInt16 = 0;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
const int kCharSize = sizeof(char); // NOLINT
const int kShortSize = sizeof(short); // NOLINT
const int kIntSize = sizeof(int); // NOLINT
+const int kInt32Size = sizeof(int32_t); // NOLINT
+const int kInt64Size = sizeof(int64_t); // NOLINT
const int kDoubleSize = sizeof(double); // NOLINT
const int kIntptrSize = sizeof(intptr_t); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
@@ -241,17 +255,16 @@ const int kFPOnStackSize = kRegisterSize;
const int kDoubleSizeLog2 = 3;
-// Size of the state of a the random number generator.
-const int kRandomStateSize = 2 * kIntSize;
-
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
+const bool kIs64BitArch = true;
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
+const bool kIs64BitArch = false;
#endif
const int kBitsPerByte = 8;
@@ -354,7 +367,7 @@ F FUNCTION_CAST(Address addr) {
// Define DISABLE_ASAN macros.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
-#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
+#define DISABLE_ASAN __attribute__((no_sanitize_address))
#endif
#endif
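
Two things are worth spelling out here: on Mac OS X, uint64_t is "unsigned long long" even on 64-bit builds, so the UL/L literal suffixes would produce the wrong type, and the new kMaxInt8-style constants are just the usual two's-complement bounds. A quick standalone check (illustrative, using C++11 static_assert):

  #include <cstdint>
  #include <limits>

  static_assert((1 << 7) - 1  == std::numeric_limits<int8_t>::max(),   "kMaxInt8");
  static_assert(-(1 << 7)     == std::numeric_limits<int8_t>::min(),   "kMinInt8");
  static_assert((1 << 8) - 1  == std::numeric_limits<uint8_t>::max(),  "kMaxUInt8");
  static_assert((1 << 15) - 1 == std::numeric_limits<int16_t>::max(),  "kMaxInt16");
  static_assert(-(1 << 15)    == std::numeric_limits<int16_t>::min(),  "kMinInt16");
  static_assert((1 << 16) - 1 == std::numeric_limits<uint16_t>::max(), "kMaxUInt16");
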
diff --git a/chromium/v8/src/handles-inl.h b/chromium/v8/src/handles-inl.h
index 5b879d8f088..ec69c3fdbe6 100644
--- a/chromium/v8/src/handles-inl.h
+++ b/chromium/v8/src/handles-inl.h
@@ -130,16 +130,17 @@ void HandleScope::CloseScope(Isolate* isolate,
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
- current->next = prev_next;
+ std::swap(current->next, prev_next);
current->level--;
if (current->limit != prev_limit) {
current->limit = prev_limit;
DeleteExtensions(isolate);
- }
-
-#ifdef ENABLE_EXTRA_CHECKS
- ZapRange(prev_next, prev_limit);
+#ifdef ENABLE_HANDLE_ZAPPING
+ ZapRange(current->next, prev_limit);
+ } else {
+ ZapRange(current->next, prev_next);
#endif
+ }
}
diff --git a/chromium/v8/src/handles.cc b/chromium/v8/src/handles.cc
index b3704df6989..2d414022e09 100644
--- a/chromium/v8/src/handles.cc
+++ b/chromium/v8/src/handles.cc
@@ -101,7 +101,7 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
}
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
void HandleScope::ZapRange(Object** start, Object** end) {
ASSERT(end - start <= kHandleBlockSize);
for (Object** p = start; p != end; p++) {
@@ -150,54 +150,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
}
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
- // If objects constructed from this function exist then changing
- // 'estimated_nof_properties' is dangerous since the previous value might
- // have been compiled into the fast construct stub. More over, the inobject
- // slack tracking logic might have adjusted the previous value, so even
- // passing the same value is risky.
- if (func->shared()->live_objects_may_exist()) return;
-
- func->shared()->set_expected_nof_properties(nof);
- if (func->has_initial_map()) {
- Handle<Map> new_initial_map =
- func->GetIsolate()->factory()->CopyMap(
- Handle<Map>(func->initial_map()));
- new_initial_map->set_unused_property_fields(nof);
- func->set_initial_map(*new_initial_map);
- }
-}
-
-
-static int ExpectedNofPropertiesFromEstimate(int estimate) {
- // If no properties are added in the constructor, they are more likely
- // to be added later.
- if (estimate == 0) estimate = 2;
-
- // We do not shrink objects that go into a snapshot (yet), so we adjust
- // the estimate conservatively.
- if (Serializer::enabled()) return estimate + 2;
-
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- if (FLAG_clever_optimizations) {
- return estimate + 8;
- } else {
- return estimate + 3;
- }
-}
-
-
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate) {
- // See the comment in SetExpectedNofProperties.
- if (shared->live_objects_may_exist()) return;
-
- shared->set_expected_nof_properties(
- ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
void FlattenString(Handle<String> string) {
CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
}
@@ -208,30 +160,12 @@ Handle<String> FlattenGetString(Handle<String> string) {
}
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::SetObjectProperty(
- isolate, object, key, value, attributes, strict_mode),
- Object);
-}
-
-
Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION(
- isolate,
- Runtime::ForceSetObjectProperty(
- isolate, object, key, value, attributes),
- Object);
+ return Runtime::ForceSetObjectProperty(object->GetIsolate(), object, key,
+ value, attributes);
}
@@ -285,30 +219,6 @@ Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
}
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(str->GetIsolate(),
- str->SubString(start, end, pretenure), String);
-}
-
-
-Handle<JSObject> Copy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*obj), JSObject);
-}
-
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- obj->DeepCopy(isolate),
- JSObject);
-}
-
-
// Wrappers for scripts are kept alive and cached in weak global
// handles referred from foreign objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage
@@ -332,9 +242,9 @@ static void ClearWrapperCache(v8::Isolate* v8_isolate,
Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
if (script->wrapper()->foreign_address() != NULL) {
- // Return the script wrapper directly from the cache.
+ // Return a handle for the existing script wrapper from the cache.
return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+ *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
Isolate* isolate = script->GetIsolate();
// Construct a new script wrapper.
@@ -345,10 +255,10 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
// The allocation might have triggered a GC, which could have called this
// function recursively, and a wrapper has already been created and cached.
- // In that case, simply return the cached wrapper.
+ // In that case, simply return a handle for the cached wrapper.
if (script->wrapper()->foreign_address() != NULL) {
return Handle<JSValue>(
- reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+ *reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
result->set_value(*script);
@@ -727,7 +637,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
// present enum cache. The first step to using the cache is to set the
// enum length of the map by counting the number of own descriptors that
// are not DONT_ENUM or SYMBOLIC.
- if (own_property_count == Map::kInvalidEnumCache) {
+ if (own_property_count == kInvalidEnumCacheSentinel) {
own_property_count = object->map()->NumberOfDescribedProperties(
OWN_DESCRIPTORS, DONT_SHOW);
@@ -839,31 +749,6 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
}
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Add(*key),
- ObjectHashSet);
-}
-
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
- Handle<Object> key) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Remove(*key),
- ObjectHashSet);
-}
-
-
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(table->GetIsolate(),
- table->Put(*key, *value),
- ObjectHashTable);
-}
-
-
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
impl_->BeginDeferredScope();
@@ -905,4 +790,15 @@ DeferredHandles* DeferredHandleScope::Detach() {
}
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code) {
+ heap->EnsureWeakObjectToCodeTable();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(*object));
+ dep = DependentCode::Insert(dep, DependentCode::kWeaklyEmbeddedGroup, code);
+ CALL_HEAP_FUNCTION_VOID(heap->isolate(),
+ heap->AddWeakObjectToCodeDependency(*object, *dep));
+}
+
+
} } // namespace v8::internal
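
The one-character change in GetScriptWrapper is easy to miss: the cached foreign address stores a location (JSValue**), and the old code wrapped that location directly, so the returned Handle aliased the weak global handle backing the cache. Dereferencing first makes the Handle(T*) constructor allocate a fresh slot in the current HandleScope. A toy model of the aliasing difference (illustrative; this is not V8's Handle):

  template <typename T>
  struct ToyHandle {
    explicit ToyHandle(T** location) : location_(location) {}  // wrap a slot
    explicit ToyHandle(T* object) : owned_(object), location_(&owned_) {}
    T* operator*() const { return *location_; }
    T* owned_ = nullptr;  // used only by the fresh-slot constructor
    T** location_;
  };

  // int value = 42;
  // int* cached = &value;
  // ToyHandle<int> aliasing(&cached);   // old: shares the cache's slot
  // ToyHandle<int> fresh(cached);       // new: own slot, same value
  // cached = nullptr;                   // cache slot cleared (e.g. weak GC)
  // *aliasing is now nullptr, while *fresh still points at value.
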
diff --git a/chromium/v8/src/handles.h b/chromium/v8/src/handles.h
index c3e4dca1a6e..7fef9198646 100644
--- a/chromium/v8/src/handles.h
+++ b/chromium/v8/src/handles.h
@@ -83,7 +83,7 @@ class Handle {
// Closes the given scope, but lets this handle escape. See
// implementation in api.h.
- inline Handle<T> EscapeFrom(v8::HandleScope* scope);
+ inline Handle<T> EscapeFrom(v8::EscapableHandleScope* scope);
#ifdef DEBUG
enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
@@ -177,7 +177,7 @@ class HandleScope {
// Extend the handle scope making room for more handles.
static internal::Object** Extend(Isolate* isolate);
-#ifdef ENABLE_EXTRA_CHECKS
+#ifdef ENABLE_HANDLE_ZAPPING
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(Object** start, Object** end);
#endif
@@ -228,13 +228,6 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
@@ -255,10 +248,6 @@ Handle<Object> GetProperty(Isolate* isolate,
Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
uint32_t index);
-Handle<JSObject> Copy(Handle<JSObject> obj);
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj);
-
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
Handle<JSArray> array);
@@ -303,33 +292,13 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second);
-Handle<String> SubString(Handle<String> str,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
-
-// Sets the expected number of properties for the function's instances.
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate);
-
-
Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global);
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
- Handle<Object> key);
-
-Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
- Handle<Object> key,
- Handle<Object> value);
-
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code);
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
diff --git a/chromium/v8/src/harmony-array.js b/chromium/v8/src/harmony-array.js
index e440299ff61..a9cc3b83841 100644
--- a/chromium/v8/src/harmony-array.js
+++ b/chromium/v8/src/harmony-array.js
@@ -121,4 +121,4 @@ function HarmonyArrayExtendArrayPrototype() {
));
}
-HarmonyArrayExtendArrayPrototype(); \ No newline at end of file
+HarmonyArrayExtendArrayPrototype();
diff --git a/chromium/v8/src/harmony-math.js b/chromium/v8/src/harmony-math.js
new file mode 100644
index 00000000000..a4d3f2e8a5e
--- /dev/null
+++ b/chromium/v8/src/harmony-math.js
@@ -0,0 +1,60 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// ES6 draft 09-27-13, section 20.2.2.28.
+function MathSign(x) {
+ x = TO_NUMBER_INLINE(x);
+ if (x > 0) return 1;
+ if (x < 0) return -1;
+ if (x === 0) return x;
+ return NAN;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.34.
+function MathTrunc(x) {
+ x = TO_NUMBER_INLINE(x);
+ if (x > 0) return MathFloor(x);
+ if (x < 0) return MathCeil(x);
+ if (x === 0) return x;
+ return NAN;
+}
+
+
+function ExtendMath() {
+ %CheckIsBootstrapping();
+
+ // Set up the non-enumerable functions on the Math object.
+ InstallFunctions($Math, DONT_ENUM, $Array(
+ "sign", MathSign,
+ "trunc", MathTrunc
+ ));
+}
+
+ExtendMath();
diff --git a/chromium/v8/src/harmony-string.js b/chromium/v8/src/harmony-string.js
index a5c6f4e2ecd..8e4b9a46264 100644
--- a/chromium/v8/src/harmony-string.js
+++ b/chromium/v8/src/harmony-string.js
@@ -151,4 +151,4 @@ function ExtendStringPrototype() {
));
}
-ExtendStringPrototype(); \ No newline at end of file
+ExtendStringPrototype();
diff --git a/chromium/v8/src/heap-inl.h b/chromium/v8/src/heap-inl.h
index 4f1960386a5..525c634da62 100644
--- a/chromium/v8/src/heap-inl.h
+++ b/chromium/v8/src/heap-inl.h
@@ -140,12 +140,11 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
// Compute map and object size.
Map* map = ascii_internalized_string_map();
int size = SeqOneByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -174,12 +173,11 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
// Compute map and object size.
Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -208,14 +206,18 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
}
+MaybeObject* Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
+ return CopyConstantPoolArrayWithMap(src, src->map());
+}
+
+
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
- ASSERT(AllowHandleAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
- ASSERT(space != NEW_SPACE ||
- retry_space == OLD_POINTER_SPACE ||
- retry_space == OLD_DATA_SPACE ||
- retry_space == LO_SPACE);
+ ASSERT(AllowHandleAllocation::IsAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ ASSERT(gc_state_ == NOT_IN_GC);
+ HeapProfiler* profiler = isolate_->heap_profiler();
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
!disallow_allocation_failure_ &&
@@ -225,12 +227,17 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
isolate_->counters()->objs_since_last_full()->Increment();
isolate_->counters()->objs_since_last_young()->Increment();
#endif
+
+ HeapObject* object;
MaybeObject* result;
if (NEW_SPACE == space) {
result = new_space_.AllocateRaw(size_in_bytes);
- if (always_allocate() && result->IsFailure()) {
+ if (always_allocate() && result->IsFailure() && retry_space != NEW_SPACE) {
space = retry_space;
} else {
+ if (profiler->is_tracking_allocations() && result->To(&object)) {
+ profiler->AllocationEvent(object->address(), size_in_bytes);
+ }
return result;
}
}
@@ -252,6 +259,9 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
result = map_space_->AllocateRaw(size_in_bytes);
}
if (result->IsFailure()) old_gen_exhausted_ = true;
+ if (profiler->is_tracking_allocations() && result->To(&object)) {
+ profiler->AllocationEvent(object->address(), size_in_bytes);
+ }
return result;
}
@@ -291,40 +301,6 @@ void Heap::FinalizeExternalString(String* string) {
}
-MaybeObject* Heap::AllocateRawMap() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawCell() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result = cell_space_->AllocateRaw(Cell::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
-MaybeObject* Heap::AllocateRawPropertyCell() {
-#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
-#endif
- MaybeObject* result =
- property_cell_space_->AllocateRaw(PropertyCell::kSize);
- if (result->IsFailure()) old_gen_exhausted_ = true;
- return result;
-}
-
-
bool Heap::InNewSpace(Object* object) {
bool result = new_space_.Contains(object);
ASSERT(!result || // Either not in new space
@@ -507,6 +483,18 @@ void Heap::ScavengePointer(HeapObject** p) {
}
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+ if (FLAG_allocation_site_pretenuring && object->IsJSObject()) {
+ AllocationMemento* memento = AllocationMemento::FindForJSObject(
+ JSObject::cast(object), true);
+ if (memento != NULL) {
+ ASSERT(memento->IsValid());
+ memento->GetAllocationSite()->IncrementMementoFoundCount();
+ }
+ }
+}
+
+
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
@@ -525,6 +513,8 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
+ UpdateAllocationSiteFeedback(object);
+
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
// Call the slow part of scavenge object.
@@ -532,14 +522,6 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
-MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site) {
- return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
- allocation_site, DONT_INITIALIZE_ARRAY_ELEMENTS);
-}
-
-
bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
@@ -566,10 +548,10 @@ MaybeObject* Heap::PrepareForCompare(String* str) {
}
-intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes) {
+int64_t Heap::AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes) {
ASSERT(HasBeenSetUp());
- intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
+ int64_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes > 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
@@ -579,7 +561,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
amount_of_external_allocated_memory_ = 0;
amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
- intptr_t amount_since_last_global_gc = PromotedExternalMemorySize();
+ int64_t amount_since_last_global_gc = PromotedExternalMemorySize();
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
}
@@ -598,9 +580,9 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
"amount=%6" V8_PTR_PREFIX "d KB, since_gc=%6" V8_PTR_PREFIX "d KB, "
"isolate=0x%08" V8PRIxPTR ".\n",
- change_in_bytes / KB,
- amount_of_external_allocated_memory_ / KB,
- PromotedExternalMemorySize() / KB,
+ static_cast<intptr_t>(change_in_bytes / KB),
+ static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB),
+ static_cast<intptr_t>(PromotedExternalMemorySize() / KB),
reinterpret_cast<intptr_t>(isolate()));
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
@@ -847,15 +829,15 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
#ifdef VERIFY_HEAP
-NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() {
+NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_embedded_maps_verification_scope_depth_++;
+ isolate->heap()->no_weak_object_verification_scope_depth_++;
}
-NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() {
+NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_embedded_maps_verification_scope_depth_--;
+ isolate->heap()->no_weak_object_verification_scope_depth_--;
}
#endif
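
The external-memory accounting switches from intptr_t to int64_t because intptr_t is only 32 bits on 32-bit builds, where a couple of gigabytes of externally allocated memory (e.g. ArrayBuffer backing stores) would overflow the counter. A compile-time illustration of the failure mode (standalone, not V8 code):

  #include <cstdint>
  #include <limits>

  // A single ~2GB external allocation already exceeds a 32-bit signed
  // counter's range, while an int64_t counter has plenty of headroom.
  static_assert(int64_t{2} * 1024 * 1024 * 1024 >
                    std::numeric_limits<int32_t>::max(),
                "2GB does not fit a 32-bit signed counter");
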
diff --git a/chromium/v8/src/heap-profiler.cc b/chromium/v8/src/heap-profiler.cc
index e66af3364d8..3d8e3364c90 100644
--- a/chromium/v8/src/heap-profiler.cc
+++ b/chromium/v8/src/heap-profiler.cc
@@ -35,7 +35,9 @@ namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
: snapshots_(new HeapSnapshotsCollection(heap)),
- next_snapshot_uid_(1) {
+ next_snapshot_uid_(1),
+ is_tracking_allocations_(false),
+ is_tracking_object_moves_(false) {
}
@@ -83,6 +85,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
}
}
snapshots_->SnapshotGenerationFinished(result);
+ is_tracking_object_moves_ = true;
return result;
}
@@ -95,8 +98,14 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
}
-void HeapProfiler::StartHeapObjectsTracking() {
- snapshots_->StartHeapObjectsTracking();
+void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
+ snapshots_->StartHeapObjectsTracking(track_allocations);
+ is_tracking_object_moves_ = true;
+ ASSERT(!is_tracking_allocations_);
+ if (track_allocations) {
+ heap()->DisableInlineAllocation();
+ is_tracking_allocations_ = true;
+ }
}
@@ -107,6 +116,10 @@ SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
void HeapProfiler::StopHeapObjectsTracking() {
snapshots_->StopHeapObjectsTracking();
+ if (is_tracking_allocations_) {
+ heap()->EnableInlineAllocation();
+ is_tracking_allocations_ = false;
+ }
}
@@ -132,14 +145,26 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
}
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- snapshots_->ObjectMoveEvent(from, to);
+void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
+ snapshots_->ObjectMoveEvent(from, to, size);
+}
+
+
+void HeapProfiler::AllocationEvent(Address addr, int size) {
+ snapshots_->AllocationEvent(addr, size);
}
+
+void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
+ snapshots_->UpdateObjectSizeEvent(addr, size);
+}
+
+
void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
RetainedObjectInfo* info) {
// TODO(yurus, marja): Don't route this information through GlobalHandles.
heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info);
}
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/heap-profiler.h b/chromium/v8/src/heap-profiler.h
index 5ae60fa9234..13e605b12d3 100644
--- a/chromium/v8/src/heap-profiler.h
+++ b/chromium/v8/src/heap-profiler.h
@@ -37,14 +37,6 @@ namespace internal {
class HeapSnapshot;
class HeapSnapshotsCollection;
-#define HEAP_PROFILE(heap, call) \
- do { \
- v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
- if (profiler != NULL && profiler->is_profiling()) { \
- profiler->call; \
- } \
- } while (false)
-
class HeapProfiler {
public:
explicit HeapProfiler(Heap* heap);
@@ -61,33 +53,43 @@ class HeapProfiler {
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
- void StartHeapObjectsTracking();
+ void StartHeapObjectsTracking(bool track_allocations);
void StopHeapObjectsTracking();
+
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
void DeleteAllSnapshots();
- void ObjectMoveEvent(Address from, Address to);
+ void ObjectMoveEvent(Address from, Address to, int size);
+
+ void AllocationEvent(Address addr, int size);
+
+ void UpdateObjectSizeEvent(Address addr, int size);
void DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
v8::RetainedObjectInfo* ExecuteWrapperClassCallback(uint16_t class_id,
Object** wrapper);
- INLINE(bool is_profiling()) {
- return snapshots_->is_tracking_objects();
- }
-
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+ bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
+ bool is_tracking_allocations() const { return is_tracking_allocations_; }
+
+ int FindUntrackedObjects() {
+ return snapshots_->FindUntrackedObjects();
+ }
+
private:
Heap* heap() const { return snapshots_->heap(); }
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
+ bool is_tracking_allocations_;
+ bool is_tracking_object_moves_;
};
} } // namespace v8::internal
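
With the HEAP_PROFILE macro gone, call sites test the new flags directly before routing an event; the scavenger hunk in heap.cc further down does exactly this for object moves. The caller-side pattern in isolation (HeapProfilerSketch is an illustrative stand-in):

    // Sketch of the new caller-side pattern replacing HEAP_PROFILE(...).
    struct HeapProfilerSketch {
      bool tracking_moves = false;
      bool is_tracking_object_moves() const { return tracking_moves; }
      void ObjectMoveEvent(void* from, void* to, int size) { /* record move */ }
    };

    void OnObjectMoved(HeapProfilerSketch* profiler,
                       void* from, void* to, int size) {
      // Test the flag, then call the event method directly, instead of
      // hiding both behind a macro.
      if (profiler != nullptr && profiler->is_tracking_object_moves()) {
        profiler->ObjectMoveEvent(from, to, size);
      }
    }
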
diff --git a/chromium/v8/src/heap-snapshot-generator-inl.h b/chromium/v8/src/heap-snapshot-generator-inl.h
index 1a878c6df17..43002d2d2b1 100644
--- a/chromium/v8/src/heap-snapshot-generator-inl.h
+++ b/chromium/v8/src/heap-snapshot-generator-inl.h
@@ -85,4 +85,3 @@ int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
} } // namespace v8::internal
#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
-
diff --git a/chromium/v8/src/heap-snapshot-generator.cc b/chromium/v8/src/heap-snapshot-generator.cc
index bd47eec63b3..271f95c5ca0 100644
--- a/chromium/v8/src/heap-snapshot-generator.cc
+++ b/chromium/v8/src/heap-snapshot-generator.cc
@@ -29,6 +29,8 @@
#include "heap-snapshot-generator-inl.h"
+#include "allocation-tracker.h"
+#include "code-stubs.h"
#include "heap-profiler.h"
#include "debug.h"
#include "types.h"
@@ -397,7 +399,7 @@ void HeapObjectsMap::SnapshotGenerationFinished() {
}
-void HeapObjectsMap::MoveObject(Address from, Address to) {
+void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
ASSERT(to != NULL);
ASSERT(from != NULL);
if (from == to) return;
@@ -428,11 +430,27 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
int from_entry_info_index =
static_cast<int>(reinterpret_cast<intptr_t>(from_value));
entries_.at(from_entry_info_index).addr = to;
+  // The size of an object can change during its lifetime, so to keep the
+  // information about the object in entries_ consistent, we have to adjust
+  // its recorded size when the object is migrated.
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Move object from %p to %p old size %6d new size %6d\n",
+ from,
+ to,
+ entries_.at(from_entry_info_index).size,
+ object_size);
+ }
+ entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
}
}
+void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
+ FindOrAddEntry(addr, size, false);
+}
+
+
SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
false);
@@ -445,7 +463,8 @@ SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
- unsigned int size) {
+ unsigned int size,
+ bool accessed) {
ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
true);
@@ -453,14 +472,20 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
int entry_index =
static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
- entry_info.accessed = true;
+ entry_info.accessed = accessed;
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Update object size : %p with old size %d and new size %d\n",
+ addr,
+ entry_info.size,
+ size);
+ }
entry_info.size = size;
return entry_info.id;
}
entry->value = reinterpret_cast<void*>(entries_.length());
SnapshotObjectId id = next_id_;
next_id_ += kObjectIdStep;
- entries_.Add(EntryInfo(id, addr, size));
+ entries_.Add(EntryInfo(id, addr, size, accessed));
ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
return id;
}
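
FindOrAddEntry keys a hash map on the object address: a hit refreshes the recorded size (and, after this change, sets the accessed bit to whatever the caller passed), while a miss appends a new EntryInfo and hands out ids in fixed kObjectIdStep increments. The same bookkeeping in miniature, using std::unordered_map in place of V8's HashMap:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Miniature version of the addr -> (id, size, accessed) bookkeeping.
    struct EntryInfoSketch {
      uint32_t id;
      unsigned size;
      bool accessed;
    };

    class ObjectsMapSketch {
     public:
      uint32_t FindOrAddEntry(uintptr_t addr, unsigned size,
                              bool accessed = true) {
        auto it = index_.find(addr);
        if (it != index_.end()) {
          EntryInfoSketch& info = entries_[it->second];
          info.accessed = accessed;  // caller decides, as in the patch
          info.size = size;          // sizes may change; always refresh
          return info.id;
        }
        index_[addr] = entries_.size();
        entries_.push_back({next_id_, size, accessed});
        uint32_t id = next_id_;
        next_id_ += kObjectIdStep;   // ids move in fixed steps, as in V8
        return id;
      }

     private:
      static const uint32_t kObjectIdStep = 2;
      std::unordered_map<uintptr_t, size_t> index_;
      std::vector<EntryInfoSketch> entries_;
      uint32_t next_id_ = 1;
    };

    int main() {
      ObjectsMapSketch map;
      uint32_t a = map.FindOrAddEntry(0x1000, 32);
      uint32_t b = map.FindOrAddEntry(0x2000, 16);
      assert(b == a + 2);                           // fixed id step
      assert(map.FindOrAddEntry(0x1000, 48) == a);  // same id, new size
    }
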
@@ -472,6 +497,10 @@ void HeapObjectsMap::StopHeapObjectsTracking() {
void HeapObjectsMap::UpdateHeapObjectsMap() {
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+ entries_map_.occupancy());
+ }
heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::UpdateHeapObjectsMap");
HeapIterator iterator(heap_);
@@ -479,8 +508,129 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
obj != NULL;
obj = iterator.next()) {
FindOrAddEntry(obj->address(), obj->Size());
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("Update object : %p %6d. Next address is %p\n",
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ }
}
RemoveDeadEntries();
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("End HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
+ entries_map_.occupancy());
+ }
+}
+
+
+namespace {
+
+
+struct HeapObjectInfo {
+ HeapObjectInfo(HeapObject* obj, int expected_size)
+ : obj(obj),
+ expected_size(expected_size) {
+ }
+
+ HeapObject* obj;
+ int expected_size;
+
+ bool IsValid() const { return expected_size == obj->Size(); }
+
+ void Print() const {
+ if (expected_size == 0) {
+ PrintF("Untracked object : %p %6d. Next address is %p\n",
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ } else if (obj->Size() != expected_size) {
+ PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
+ expected_size,
+ obj->address(),
+ obj->Size(),
+ obj->address() + obj->Size());
+ } else {
+ PrintF("Good object : %p %6d. Next address is %p\n",
+ obj->address(),
+ expected_size,
+ obj->address() + obj->Size());
+ }
+ }
+};
+
+
+static int comparator(const HeapObjectInfo* a, const HeapObjectInfo* b) {
+ if (a->obj < b->obj) return -1;
+ if (a->obj > b->obj) return 1;
+ return 0;
+}
+
+
+} // namespace
+
+
+int HeapObjectsMap::FindUntrackedObjects() {
+ List<HeapObjectInfo> heap_objects(1000);
+
+ HeapIterator iterator(heap_);
+ int untracked = 0;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ HashMap::Entry* entry = entries_map_.Lookup(
+ obj->address(), ComputePointerHash(obj->address()), false);
+ if (entry == NULL) {
+ ++untracked;
+ if (FLAG_heap_profiler_trace_objects) {
+ heap_objects.Add(HeapObjectInfo(obj, 0));
+ }
+ } else {
+ int entry_index = static_cast<int>(
+ reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ if (FLAG_heap_profiler_trace_objects) {
+ heap_objects.Add(HeapObjectInfo(obj,
+ static_cast<int>(entry_info.size)));
+ if (obj->Size() != static_cast<int>(entry_info.size))
+ ++untracked;
+ } else {
+ CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+ }
+ }
+ }
+ if (FLAG_heap_profiler_trace_objects) {
+ PrintF("\nBegin HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n",
+ entries_map_.occupancy());
+ heap_objects.Sort(comparator);
+ int last_printed_object = -1;
+ bool print_next_object = false;
+ for (int i = 0; i < heap_objects.length(); ++i) {
+ const HeapObjectInfo& object_info = heap_objects[i];
+ if (!object_info.IsValid()) {
+ ++untracked;
+ if (last_printed_object != i - 1) {
+ if (i > 0) {
+ PrintF("%d objects were skipped\n", i - 1 - last_printed_object);
+ heap_objects[i - 1].Print();
+ }
+ }
+ object_info.Print();
+ last_printed_object = i;
+ print_next_object = true;
+ } else if (print_next_object) {
+ object_info.Print();
+ print_next_object = false;
+ last_printed_object = i;
+ }
+ }
+ if (last_printed_object < heap_objects.length() - 1) {
+ PrintF("Last %d objects were skipped\n",
+ heap_objects.length() - 1 - last_printed_object);
+ }
+ PrintF("End HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n\n",
+ entries_map_.occupancy());
+ }
+ return untracked;
}
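
FindUntrackedObjects is a consistency check: after a full heap walk, every live object should already be in the map with its current size, and the return value counts the exceptions. Presumably this is meant for tests; a hypothetical check might look like:

    #include <cassert>

    // Hypothetical profiler handle; FindUntrackedObjects mirrors the method
    // added in this patch (a stand-in body here, since the real walk needs
    // the heap).
    struct ProfilerHandle {
      int FindUntrackedObjects() { return 0; }
    };

    int main() {
      ProfilerHandle profiler;
      // With allocation tracking active, every allocation and move should
      // have been observed, so no live object may be missing or stale-sized.
      assert(profiler.FindUntrackedObjects() == 0);
    }
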
@@ -585,9 +735,9 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
- : is_tracking_objects_(false),
- names_(heap),
- ids_(heap) {
+ : names_(heap),
+ ids_(heap),
+ allocation_tracker_(NULL) {
}
@@ -597,13 +747,31 @@ static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
HeapSnapshotsCollection::~HeapSnapshotsCollection() {
+ delete allocation_tracker_;
snapshots_.Iterate(DeleteHeapSnapshot);
}
+void HeapSnapshotsCollection::StartHeapObjectsTracking(bool track_allocations) {
+ ids_.UpdateHeapObjectsMap();
+ ASSERT(allocation_tracker_ == NULL);
+ if (track_allocations) {
+ allocation_tracker_ = new AllocationTracker(&ids_, names());
+ }
+}
+
+
+void HeapSnapshotsCollection::StopHeapObjectsTracking() {
+ ids_.StopHeapObjectsTracking();
+ if (allocation_tracker_ != NULL) {
+ delete allocation_tracker_;
+ allocation_tracker_ = NULL;
+ }
+}
+
+
HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
unsigned uid) {
- is_tracking_objects_ = true; // Start watching for heap objects moves.
return new HeapSnapshot(this, name, uid);
}
@@ -644,6 +812,14 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
}
+void HeapSnapshotsCollection::AllocationEvent(Address addr, int size) {
+ DisallowHeapAllocation no_allocation;
+ if (allocation_tracker_ != NULL) {
+ allocation_tracker_->AllocationEvent(addr, size);
+ }
+}
+
+
size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
size_t size = sizeof(*this);
size += names_.GetUsedMemorySize();
@@ -920,7 +1096,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
void VisitCodeEntry(Address entry_address) {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
generator_->SetInternalReference(parent_obj_, parent_, "code", code);
- generator_->TagObject(code, "(code)");
+ generator_->TagCodeObject(code);
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
@@ -1180,10 +1356,20 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
int entry, SharedFunctionInfo* shared) {
HeapObject* obj = shared;
+ StringsStorage* names = collection_->names();
+ String* shared_name = shared->DebugName();
+ const char* name = NULL;
+ if (shared_name != *heap_->isolate()->factory()->empty_string()) {
+ name = names->GetName(shared_name);
+ TagObject(shared->code(), names->GetFormatted("(code for %s)", name));
+ } else {
+ TagObject(shared->code(), names->GetFormatted("(%s code)",
+ Code::Kind2String(shared->code()->kind())));
+ }
+
SetInternalReference(obj, entry,
"name", shared->name(),
SharedFunctionInfo::kNameOffset);
- TagObject(shared->code(), "(code)");
SetInternalReference(obj, entry,
"code", shared->code(),
SharedFunctionInfo::kCodeOffset);
@@ -1197,7 +1383,10 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
- TagObject(shared->construct_stub(), "(code)");
+ const char* construct_stub_name = name ?
+ names->GetFormatted("(construct stub code for %s)", name) :
+ "(construct stub code)";
+ TagObject(shared->construct_stub(), construct_stub_name);
SetInternalReference(obj, entry,
"construct_stub", shared->construct_stub(),
SharedFunctionInfo::kConstructStubOffset);
@@ -1210,6 +1399,9 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"inferred_name", shared->inferred_name(),
SharedFunctionInfo::kInferredNameOffset);
+ SetInternalReference(obj, entry,
+ "optimized_code_map", shared->optimized_code_map(),
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
SetWeakReference(obj, entry,
1, shared->initial_map(),
SharedFunctionInfo::kInitialMapOffset);
@@ -1259,7 +1451,23 @@ void V8HeapExplorer::ExtractCodeCacheReferences(
}
+void V8HeapExplorer::TagCodeObject(Code* code, const char* external_name) {
+ TagObject(code, collection_->names()->GetFormatted("(%s code)",
+ external_name));
+}
+
+
+void V8HeapExplorer::TagCodeObject(Code* code) {
+ if (code->kind() == Code::STUB) {
+ TagObject(code, collection_->names()->GetFormatted(
+ "(%s code)", CodeStub::MajorName(
+ static_cast<CodeStub::Major>(code->major_key()), true)));
+ }
+}
+
+
void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
+ TagCodeObject(code);
TagObject(code->relocation_info(), "(code relocation info)");
SetInternalReference(code, entry,
"relocation_info", code->relocation_info(),
@@ -1301,6 +1509,19 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
AllocationSite* site) {
SetInternalReference(site, entry, "transition_info", site->transition_info(),
AllocationSite::kTransitionInfoOffset);
+ SetInternalReference(site, entry, "nested_site", site->nested_site(),
+ AllocationSite::kNestedSiteOffset);
+ SetInternalReference(site, entry, "memento_found_count",
+ site->memento_found_count(),
+ AllocationSite::kMementoFoundCountOffset);
+ SetInternalReference(site, entry, "memento_create_count",
+ site->memento_create_count(),
+ AllocationSite::kMementoCreateCountOffset);
+ SetInternalReference(site, entry, "pretenure_decision",
+ site->pretenure_decision(),
+ AllocationSite::kPretenureDecisionOffset);
+ SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
+ AllocationSite::kDependentCodeOffset);
}
@@ -1501,9 +1722,10 @@ class RootsReferencesExtractor : public ObjectVisitor {
};
public:
- RootsReferencesExtractor()
+ explicit RootsReferencesExtractor(Heap* heap)
: collecting_all_references_(false),
- previous_reference_count_(0) {
+ previous_reference_count_(0),
+ heap_(heap) {
}
void VisitPointers(Object** start, Object** end) {
@@ -1518,22 +1740,30 @@ class RootsReferencesExtractor : public ObjectVisitor {
void FillReferences(V8HeapExplorer* explorer) {
ASSERT(strong_references_.length() <= all_references_.length());
+ Builtins* builtins = heap_->isolate()->builtins();
for (int i = 0; i < reference_tags_.length(); ++i) {
explorer->SetGcRootsReference(reference_tags_[i].tag);
}
- int strong_index = 0, all_index = 0, tags_index = 0;
+ int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0;
while (all_index < all_references_.length()) {
if (strong_index < strong_references_.length() &&
strong_references_[strong_index] == all_references_[all_index]) {
explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
false,
- all_references_[all_index++]);
+ all_references_[all_index]);
++strong_index;
} else {
explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
true,
- all_references_[all_index++]);
+ all_references_[all_index]);
+ }
+ if (reference_tags_[tags_index].tag ==
+ VisitorSynchronization::kBuiltins) {
+ ASSERT(all_references_[all_index]->IsCode());
+ explorer->TagCodeObject(Code::cast(all_references_[all_index]),
+ builtins->name(builtin_index++));
}
+ ++all_index;
if (reference_tags_[tags_index].index == all_index) ++tags_index;
}
}
@@ -1552,6 +1782,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
List<Object*> all_references_;
int previous_reference_count_;
List<IndexTag> reference_tags_;
+ Heap* heap_;
};
@@ -1577,7 +1808,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
}
SetRootGcRootsReference();
- RootsReferencesExtractor extractor;
+ RootsReferencesExtractor extractor(heap_);
heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
extractor.SetCollectingAllReferences();
heap_->IterateRoots(&extractor, VISIT_ALL);
@@ -2438,6 +2669,10 @@ const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
+ if (AllocationTracker* allocation_tracker =
+ snapshot_->collection()->allocation_tracker()) {
+ allocation_tracker->PrepareForSerialization();
+ }
ASSERT(writer_ == NULL);
writer_ = new OutputStreamWriter(stream);
SerializeImpl();
@@ -2461,6 +2696,16 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
SerializeEdges();
if (writer_->aborted()) return;
writer_->AddString("],\n");
+
+ writer_->AddString("\"trace_function_infos\":[");
+ SerializeTraceNodeInfos();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"trace_tree\":[");
+ SerializeTraceTree();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+
writer_->AddString("\"strings\":[");
SerializeStrings();
if (writer_->aborted()) return;
@@ -2472,7 +2717,7 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
HashMap::Entry* cache_entry = strings_.Lookup(
- const_cast<char*>(s), ObjectHash(s), true);
+ const_cast<char*>(s), StringHash(s), true);
if (cache_entry->value == NULL) {
cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
}
@@ -2621,7 +2866,20 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("shortcut") ","
JSON_S("weak")) ","
JSON_S("string_or_number") ","
- JSON_S("node"))));
+ JSON_S("node")) ","
+ JSON_S("trace_function_info_fields") ":" JSON_A(
+ JSON_S("function_id") ","
+ JSON_S("name") ","
+ JSON_S("script_name") ","
+ JSON_S("script_id") ","
+ JSON_S("line") ","
+ JSON_S("column")) ","
+ JSON_S("trace_node_fields") ":" JSON_A(
+ JSON_S("id") ","
+ JSON_S("function_id") ","
+ JSON_S("count") ","
+ JSON_S("size") ","
+ JSON_S("children"))));
#undef JSON_S
#undef JSON_O
#undef JSON_A
@@ -2629,6 +2887,13 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddNumber(snapshot_->entries().length());
writer_->AddString(",\"edge_count\":");
writer_->AddNumber(snapshot_->edges().length());
+ writer_->AddString(",\"trace_function_count\":");
+ uint32_t count = 0;
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (tracker) {
+ count = tracker->id_to_function_info()->occupancy();
+ }
+ writer_->AddNumber(count);
}
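
Taken together, the serializer changes add two top-level arrays between "edges" and "strings" plus a trace_function_count meta field. The resulting layout, roughly (illustrative values; field orders follow the JSON_S lists above):

    {
      "snapshot": { "meta": { ..., "trace_function_info_fields": [...],
                              "trace_node_fields": [...] },
                    "node_count": N, "edge_count": M,
                    "trace_function_count": K },
      "nodes": [ ... ],
      "edges": [ ... ],
      "trace_function_infos": [ function_id, name, script_name, script_id,
                                line, column, ... ],
      "trace_tree": [ id, function_id, count, size, [ ...children... ] ],
      "strings": [ "<dummy>", ... ]
    }
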
@@ -2642,6 +2907,100 @@ static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
}
+void HeapSnapshotJSONSerializer::SerializeTraceTree() {
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (!tracker) return;
+ AllocationTraceTree* traces = tracker->trace_tree();
+ SerializeTraceNode(traces->root());
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
+ // The buffer needs space for 4 unsigned ints, 4 commas, [ and \0
+ const int kBufferSize =
+ 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 4 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ int buffer_pos = 0;
+ buffer_pos = utoa(node->id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(node->allocation_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer[buffer_pos++] = '[';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+
+ Vector<AllocationTraceNode*> children = node->children();
+ for (int i = 0; i < children.length(); i++) {
+ if (i > 0) {
+ writer_->AddCharacter(',');
+ }
+ SerializeTraceNode(children[i]);
+ }
+ writer_->AddCharacter(']');
+}
+
+
+// A 0-based position is converted to 1-based during serialization.
+static int SerializePosition(int position, const Vector<char>& buffer,
+ int buffer_pos) {
+ if (position == -1) {
+ buffer[buffer_pos++] = '0';
+ } else {
+ ASSERT(position >= 0);
+ buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos);
+ }
+ return buffer_pos;
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
+ AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
+ if (!tracker) return;
+ // The buffer needs space for 6 unsigned ints, 6 commas, \n and \0
+ const int kBufferSize =
+ 6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 6 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ HashMap* id_to_function_info = tracker->id_to_function_info();
+ bool first_entry = true;
+ for (HashMap::Entry* p = id_to_function_info->Start();
+ p != NULL;
+ p = id_to_function_info->Next(p)) {
+ SnapshotObjectId id =
+ static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
+ AllocationTracker::FunctionInfo* info =
+ reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
+ int buffer_pos = 0;
+ if (first_entry) {
+ first_entry = false;
+ } else {
+ buffer[buffer_pos++] = ',';
+ }
+ buffer_pos = utoa(id, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(info->script_name), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+    // The cast is safe because the script id is a non-negative Smi.
+ buffer_pos = utoa(static_cast<unsigned>(info->script_id), buffer,
+ buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = SerializePosition(info->line, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = SerializePosition(info->column, buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+ }
+}
+
+
void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
writer_->AddCharacter('\n');
writer_->AddCharacter('\"');
@@ -2693,37 +3052,21 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
void HeapSnapshotJSONSerializer::SerializeStrings() {
- List<HashMap::Entry*> sorted_strings;
- SortHashMap(&strings_, &sorted_strings);
+ ScopedVector<const unsigned char*> sorted_strings(
+ strings_.occupancy() + 1);
+ for (HashMap::Entry* entry = strings_.Start();
+ entry != NULL;
+ entry = strings_.Next(entry)) {
+ int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
+ sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
+ }
writer_->AddString("\"<dummy>\"");
- for (int i = 0; i < sorted_strings.length(); ++i) {
+ for (int i = 1; i < sorted_strings.length(); ++i) {
writer_->AddCharacter(',');
- SerializeString(
- reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
+ SerializeString(sorted_strings[i]);
if (writer_->aborted()) return;
}
}
-template<typename T>
-inline static int SortUsingEntryValue(const T* x, const T* y) {
- uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
- uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
- if (x_uint > y_uint) {
- return 1;
- } else if (x_uint == y_uint) {
- return 0;
- } else {
- return -1;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SortHashMap(
- HashMap* map, List<HashMap::Entry*>* sorted_entries) {
- for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
- sorted_entries->Add(p);
- sorted_entries->Sort(SortUsingEntryValue);
-}
-
} } // namespace v8::internal
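
The deleted SortHashMap path collected every entry and sorted by value; since string ids are assigned densely starting at 1, the replacement just writes each string into slot id of a vector, an O(n) placement instead of an O(n log n) sort (index 0 stays unused, matching the "<dummy>" slot). The same trick in isolation:

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Ids are dense from 1, so placing each string at index id recovers id
    // order without sorting.
    int main() {
      std::unordered_map<std::string, int> string_ids = {
          {"foo", 1}, {"bar", 2}, {"baz", 3}};
      std::vector<const std::string*> by_id(string_ids.size() + 1, nullptr);
      for (const auto& entry : string_ids) {
        by_id[entry.second] = &entry.first;
      }
      for (size_t i = 1; i < by_id.size(); ++i) {
        std::printf("%zu: %s\n", i, by_id[i]->c_str());  // 1: foo, 2: bar, ...
      }
    }
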
diff --git a/chromium/v8/src/heap-snapshot-generator.h b/chromium/v8/src/heap-snapshot-generator.h
index 7b0cf8f021e..e55513f890d 100644
--- a/chromium/v8/src/heap-snapshot-generator.h
+++ b/chromium/v8/src/heap-snapshot-generator.h
@@ -33,6 +33,8 @@
namespace v8 {
namespace internal {
+class AllocationTracker;
+class AllocationTraceNode;
class HeapEntry;
class HeapSnapshot;
@@ -227,8 +229,11 @@ class HeapObjectsMap {
void SnapshotGenerationFinished();
SnapshotObjectId FindEntry(Address addr);
- SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
- void MoveObject(Address from, Address to);
+ SnapshotObjectId FindOrAddEntry(Address addr,
+ unsigned int size,
+ bool accessed = true);
+ void MoveObject(Address from, Address to, int size);
+ void UpdateObjectSize(Address addr, int size);
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
}
@@ -247,6 +252,10 @@ class HeapObjectsMap {
static const SnapshotObjectId kGcRootsFirstSubrootId;
static const SnapshotObjectId kFirstAvailableObjectId;
+ int FindUntrackedObjects();
+
+ void UpdateHeapObjectsMap();
+
private:
struct EntryInfo {
EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
@@ -265,7 +274,6 @@ class HeapObjectsMap {
uint32_t count;
};
- void UpdateHeapObjectsMap();
void RemoveDeadEntries();
SnapshotObjectId next_id_;
@@ -285,12 +293,11 @@ class HeapSnapshotsCollection {
Heap* heap() const { return ids_.heap(); }
- bool is_tracking_objects() { return is_tracking_objects_; }
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
return ids_.PushHeapObjectsStats(stream);
}
- void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
- void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
+ void StartHeapObjectsTracking(bool track_allocations);
+ void StopHeapObjectsTracking();
HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
void SnapshotGenerationFinished(HeapSnapshot* snapshot);
@@ -298,6 +305,7 @@ class HeapSnapshotsCollection {
void RemoveSnapshot(HeapSnapshot* snapshot);
StringsStorage* names() { return &names_; }
+ AllocationTracker* allocation_tracker() { return allocation_tracker_; }
SnapshotObjectId FindObjectId(Address object_addr) {
return ids_.FindEntry(object_addr);
@@ -306,18 +314,26 @@ class HeapSnapshotsCollection {
return ids_.FindOrAddEntry(object_addr, object_size);
}
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+ void ObjectMoveEvent(Address from, Address to, int size) {
+ ids_.MoveObject(from, to, size);
+ }
+ void AllocationEvent(Address addr, int size);
+ void UpdateObjectSizeEvent(Address addr, int size) {
+ ids_.UpdateObjectSize(addr, size);
+ }
SnapshotObjectId last_assigned_id() const {
return ids_.last_assigned_id();
}
size_t GetUsedMemorySize() const;
+ int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
+
private:
- bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
StringsStorage names_;
// Mapping from HeapObject addresses to objects' uids.
HeapObjectsMap ids_;
+ AllocationTracker* allocation_tracker_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};
@@ -426,6 +442,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
int EstimateObjectsCount(HeapIterator* iterator);
bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
void TagGlobalObjects();
+ void TagCodeObject(Code* code);
+ void TagCodeObject(Code* code, const char* external_name);
static String* GetConstructorName(JSObject* object);
@@ -628,7 +646,7 @@ class HeapSnapshotJSONSerializer {
public:
explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
: snapshot_(snapshot),
- strings_(ObjectsMatch),
+ strings_(StringsMatch),
next_node_id_(1),
next_string_id_(1),
writer_(NULL) {
@@ -636,14 +654,16 @@ class HeapSnapshotJSONSerializer {
void Serialize(v8::OutputStream* stream);
private:
- INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
+ INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
}
- INLINE(static uint32_t ObjectHash(const void* key)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
- v8::internal::kZeroHashSeed);
+ INLINE(static uint32_t StringHash(const void* string)) {
+ const char* s = reinterpret_cast<const char*>(string);
+ int len = static_cast<int>(strlen(s));
+ return StringHasher::HashSequentialString(
+ s, len, v8::internal::kZeroHashSeed);
}
int GetStringId(const char* s);
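
The strings_ table used to key on pointers compared by identity; it now hashes and compares string contents, so equal strings from different allocations collapse to a single id. A sketch of content-based key functions, with std::hash standing in for StringHasher::HashSequentialString:

    #include <cassert>
    #include <cstring>
    #include <functional>
    #include <string>

    // Content-based key functions, mirroring StringsMatch/StringHash above.
    static bool StringsMatchSketch(const void* key1, const void* key2) {
      return std::strcmp(static_cast<const char*>(key1),
                         static_cast<const char*>(key2)) == 0;
    }

    static size_t StringHashSketch(const void* key) {
      return std::hash<std::string>()(static_cast<const char*>(key));
    }

    int main() {
      const char a[] = "shared-name";
      std::string b = "shared-name";  // distinct allocation, same contents
      assert(StringsMatchSketch(a, b.c_str()));
      assert(StringHashSketch(a) == StringHashSketch(b.c_str()));
    }
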
@@ -654,9 +674,11 @@ class HeapSnapshotJSONSerializer {
void SerializeNode(HeapEntry* entry);
void SerializeNodes();
void SerializeSnapshot();
+ void SerializeTraceTree();
+ void SerializeTraceNode(AllocationTraceNode* node);
+ void SerializeTraceNodeInfos();
void SerializeString(const unsigned char* s);
void SerializeStrings();
- void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
static const int kEdgeFieldsCount;
static const int kNodeFieldsCount;
@@ -677,4 +699,3 @@ class HeapSnapshotJSONSerializer {
} } // namespace v8::internal
#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_
-
diff --git a/chromium/v8/src/heap.cc b/chromium/v8/src/heap.cc
index f4cc421b079..1e9091b30d8 100644
--- a/chromium/v8/src/heap.cc
+++ b/chromium/v8/src/heap.cc
@@ -67,33 +67,19 @@ namespace internal {
Heap::Heap()
: isolate_(NULL),
+ code_range_size_(kIs64BitArch ? 512 * MB : 0),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-#if V8_TARGET_ARCH_X64
-#define LUMP_OF_MEMORY (2 * MB)
- code_range_size_(512*MB),
-#else
-#define LUMP_OF_MEMORY MB
- code_range_size_(0),
-#endif
-#if defined(ANDROID) || V8_TARGET_ARCH_MIPS
- reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(192*MB),
- max_executable_size_(max_old_generation_size_),
-#else
- reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
+ max_semispace_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(256l * LUMP_OF_MEMORY),
-#endif
-
+ max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+ max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
+ maximum_committed_(0),
survived_since_last_expansion_(0),
sweep_generation_(0),
always_allocate_scope_depth_(0),
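
The removed #ifdef ladder collapses into one expression scaled by pointer width: kPointerSize / 4 is 1 on 32-bit targets and 2 on 64-bit targets, so the limits simply double on 64-bit. The arithmetic, checked in a standalone program:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t kPointerSize = sizeof(void*);
      // kPointerSize / 4 is 1 on 32-bit and 2 on 64-bit targets, so e.g.
      // semispaces are 8 MB vs 16 MB and old generation 700 MB vs 1400 MB.
      std::printf("max semispace:      %zu MB\n", 8 * (kPointerSize / 4));
      std::printf("max old generation: %zu MB\n", 700 * (kPointerSize / 4));
      std::printf("max executable:     %zu MB\n", 256 * (kPointerSize / 4));
    }
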
@@ -127,10 +113,9 @@ Heap::Heap()
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
+ inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
- global_gc_prologue_callback_(NULL),
- global_gc_epilogue_callback_(NULL),
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(NULL),
@@ -157,9 +142,11 @@ Heap::Heap()
mark_sweeps_since_idle_round_started_(0),
gc_count_at_last_idle_gc_(0),
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
+ full_codegen_bytes_generated_(0),
+ crankshaft_codegen_bytes_generated_(0),
gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
- no_weak_embedded_maps_verification_scope_depth_(0),
+ no_weak_object_verification_scope_depth_(0),
#endif
promotion_queue_(this),
configured_(false),
@@ -172,6 +159,9 @@ Heap::Heap()
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
+ // Ensure old_generation_size_ is a multiple of kPageSize.
+ ASSERT(MB >= Page::kPageSize);
+
intptr_t max_virtual = OS::MaxVirtualMemory();
if (max_virtual > 0) {
@@ -243,6 +233,16 @@ intptr_t Heap::CommittedMemoryExecutable() {
}
+void Heap::UpdateMaximumCommitted() {
+ if (!HasBeenSetUp()) return;
+
+ intptr_t current_committed_memory = CommittedMemory();
+ if (current_committed_memory > maximum_committed_) {
+ maximum_committed_ = current_committed_memory;
+ }
+}
+
+
intptr_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
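
maximum_committed_ is a high-water mark: UpdateMaximumCommitted is called from the GC prologue and epilogue and only ever raises the value, which later feeds the heap_sample_maximum_committed counter. The pattern in isolation:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // High-water-mark sampling, as in Heap::UpdateMaximumCommitted().
    struct CommittedTracker {
      int64_t maximum_committed = 0;
      void Update(int64_t current_committed) {
        maximum_committed = std::max(maximum_committed, current_committed);
      }
    };

    int main() {
      CommittedTracker tracker;
      tracker.Update(10);
      tracker.Update(25);  // new peak
      tracker.Update(5);   // shrinking does not lower the mark
      assert(tracker.maximum_committed == 25);
    }
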
@@ -411,7 +411,7 @@ void Heap::PrintShortHeapStatistics() {
this->Available() / KB,
this->CommittedMemory() / KB);
PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
- amount_of_external_allocated_memory_ / KB);
+ static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
@@ -452,6 +452,8 @@ void Heap::GarbageCollectionPrologue() {
#endif
}
+ UpdateMaximumCommitted();
+
#ifdef DEBUG
ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
@@ -461,6 +463,10 @@ void Heap::GarbageCollectionPrologue() {
#endif // DEBUG
store_buffer()->GCPrologue();
+
+ if (isolate()->concurrent_osr_enabled()) {
+ isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+ }
}
@@ -474,6 +480,20 @@ intptr_t Heap::SizeOfObjects() {
}
+void Heap::ClearAllICsByKind(Code::Kind kind) {
+ HeapObjectIterator it(code_space());
+
+ for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+ Code* code = Code::cast(object);
+ Code::Kind current_kind = code->kind();
+ if (current_kind == Code::FUNCTION ||
+ current_kind == Code::OPTIMIZED_FUNCTION) {
+ code->ClearInlineCaches(kind);
+ }
+ }
+}
+
+
void Heap::RepairFreeListsAfterBoot() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next();
@@ -485,6 +505,40 @@ void Heap::RepairFreeListsAfterBoot() {
void Heap::GarbageCollectionEpilogue() {
+ if (FLAG_allocation_site_pretenuring) {
+ int tenure_decisions = 0;
+ int dont_tenure_decisions = 0;
+ int allocation_mementos_found = 0;
+
+ Object* cur = allocation_sites_list();
+ while (cur->IsAllocationSite()) {
+ AllocationSite* casted = AllocationSite::cast(cur);
+ allocation_mementos_found += casted->memento_found_count()->value();
+ if (casted->DigestPretenuringFeedback()) {
+ if (casted->GetPretenureMode() == TENURED) {
+ tenure_decisions++;
+ } else {
+ dont_tenure_decisions++;
+ }
+ }
+ cur = casted->weak_next();
+ }
+
+  // TODO(mvstanton): A pretenuring decision is made only once per allocation
+  // site. Find a sane way to decide when to revisit that decision.
+
+ if (FLAG_trace_track_allocation_sites &&
+ (allocation_mementos_found > 0 ||
+ tenure_decisions > 0 ||
+ dont_tenure_decisions > 0)) {
+ PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
+ "(%d, %d, %d)\n",
+ allocation_mementos_found,
+ tenure_decisions,
+ dont_tenure_decisions);
+ }
+ }
+
store_buffer()->GCEpilogue();
// In release mode, we only zap the from space under heap verification.
@@ -513,6 +567,8 @@ void Heap::GarbageCollectionEpilogue() {
}
}
+ UpdateMaximumCommitted();
+
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
@@ -521,10 +577,31 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->number_of_symbols()->Set(
string_table()->NumberOfElements());
+ if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
+ isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
+ static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
+ (crankshaft_codegen_bytes_generated_
+ + full_codegen_bytes_generated_)));
+ }
+
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_new_space()->
+ AddSample(static_cast<int>(
+ (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
+ static_cast<int>(
+ (old_pointer_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_data_space()->AddSample(
+ static_cast<int>(
+ (old_data_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_code_space()->
+ AddSample(static_cast<int>(
+ (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_map_space()->AddSample(
static_cast<int>(
(map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
@@ -535,6 +612,9 @@ void Heap::GarbageCollectionEpilogue() {
AddSample(static_cast<int>(
(property_cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
+ isolate_->counters()->heap_fraction_lo_space()->
+ AddSample(static_cast<int>(
+ (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
@@ -548,6 +628,11 @@ void Heap::GarbageCollectionEpilogue() {
heap_sample_property_cell_space_committed()->
AddSample(static_cast<int>(
property_cell_space()->CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_code_space_committed()->AddSample(
+ static_cast<int>(code_space()->CommittedMemory() / KB));
+
+ isolate_->counters()->heap_sample_maximum_committed()->AddSample(
+ static_cast<int>(MaximumCommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
@@ -610,6 +695,11 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
+ if (isolate()->concurrent_recompilation_enabled()) {
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ DisallowHeapAllocation no_recursive_gc;
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -706,7 +796,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
int Heap::NotifyContextDisposed() {
- if (FLAG_concurrent_recompilation) {
+ if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
@@ -784,9 +874,7 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
}
-void Heap::ReserveSpace(
- int *sizes,
- Address *locations_out) {
+void Heap::ReserveSpace(int *sizes, Address *locations_out) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
@@ -884,6 +972,8 @@ void Heap::ClearNormalizedMapCaches() {
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+ if (start_new_space_size == 0) return;
+
double survival_rate =
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
start_new_space_size;
@@ -1056,12 +1146,17 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
- global_gc_prologue_callback_();
- }
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, flags);
+ if (!gc_prologue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_prologue_callbacks_[i].callback);
+ callback(gc_type, flags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
+ }
}
}
}
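
Both callback flavors share one list: the legacy isolate-free signature is stored through a reinterpret_cast and recovered the same way, with pass_isolate_ recording which signature to invoke. A sketch of that tagged dispatch (all names here are illustrative):

    #include <cstdio>
    #include <vector>

    // Two callback signatures, one storage slot, a flag to pick the cast back.
    struct IsolateSketch {};
    typedef void (*LegacyCallback)(int gc_type, int flags);
    typedef void (*IsolateCallback)(IsolateSketch* isolate, int gc_type,
                                    int flags);

    struct CallbackEntry {
      IsolateCallback callback;  // canonical storage type
      bool pass_isolate;
    };

    static void LegacyHook(int gc_type, int flags) { std::puts("legacy"); }
    static void IsolateHook(IsolateSketch*, int, int) { std::puts("isolate"); }

    int main() {
      IsolateSketch isolate;
      std::vector<CallbackEntry> callbacks = {
          {reinterpret_cast<IsolateCallback>(LegacyHook), false},
          {IsolateHook, true}};
      for (const CallbackEntry& entry : callbacks) {
        if (!entry.pass_isolate) {
          // Cast back to the signature it was registered with before calling.
          reinterpret_cast<LegacyCallback>(entry.callback)(0, 0);
        } else {
          entry.callback(&isolate, 0, 0);
        }
      }
    }
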
@@ -1070,12 +1165,18 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+ if (!gc_epilogue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_epilogue_callbacks_[i].callback);
+ callback(gc_type, kNoGCCallbackFlags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_epilogue_callbacks_[i].callback(
+ isolate, gc_type, kNoGCCallbackFlags);
+ }
}
}
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
- global_gc_epilogue_callback_();
- }
}
@@ -1709,6 +1810,8 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
mark_compact_collector()->is_compacting();
ProcessArrayBuffers(retainer, record_slots);
ProcessNativeContexts(retainer, record_slots);
+ // TODO(mvstanton): AllocationSites only need to be processed during
+ // MARK_COMPACT, as they live in old space. Verify and address.
ProcessAllocationSites(retainer, record_slots);
}
@@ -1814,7 +1917,7 @@ struct WeakListVisitor<AllocationSite> {
}
static void VisitLiveObject(Heap* heap,
- AllocationSite* array_buffer,
+ AllocationSite* site,
WeakObjectRetainer* retainer,
bool record_slots) {}
@@ -1946,6 +2049,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
@@ -2090,8 +2194,12 @@ class ScavengingVisitor : public StaticVisitorBase {
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
- HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Isolate* isolate = heap->isolate();
+ HeapProfiler* heap_profiler = isolate->heap_profiler();
+ if (heap_profiler->is_tracking_object_moves()) {
+ heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+ size);
+ }
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
if (target->IsSharedFunctionInfo()) {
@@ -2128,12 +2236,10 @@ class ScavengingVisitor : public StaticVisitorBase {
MaybeObject* maybe_result;
if (object_contents == DATA_OBJECT) {
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
@@ -2164,8 +2270,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
@@ -2342,7 +2447,7 @@ void Heap::SelectScavengingVisitorsTable() {
isolate()->logger()->is_logging() ||
isolate()->cpu_profiler()->is_profiling() ||
(isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_profiling());
+ isolate()->heap_profiler()->is_tracking_object_moves());
if (!incremental_marking()->IsMarking()) {
if (!logging_and_profiling) {
@@ -2391,7 +2496,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
// Map::cast cannot be used due to uninitialized map field.
@@ -2405,7 +2510,7 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
- int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
return result;
@@ -2416,7 +2521,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
@@ -2437,7 +2542,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_instance_descriptors(empty_descriptor_array());
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
- int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
@@ -2649,6 +2754,12 @@ bool Heap::CreateInitialMaps() {
set_fixed_double_array_map(Map::cast(obj));
{ MaybeObject* maybe_obj =
+ AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_constant_pool_array_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2886,12 +2997,12 @@ bool Heap::CreateInitialMaps() {
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
+ int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
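
AllocateHeapNumber, AllocateByteArray and AllocateExternalArray now funnel their space choice through SelectSpace(size, OLD_DATA_SPACE, pretenure). The helper's body is not part of this hunk; judging from the ladders it replaces, it plausibly sends oversized objects to the large-object space, tenured ones to the given old space, and the rest to new space. A hedged reconstruction under those assumptions:

    #include <cassert>

    // Hedged reconstruction of the space choice implied by the call sites;
    // the threshold and enums are illustrative, not V8's actual definitions.
    enum AllocationSpaceSketch { NEW_SPACE, OLD_DATA_SPACE, LO_SPACE };
    enum PretenureFlagSketch { NOT_TENURED, TENURED };

    const int kMaxRegularObjectSize = 64 * 1024;  // assumption

    AllocationSpaceSketch SelectSpaceSketch(int size,
                                            AllocationSpaceSketch old_space,
                                            PretenureFlagSketch pretenure) {
      if (size > kMaxRegularObjectSize) return LO_SPACE;  // too big for a page
      return pretenure == TENURED ? old_space : NEW_SPACE;
    }

    int main() {
      assert(SelectSpaceSketch(16, OLD_DATA_SPACE, TENURED) == OLD_DATA_SPACE);
      assert(SelectSpaceSketch(16, OLD_DATA_SPACE, NOT_TENURED) == NEW_SPACE);
      assert(SelectSpaceSketch(1 << 20, OLD_DATA_SPACE, TENURED) == LO_SPACE);
    }
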
@@ -2901,26 +3012,12 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateHeapNumber(double value) {
- // Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, TENURED);
-
- // This version of AllocateHeapNumber is optimized for
- // allocation in new space.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- Object* result;
- { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
MaybeObject* Heap::AllocateCell(Object* value) {
+ int size = Cell::kSize;
+ STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
+
Object* result;
- { MaybeObject* maybe_result = AllocateRawCell();
+ { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
@@ -2929,9 +3026,13 @@ MaybeObject* Heap::AllocateCell(Object* value) {
}
-MaybeObject* Heap::AllocatePropertyCell(Object* value) {
+MaybeObject* Heap::AllocatePropertyCell() {
+ int size = PropertyCell::kSize;
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
+
Object* result;
- MaybeObject* maybe_result = AllocateRawPropertyCell();
+ MaybeObject* maybe_result =
+ AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
HeapObject::cast(result)->set_map_no_write_barrier(
@@ -2939,10 +3040,8 @@ MaybeObject* Heap::AllocatePropertyCell(Object* value) {
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_value(value);
+ cell->set_value(the_hole_value());
cell->set_type(Type::None());
- maybe_result = cell->SetValueInferType(value);
- if (maybe_result->IsFailure()) return maybe_result;
return result;
}
@@ -2957,17 +3056,16 @@ MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateAllocationSite() {
- Object* result;
+ AllocationSite* site;
MaybeObject* maybe_result = Allocate(allocation_site_map(),
OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- AllocationSite* site = AllocationSite::cast(result);
+ if (!maybe_result->To(&site)) return maybe_result;
site->Initialize();
// Link the site
site->set_weak_next(allocation_sites_list());
set_allocation_sites_list(site);
- return result;
+ return site;
}
@@ -3048,6 +3146,12 @@ void Heap::CreateFixedStubs() {
}
+void Heap::CreateStubsRequiringBuiltins() {
+ HandleScope scope(isolate());
+ CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(isolate());
+}
+
+
bool Heap::CreateInitialObjects() {
Object* obj;
@@ -3244,11 +3348,13 @@ bool Heap::CreateInitialObjects() {
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
+ Symbol::cast(obj)->set_is_private(true);
set_frozen_symbol(Symbol::cast(obj));
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
+ Symbol::cast(obj)->set_is_private(true);
set_elements_transition_symbol(Symbol::cast(obj));
{ MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
@@ -3260,6 +3366,7 @@ bool Heap::CreateInitialObjects() {
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
+ Symbol::cast(obj)->set_is_private(true);
set_observed_symbol(Symbol::cast(obj));
// Handling of script id generation is in Factory::NewScript.
@@ -3887,7 +3994,12 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
int length = end - start;
if (length <= 0) {
return empty_string();
- } else if (length == 1) {
+ }
+
+ // Make an attempt to flatten the buffer to reduce access time.
+ buffer = buffer->TryFlattenGetString();
+
+ if (length == 1) {
return LookupSingleCharacterStringFromCode(buffer->Get(start));
} else if (length == 2) {
// Optimization for 2-byte strings often used as keys in a decompression
@@ -3898,9 +4010,6 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
return MakeOrFindTwoCharacterString(this, c1, c2);
}
- // Make an attempt to flatten the buffer to reduce access time.
- buffer = buffer->TryFlattenGetString();
-
if (!FLAG_string_slices ||
!buffer->IsFlat() ||
length < SlicedString::kMinLength ||
@@ -4042,13 +4151,12 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
return result;
}
- Object* result;
+ SeqTwoByteString* result;
{ MaybeObject* maybe_result = AllocateRawTwoByteString(1);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
}
- String* answer = String::cast(result);
- answer->Set(0, code);
- return answer;
+ result->SeqTwoByteStringSet(0, code);
+ return result;
}
@@ -4056,31 +4164,8 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
return Failure::OutOfMemoryException(0x7);
}
- if (pretenure == NOT_TENURED) {
- return AllocateByteArray(length);
- }
int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x8);
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4111,11 +4196,10 @@ MaybeObject* Heap::AllocateExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ int size = ExternalArray::kAlignedSize;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
- space,
- OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4133,7 +4217,8 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
bool immovable,
- bool crankshafted) {
+ bool crankshafted,
+ int prologue_offset) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info;
@@ -4152,7 +4237,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (force_lo_space) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
- maybe_result = code_space_->AllocateRaw(obj_size);
+ maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
}
if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
@@ -4174,19 +4259,29 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(reloc_info);
code->set_flags(flags);
+ code->set_raw_kind_specific_flags1(0);
+ code->set_raw_kind_specific_flags2(0);
if (code->is_call_stub() || code->is_keyed_call_stub()) {
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_is_crankshafted(crankshafted);
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
+ code->set_raw_type_feedback_info(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
- code->set_prologue_offset(kPrologueOffsetNotSet);
+ code->set_prologue_offset(prologue_offset);
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (code->kind() == Code::FUNCTION) {
+ code->set_has_debug_break_slots(
+ isolate_->debugger()->IsDebuggerActive());
+ }
+#endif
+
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -4215,7 +4310,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
if (obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
- maybe_result = code_space_->AllocateRaw(obj_size);
+ maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
}
Object* result;
@@ -4258,7 +4353,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
if (new_obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
- maybe_result = code_space_->AllocateRaw(new_obj_size);
+ maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
}
Object* result;
@@ -4292,6 +4387,17 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
}
+void Heap::InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site) {
+ memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
+ memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_site->IncrementMementoCreateCount();
+ }
+}
+
+
MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
Handle<AllocationSite> allocation_site) {
ASSERT(gc_state_ == NOT_IN_GC);
@@ -4308,8 +4414,7 @@ MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
HeapObject::cast(result)->set_map_no_write_barrier(map);
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+ InitializeAllocationMemento(alloc_memento, *allocation_site);
return result;
}
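
Both allocation paths above rely on the same layout convention: the AllocationMemento is written directly behind the object it describes, at an offset equal to the map's instance size. A minimal standalone sketch of that address arithmetic, using illustrative stand-in types rather than V8's tagged HeapObject pointers:

    #include <cassert>
    #include <cstddef>

    // Stand-in for AllocationMemento; in V8 this is a real heap object.
    struct FakeMemento { void* allocation_site; };

    // The memento trails the object with no header in between, so its
    // address is simply the object's start plus the instance size.
    FakeMemento* MementoFor(void* object_start, size_t instance_size) {
      return reinterpret_cast<FakeMemento*>(
          static_cast<char*>(object_start) + instance_size);
    }

    int main() {
      alignas(8) char buffer[64 + sizeof(FakeMemento)];
      FakeMemento* memento = MementoFor(buffer, 64);
      assert(reinterpret_cast<char*>(memento) == buffer + 64);
    }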
@@ -4346,39 +4451,6 @@ void Heap::InitializeFunction(JSFunction* function,
}
-MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
- // Make sure to use globals from the function's context, since the function
- // can be from a different context.
- Context* native_context = function->context()->native_context();
- Map* new_map;
- if (function->shared()->is_generator()) {
- // Generator prototypes can share maps since they don't have "constructor"
- // properties.
- new_map = native_context->generator_object_prototype_map();
- } else {
- // Each function prototype gets a fresh map to avoid unwanted sharing of
- // maps between prototypes of different constructors.
- JSFunction* object_function = native_context->object_function();
- ASSERT(object_function->has_initial_map());
- MaybeObject* maybe_map = object_function->initial_map()->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
- }
-
- Object* prototype;
- MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
-
- if (!function->shared()->is_generator()) {
- MaybeObject* maybe_failure =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributesTrampoline(
- constructor_string(), function, DONT_ENUM);
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
-
- return prototype;
-}
-
-
MaybeObject* Heap::AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
Object* prototype,
@@ -4413,10 +4485,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
arguments_object_size = kArgumentsObjectSize;
}
- // This calls Copy directly rather than using Heap::AllocateRaw so we
- // duplicate the check here.
- ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
-
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
// on the size being a known constant.
@@ -4454,48 +4522,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
}
-MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
- ASSERT(!fun->has_initial_map());
-
- // First create a new map with the size and number of in-object properties
- // suggested by the function.
- InstanceType instance_type;
- int instance_size;
- int in_object_properties;
- if (fun->shared()->is_generator()) {
- instance_type = JS_GENERATOR_OBJECT_TYPE;
- instance_size = JSGeneratorObject::kSize;
- in_object_properties = 0;
- } else {
- instance_type = JS_OBJECT_TYPE;
- instance_size = fun->shared()->CalculateInstanceSize();
- in_object_properties = fun->shared()->CalculateInObjectProperties();
- }
- Map* map;
- MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
- if (!maybe_map->To(&map)) return maybe_map;
-
- // Fetch or allocate prototype.
- Object* prototype;
- if (fun->has_instance_prototype()) {
- prototype = fun->instance_prototype();
- } else {
- MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
- if (!maybe_prototype->To(&prototype)) return maybe_prototype;
- }
- map->set_inobject_properties(in_object_properties);
- map->set_unused_property_fields(in_object_properties);
- map->set_prototype(prototype);
- ASSERT(map->has_fast_object_elements());
-
- if (!fun->shared()->is_generator()) {
- fun->shared()->StartInobjectSlackTracking(map);
- }
-
- return map;
-}
-
-
void Heap::InitializeJSObjectFromMap(JSObject* obj,
FixedArray* properties,
Map* map) {
@@ -4552,9 +4578,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(
}
// Allocate the JSObject.
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
Object* obj;
MaybeObject* maybe_obj = Allocate(map, space);
if (!maybe_obj->To(&obj)) return maybe_obj;
@@ -4587,8 +4612,8 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
}
// Allocate the JSObject.
- AllocationSpace space = NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
Object* obj;
MaybeObject* maybe_obj =
AllocateWithAllocationSite(map, space, allocation_site);
@@ -4603,15 +4628,7 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
PretenureFlag pretenure) {
- // Allocate the initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map;
- { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
- if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
- }
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
+ ASSERT(constructor->has_initial_map());
// Allocate the object based on the constructor's initial map.
MaybeObject* result = AllocateJSObjectFromMap(
constructor->initial_map(), pretenure);
@@ -4626,21 +4643,12 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
Handle<AllocationSite> allocation_site) {
- // Allocate the initial map if absent.
- if (!constructor->has_initial_map()) {
- Object* initial_map;
- { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
- if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
- }
- constructor->set_initial_map(Map::cast(initial_map));
- Map::cast(initial_map)->set_constructor(constructor);
- }
+ ASSERT(constructor->has_initial_map());
// Allocate the object based on the constructor's initial map, or the payload
// advice stored in the allocation site.
Map* initial_map = constructor->initial_map();
- Smi* smi = Smi::cast(allocation_site->transition_info());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ ElementsKind to_kind = allocation_site->GetElementsKind();
AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
if (to_kind != initial_map->elements_kind()) {
MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
@@ -4666,23 +4674,6 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
}
-MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
- ASSERT(function->shared()->is_generator());
- Map *map;
- if (function->has_initial_map()) {
- map = function->initial_map();
- } else {
- // Allocate the initial map if absent.
- MaybeObject* maybe_map = AllocateInitialMap(function);
- if (!maybe_map->To(&map)) return maybe_map;
- function->set_initial_map(map);
- map->set_constructor(function);
- }
- ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
- return AllocateJSObjectFromMap(map);
-}
-
-
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
@@ -4744,20 +4735,6 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
}
-MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<AllocationSite> allocation_site,
- ArrayStorageAllocationMode mode) {
- MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
- allocation_site);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
- return AllocateJSArrayStorage(array, length, capacity, mode);
-}
-
-
MaybeObject* Heap::AllocateJSArrayStorage(
JSArray* array,
int length,
@@ -4860,74 +4837,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
}
-MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
- ASSERT(map->is_dictionary_map());
-
- // Make sure no field properties are described in the initial map.
- // This guarantees us that normalizing the properties does not
- // require us to change property values to PropertyCells.
- ASSERT(map->NextFreePropertyIndex() == 0);
-
- // Make sure we don't have a ton of pre-allocated slots in the
- // global objects. They will be unused once we normalize the object.
- ASSERT(map->unused_property_fields() == 0);
- ASSERT(map->inobject_properties() == 0);
-
- // Initial size of the backing store to avoid resize of the storage during
- // bootstrapping. The size differs between the JS global object ad the
- // builtins object.
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
-
- // Allocate a dictionary object for backing storage.
- NameDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- NameDictionary::Allocate(
- this,
- map->NumberOfOwnDescriptors() * 2 + initial_size);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
-
- // The global object might be created from an object template with accessors.
- // Fill these accessors into the dictionary.
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
- Object* value = descs->GetCallbacksObject(i);
- MaybeObject* maybe_value = AllocatePropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
-
- MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_added->To(&dictionary)) return maybe_added;
- }
-
- // Allocate the global object and initialize it with the backing store.
- JSObject* global;
- MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_global->To(&global)) return maybe_global;
-
- InitializeJSObjectFromMap(global, dictionary, map);
-
- // Create a new map for the global object.
- Map* new_map;
- MaybeObject* maybe_map = map->CopyDropDescriptors();
- if (!maybe_map->To(&new_map)) return maybe_map;
- new_map->set_dictionary_map(true);
-
- // Set up the global object as a normalized object.
- global->set_map(new_map);
- global->set_properties(dictionary);
-
- // Make sure result is a global object with properties in dictionary.
- ASSERT(global->IsGlobalObject());
- ASSERT(!global->HasFastProperties());
- return global;
-}
-
-
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
+MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
@@ -4937,6 +4847,8 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
int object_size = map->instance_size();
Object* clone;
+ ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
+
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
// If we're forced to always allocate, we use the general allocation
@@ -4957,7 +4869,11 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
} else {
wb_mode = SKIP_WRITE_BARRIER;
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+ { int adjusted_object_size = site != NULL
+ ? object_size + AllocationMemento::kSize
+ : object_size;
+ MaybeObject* maybe_clone =
+ AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
SLOW_ASSERT(InNewSpace(clone));
@@ -4966,115 +4882,12 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
object_size);
- }
- SLOW_ASSERT(
- JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
- // Update elements if necessary.
- if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem;
- if (elements->map() == fixed_cow_array_map()) {
- maybe_elem = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
- maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- maybe_elem = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
+ if (site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ InitializeAllocationMemento(alloc_memento, site);
}
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
- }
- // Update properties if necessary.
- if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
- }
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
- }
- // Return the new clone.
- return clone;
-}
-
-
-MaybeObject* Heap::CopyJSObjectWithAllocationSite(
- JSObject* source,
- AllocationSite* site) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- SLOW_ASSERT(!source->IsJSFunction());
-
- // Make the clone.
- Map* map = source->map();
- int object_size = map->instance_size();
- Object* clone;
-
- ASSERT(AllocationSite::CanTrack(map->instance_type()));
- ASSERT(map->instance_type() == JS_ARRAY_TYPE);
- WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- int adjusted_object_size = object_size;
- if (always_allocate()) {
- // We'll only track origin if we are certain to allocate in new space
- const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
- if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
- adjusted_object_size += AllocationMemento::kSize;
- }
-
- { MaybeObject* maybe_clone =
- AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
- // Update write barrier for all fields that lie beyond the header.
- int write_barrier_offset = adjusted_object_size > object_size
- ? JSArray::kSize + AllocationMemento::kSize
- : JSObject::kHeaderSize;
- if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
- RecordWrites(clone_address,
- write_barrier_offset,
- (object_size - write_barrier_offset) / kPointerSize);
- }
-
- // Track allocation site information, if we failed to allocate it inline.
- if (InNewSpace(clone) &&
- adjusted_object_size == object_size) {
- MaybeObject* maybe_alloc_memento =
- AllocateStruct(ALLOCATION_MEMENTO_TYPE);
- AllocationMemento* alloc_memento;
- if (maybe_alloc_memento->To(&alloc_memento)) {
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
- }
- }
- } else {
- wb_mode = SKIP_WRITE_BARRIER;
- adjusted_object_size += AllocationMemento::kSize;
-
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- SLOW_ASSERT(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
- source->address(),
- object_size);
- }
-
- if (adjusted_object_size > object_size) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
}
SLOW_ASSERT(
@@ -5197,7 +5010,7 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure) {
int length = string.length();
if (length == 1) {
return Heap::LookupSingleCharacterStringFromCode(string[0]);
@@ -5365,12 +5178,11 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5409,16 +5221,10 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
}
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5439,16 +5245,10 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5473,24 +5273,6 @@ MaybeObject* Heap::AllocateJSArray(
}
-MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
- return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
-}
-
-
MaybeObject* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
Object* result;
@@ -5511,25 +5293,10 @@ MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
}
-MaybeObject* Heap::AllocateRawFixedArray(int length) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xd);
- }
- ASSERT(length > 0);
- // Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, TENURED);
- // Allocate the raw data for a fixed array.
- int size = FixedArray::SizeFor(length);
- return size <= Page::kMaxNonCodeHeapObjectSize
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
-}
-
-
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
if (InNewSpace(obj)) {
@@ -5569,21 +5336,24 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
}
-MaybeObject* Heap::AllocateFixedArray(int length) {
- ASSERT(length >= 0);
- if (length == 0) return empty_fixed_array();
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
+ Map* map) {
+ int int64_entries = src->count_of_int64_entries();
+ int ptr_entries = src->count_of_ptr_entries();
+ int int32_entries = src->count_of_int32_entries();
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Initialize header.
- FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_no_write_barrier(fixed_array_map());
- array->set_length(length);
- // Initialize body.
- ASSERT(!InNewSpace(undefined_value()));
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
+ HeapObject* dst = HeapObject::cast(obj);
+ dst->set_map_no_write_barrier(map);
+ CopyBlock(
+ dst->address() + ConstantPoolArray::kLengthOffset,
+ src->address() + ConstantPoolArray::kLengthOffset,
+ ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
+ - ConstantPoolArray::kLengthOffset);
+ return obj;
}
@@ -5592,35 +5362,26 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
return Failure::OutOfMemoryException(0xe);
}
int size = FixedArray::SizeFor(length);
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_POINTER_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
- return AllocateRaw(size, space, retry_space);
+ return AllocateRaw(size, space, OLD_POINTER_SPACE);
}
-MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
- Heap* heap,
- int length,
- PretenureFlag pretenure,
- Object* filler) {
+MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
+ PretenureFlag pretenure,
+ Object* filler) {
ASSERT(length >= 0);
- ASSERT(heap->empty_fixed_array()->IsFixedArray());
- if (length == 0) return heap->empty_fixed_array();
+ ASSERT(empty_fixed_array()->IsFixedArray());
+ if (length == 0) return empty_fixed_array();
- ASSERT(!heap->InNewSpace(filler));
+ ASSERT(!InNewSpace(filler));
Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -5629,19 +5390,13 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- undefined_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- the_hole_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
}
@@ -5649,7 +5404,7 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -5719,24 +5474,52 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
return Failure::OutOfMemoryException(0xf);
}
int size = FixedDoubleArray::SizeFor(length);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
+ HeapObject* object;
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
+ return EnsureDoubleAligned(this, object, size);
+}
+
+
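
On 32-bit hosts both of the surrounding allocators grow the request by one pointer slot (the `#ifndef V8_HOST_ARCH_64_BIT` branch) so that EnsureDoubleAligned can round the payload up to an 8-byte boundary without overrunning the allocation. A hedged sketch of that rounding step alone, independent of V8's filler-object bookkeeping:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kDoubleAlignment = 8;

    // Round an address up to the next multiple of 8. The extra reserved
    // slot guarantees the shifted payload still fits in the allocation.
    uintptr_t AlignForDoubles(uintptr_t address) {
      return (address + kDoubleAlignment - 1) & ~(kDoubleAlignment - 1);
    }

    int main() {
      assert(AlignForDoubles(16) == 16);  // already aligned: unchanged
      assert(AlignForDoubles(20) == 24);  // bumped to the next boundary
    }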
+MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
+ number_of_int32_entries > 0);
+ int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+#ifndef V8_HOST_ARCH_64_BIT
+ size += kPointerSize;
+#endif
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+
HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
+ object = EnsureDoubleAligned(this, object, size);
+ HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
- return EnsureDoubleAligned(this, object, size);
+ ConstantPoolArray* constant_pool =
+ reinterpret_cast<ConstantPoolArray*>(object);
+ constant_pool->SetEntryCounts(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+ MemsetPointer(
+ HeapObject::RawField(
+ constant_pool,
+ constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+ undefined_value(),
+ number_of_ptr_entries);
+ return constant_pool;
}
@@ -5775,12 +5558,22 @@ MaybeObject* Heap::AllocateSymbol() {
Symbol::cast(result)->set_hash_field(
Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
Symbol::cast(result)->set_name(undefined_value());
+ Symbol::cast(result)->set_flags(Smi::FromInt(0));
- ASSERT(result->IsSymbol());
+ ASSERT(!Symbol::cast(result)->is_private());
return result;
}
+MaybeObject* Heap::AllocatePrivateSymbol() {
+ MaybeObject* maybe = AllocateSymbol();
+ Symbol* symbol;
+ if (!maybe->To(&symbol)) return maybe;
+ symbol->set_is_private(true);
+ return symbol;
+}
+
+
MaybeObject* Heap::AllocateNativeContext() {
Object* result;
{ MaybeObject* maybe_result =
@@ -5936,8 +5729,7 @@ STRUCT_LIST(MAKE_CASE)
return Failure::InternalError();
}
int size = map->instance_size();
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
Object* result;
{ MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -6820,7 +6612,15 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
}
-intptr_t Heap::PromotedExternalMemorySize() {
+bool Heap::AdvanceSweepers(int step_size) {
+ ASSERT(isolate()->num_sweeper_threads() == 0);
+ bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+ sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+ return sweeping_complete;
+}
+
+
+int64_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
return amount_of_external_allocated_memory_
@@ -6828,6 +6628,32 @@ intptr_t Heap::PromotedExternalMemorySize() {
}
+void Heap::EnableInlineAllocation() {
+ ASSERT(inline_allocation_disabled_);
+ inline_allocation_disabled_ = false;
+
+ // Update inline allocation limit for new space.
+ new_space()->UpdateInlineAllocationLimit(0);
+}
+
+
+void Heap::DisableInlineAllocation() {
+ ASSERT(!inline_allocation_disabled_);
+ inline_allocation_disabled_ = true;
+
+ // Update inline allocation limit for new space.
+ new_space()->UpdateInlineAllocationLimit(0);
+
+ // Update inline allocation limit for old spaces.
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->EmptyAllocationInfo();
+ }
+}
+
+
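
Both toggles work by moving the new-space limit rather than by taking a lock: UpdateInlineAllocationLimit(0) and EmptyAllocationInfo leave no gap between the allocation top and the limit, so generated code misses its inline fast path and calls into the runtime. A toy model of that mechanism, with illustrative field names rather than V8's actual allocation-info layout:

    #include <cassert>
    #include <cstddef>

    // Toy bump-pointer space: inline allocation succeeds only while
    // top + size stays at or below limit.
    struct ToySpace {
      char* top;
      char* limit;

      void* Allocate(size_t size) {
        if (top + size > limit) return nullptr;  // runtime slow path in V8
        void* result = top;
        top += size;
        return result;
      }
      // Collapsing the limit onto top disables the inline fast path.
      void DisableInline() { limit = top; }
    };

    int main() {
      char arena[128];
      ToySpace space = {arena, arena + sizeof(arena)};
      assert(space.Allocate(32) != nullptr);
      space.DisableInline();
      assert(space.Allocate(1) == nullptr);  // every request now bails out
    }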
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
@@ -6940,9 +6766,6 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-#ifdef DEBUG
- relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
return true;
}
@@ -6959,6 +6782,7 @@ bool Heap::CreateHeapObjects() {
native_contexts_list_ = undefined_value();
array_buffers_list_ = undefined_value();
allocation_sites_list_ = undefined_value();
+ weak_object_to_code_table_ = undefined_value();
return true;
}
@@ -6987,6 +6811,8 @@ void Heap::TearDown() {
}
#endif
+ UpdateMaximumCommitted();
+
if (FLAG_print_cumulative_gc_stat) {
PrintF("\n");
PrintF("gc_count=%d ", gc_count_);
@@ -7001,6 +6827,31 @@ void Heap::TearDown() {
PrintF("\n\n");
}
+ if (FLAG_print_max_heap_committed) {
+ PrintF("\n");
+ PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
+ MaximumCommittedMemory());
+ PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
+ new_space_.MaximumCommittedMemory());
+ PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
+ old_data_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+ old_pointer_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+ old_pointer_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
+ code_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
+ map_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
+ cell_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
+ property_cell_space_->MaximumCommittedMemory());
+ PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
+ lo_space_->MaximumCommittedMemory());
+ PrintF("\n\n");
+ }
+
TearDownArrayBuffers();
isolate_->global_handles()->TearDown();
@@ -7059,18 +6910,21 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
+ relocation_mutex_ = NULL;
}
-void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type);
+ GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_prologue_callbacks_.Contains(pair));
return gc_prologue_callbacks_.Add(pair);
}
-void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_prologue_callbacks_[i].callback == callback) {
@@ -7082,15 +6936,17 @@ void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
}
-void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type);
+ GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_epilogue_callbacks_.Contains(pair));
return gc_epilogue_callbacks_.Add(pair);
}
-void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_epilogue_callbacks_[i].callback == callback) {
@@ -7102,6 +6958,37 @@ void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
}
+MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
+ DependentCode* dep) {
+ ASSERT(!InNewSpace(obj));
+ ASSERT(!InNewSpace(dep));
+ MaybeObject* maybe_obj =
+ WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
+ WeakHashTable* table;
+ if (!maybe_obj->To(&table)) return maybe_obj;
+ if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
+ WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
+ }
+ set_weak_object_to_code_table(table);
+ ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
+ return weak_object_to_code_table_;
+}
+
+
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
+ Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+ if (dep->IsDependentCode()) return DependentCode::cast(dep);
+ return DependentCode::cast(empty_fixed_array());
+}
+
+
+void Heap::EnsureWeakObjectToCodeTable() {
+ if (!weak_object_to_code_table()->IsHashTable()) {
+ set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
+ }
+}
+
+
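
The table starts life as undefined_value and is only swapped for a real WeakHashTable on first use; a lookup against the pristine table degrades to the empty dependency list. The same lazy-initialization contract as a compact sketch, with std::unordered_map standing in for the weak hash table (so no actual weak semantics, purely illustrative):

    #include <cassert>
    #include <unordered_map>

    struct DependencyTable {
      // nullptr plays the role of the initial undefined_value sentinel.
      std::unordered_map<const void*, const void*>* table = nullptr;

      void Ensure() {
        if (table == nullptr) {
          table = new std::unordered_map<const void*, const void*>(16);
        }
      }
      void Put(const void* obj, const void* dep) {
        Ensure();
        (*table)[obj] = dep;
      }
      const void* Lookup(const void* obj) const {
        if (table == nullptr) return nullptr;  // "no dependent code"
        auto it = table->find(obj);
        return it == table->end() ? nullptr : it->second;
      }
    };

    int main() {
      DependencyTable t;
      int key = 0, dep = 0;
      assert(t.Lookup(&key) == nullptr);
      t.Put(&key, &dep);
      assert(t.Lookup(&key) == &dep);
    }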
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {
@@ -7958,7 +7845,13 @@ void ExternalStringTable::CleanUp() {
void ExternalStringTable::TearDown() {
+ for (int i = 0; i < new_space_strings_.length(); ++i) {
+ heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
+ }
new_space_strings_.Free();
+ for (int i = 0; i < old_space_strings_.length(); ++i) {
+ heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
+ }
old_space_strings_.Free();
}
@@ -8084,21 +7977,23 @@ void Heap::CheckpointObjectStats() {
static_cast<int>(object_sizes_last_time_[index]));
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+ counters->count_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
}
-
-Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Lock();
-#ifdef DEBUG
- heap_->relocation_mutex_locked_by_optimizer_thread_ =
- heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
-#endif // DEBUG
- }
-}
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/heap.h b/chromium/v8/src/heap.h
index 4dfa076ebd7..1c8e0e16e60 100644
--- a/chromium/v8/src/heap.h
+++ b/chromium/v8/src/heap.h
@@ -71,6 +71,7 @@ namespace internal {
V(Map, scope_info_map, ScopeInfoMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
@@ -208,8 +209,10 @@ namespace internal {
V(Boolean_string, "Boolean") \
V(callee_string, "callee") \
V(constructor_string, "constructor") \
- V(result_string, ".result") \
+ V(dot_result_string, ".result") \
V(dot_for_string, ".for.") \
+ V(dot_iterator_string, ".iterator") \
+ V(dot_generator_object_string, ".generator_object") \
V(eval_string, "eval") \
V(empty_string, "") \
V(function_string, "function") \
@@ -292,7 +295,10 @@ namespace internal {
V(throw_string, "throw") \
V(done_string, "done") \
V(value_string, "value") \
- V(next_string, "next")
+ V(next_string, "next") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(buffer_string, "buffer")
// Forward declarations.
class GCTracer;
@@ -529,6 +535,13 @@ class Heap {
// Returns the amount of physical memory currently committed for the heap.
size_t CommittedPhysicalMemory();
+ // Returns the maximum amount of memory ever committed for the heap.
+ intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+
+ // Updates the maximum committed memory for the heap. Should be called
+ // whenever a space grows.
+ void UpdateMaximumCommitted();
+
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
@@ -604,9 +617,6 @@ class Heap {
return old_data_space_->allocation_limit_address();
}
- // Uncommit unused semi space.
- bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-
// Allocates and initializes a new JavaScript object based on a
// constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -620,9 +630,6 @@ class Heap {
JSFunction* constructor,
Handle<AllocationSite> allocation_site);
- MUST_USE_RESULT MaybeObject* AllocateJSGeneratorObject(
- JSFunction* function);
-
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -635,10 +642,6 @@ class Heap {
pretenure);
}
- inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site);
-
// Allocate a JSArray with a specified length but elements that are left
// uninitialized.
MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
@@ -648,13 +651,6 @@ class Heap {
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<AllocationSite> allocation_site,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
-
MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
JSArray* array,
int length,
@@ -668,25 +664,12 @@ class Heap {
int length,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates and initializes a new global object based on a constructor.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);
-
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
-
- MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(
- JSObject* source, AllocationSite* site);
-
- // Allocates the function prototype.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
+ // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+ MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source,
+ AllocationSite* site = NULL);
// Allocates a JS ArrayBuffer object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -754,9 +737,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
int instance_size);
- // Allocate a map for the specified function
- MUST_USE_RESULT MaybeObject* AllocateInitialMap(JSFunction* fun);
-
// Allocates an empty code cache.
MUST_USE_RESULT MaybeObject* AllocateCodeCache();
@@ -781,6 +761,9 @@ class Heap {
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
+ // Iterates the whole code space to clear all ICs of the given kind.
+ void ClearAllICsByKind(Code::Kind kind);
+
// For use during bootup.
void RepairFreeListsAfterBoot();
@@ -876,14 +859,9 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
-
- // Allocate a non-tenured byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -900,22 +878,7 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateSymbol();
-
- // Allocate a tenured simple cell.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
-
- // Allocate a tenured JS global property cell.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocatePropertyCell(Object* value);
-
- // Allocate Box.
- MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocatePrivateSymbol();
// Allocate a tenured AllocationSite. Its payload is null
MUST_USE_RESULT MaybeObject* AllocateAllocationSite();
@@ -924,10 +887,9 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
- PretenureFlag pretenure);
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
@@ -958,6 +920,16 @@ class Heap {
MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
FixedDoubleArray* src, Map* map);
+ // Make a copy of src and return it. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT inline MaybeObject* CopyConstantPoolArray(
+ ConstantPoolArray* src);
+
+ // Make a copy of src, set the map, and return the copy. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ MUST_USE_RESULT MaybeObject* CopyConstantPoolArrayWithMap(
+ ConstantPoolArray* src, Map* map);
+
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -966,9 +938,10 @@ class Heap {
int length,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
- int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray(
+ int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries);
// Allocates a fixed double array with uninitialized values. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
@@ -1056,10 +1029,7 @@ class Heap {
// Allocates a HeapNumber from value.
MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
- double value,
- PretenureFlag pretenure);
- // pretenure = NOT_TENURED
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
+ double value, PretenureFlag pretenure = NOT_TENURED);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1153,11 +1123,13 @@ class Heap {
// self_reference. This allows generated code to reference its own Code
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference,
- bool immovable = false,
- bool crankshafted = false);
+ MUST_USE_RESULT MaybeObject* CreateCode(
+ const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference,
+ bool immovable = false,
+ bool crankshafted = false,
+ int prologue_offset = Code::kPrologueOffsetNotSet);
MUST_USE_RESULT MaybeObject* CopyCode(Code* code);
@@ -1200,19 +1172,6 @@ class Heap {
// Converts the given boolean condition to JavaScript boolean value.
inline Object* ToBoolean(bool condition);
- // Code that should be run before and after each GC. Includes some
- // reporting/verification activities when compiled with DEBUG set.
- void GarbageCollectionPrologue();
- void GarbageCollectionEpilogue();
-
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- bool CollectGarbage(AllocationSpace space,
- GarbageCollector collector,
- const char* gc_reason,
- const char* collector_reason);
-
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
@@ -1272,22 +1231,15 @@ class Heap {
void GarbageCollectionGreedyCheck();
#endif
- void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCPrologueCallback callback);
-
- void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
- void SetGlobalGCPrologueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
- global_gc_prologue_callback_ = callback;
- }
- void SetGlobalGCEpilogueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
- global_gc_epilogue_callback_ = callback;
- }
+ void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
@@ -1337,6 +1289,8 @@ class Heap {
Object* allocation_sites_list() { return allocation_sites_list_; }
Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+ Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
+
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1428,8 +1382,8 @@ class Heap {
void Verify();
- bool weak_embedded_maps_verification_enabled() {
- return no_weak_embedded_maps_verification_scope_depth_ == 0;
+ bool weak_embedded_objects_verification_enabled() {
+ return no_weak_object_verification_scope_depth_ == 0;
}
#endif
@@ -1459,9 +1413,6 @@ class Heap {
#endif
}
- // Fill in bogus values in from space
- void ZapFromSpace();
-
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -1505,8 +1456,10 @@ class Heap {
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
- // Commits from space if it is uncommitted.
- void EnsureFromSpaceIsCommitted();
+ // An object may have an AllocationSite associated with it through a trailing
+ // AllocationMemento. Its feedback should be updated when objects are found
+ // in the heap.
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -1527,13 +1480,8 @@ class Heap {
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
- inline intptr_t AdjustAmountOfExternalAllocatedMemory(
- intptr_t change_in_bytes);
-
- // Allocate uninitialized fixed array.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
- PretenureFlag pretenure);
+ inline int64_t AdjustAmountOfExternalAllocatedMemory(
+ int64_t change_in_bytes);
// This is only needed for testing high promotion mode.
void SetNewSpaceHighPromotionModeActive(bool mode) {
@@ -1552,7 +1500,10 @@ class Heap {
}
inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
+ if (total < 0) return 0;
+ return static_cast<intptr_t>(total);
}
inline intptr_t OldGenerationSpaceAvailable() {
@@ -1582,6 +1533,13 @@ class Heap {
return Min(limit, halfway_to_the_max);
}
+ // Indicates whether inline bump-pointer allocation has been disabled.
+ bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+ // Switch whether inline bump-pointer allocation should be used.
+ void EnableInlineAllocation();
+ void DisableInlineAllocation();
+
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);
@@ -1692,6 +1650,14 @@ class Heap {
total_regexp_code_generated_ += size;
}
+ void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
+ if (is_crankshafted) {
+ crankshaft_codegen_bytes_generated_ += size;
+ } else {
+ full_codegen_bytes_generated_ += size;
+ }
+ }
+
// Returns maximum GC pause.
double get_max_gc_pause() { return max_gc_pause_; }
@@ -1741,12 +1707,7 @@ class Heap {
old_pointer_space()->IsLazySweepingComplete();
}
- bool AdvanceSweepers(int step_size) {
- ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
- bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
- sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
- return sweeping_complete;
- }
+ bool AdvanceSweepers(int step_size);
bool EnsureSweepersProgressed(int step_size) {
bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
@@ -1827,7 +1788,7 @@ class Heap {
bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
- intptr_t amount_of_external_allocated_memory() {
+ int64_t amount_of_external_allocated_memory() {
return amount_of_external_allocated_memory_;
}
@@ -1838,26 +1799,35 @@ class Heap {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- OBJECT_STATS_COUNT =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
+ FIRST_CODE_AGE_SUB_TYPE =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
};
- void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
+ void RecordObjectStats(InstanceType type, size_t size) {
ASSERT(type <= LAST_TYPE);
- if (sub_type < 0) {
- object_counts_[type]++;
- object_sizes_[type] += size;
- } else {
- if (type == CODE_TYPE) {
- ASSERT(sub_type < Code::NUMBER_OF_KINDS);
- object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
- } else if (type == FIXED_ARRAY_TYPE) {
- ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
- }
- }
+ object_counts_[type]++;
+ object_sizes_[type] += size;
+ }
+
+ void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
+ int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+ int code_age_index =
+ FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
+ ASSERT(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+ code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
+ ASSERT(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+ code_age_index < OBJECT_STATS_COUNT);
+ object_counts_[code_sub_type_index]++;
+ object_sizes_[code_sub_type_index] += size;
+ object_counts_[code_age_index]++;
+ object_sizes_[code_age_index] += size;
+ }
+
+ void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+ ASSERT(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
}
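
All three Record* helpers index disjoint regions of one flat counters array; the enum above pins where each region begins. A small sketch of the bucket arithmetic under assumed region bases (the real values derive from LAST_TYPE, Code::NUMBER_OF_KINDS and friends):

    #include <cassert>

    // Assumed region bases, mirroring the enum layout above.
    const int kFirstCodeKindSubType = 100;
    const int kFirstCodeAgeSubType = 140;
    const int kFirstCodeAge = 0;

    int CodeKindIndex(int code_sub_type) {
      return kFirstCodeKindSubType + code_sub_type;
    }
    int CodeAgeIndex(int code_age) {
      return kFirstCodeAgeSubType + code_age - kFirstCodeAge;
    }

    int main() {
      // One code object bumps both its kind bucket and its age bucket.
      assert(CodeKindIndex(3) == 103);
      assert(CodeAgeIndex(2) == 142);
    }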
void CheckpointObjectStats();
@@ -1866,27 +1836,33 @@ class Heap {
// only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
- explicit RelocationLock(Heap* heap);
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_concurrent_recompilation) {
+ heap_->relocation_mutex_->Lock();
+ }
+ }
+
~RelocationLock() {
if (FLAG_concurrent_recompilation) {
-#ifdef DEBUG
- heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
heap_->relocation_mutex_->Unlock();
}
}
-#ifdef DEBUG
- static bool IsLockedByOptimizerThread(Heap* heap) {
- return heap->relocation_mutex_locked_by_optimizer_thread_;
- }
-#endif // DEBUG
-
private:
Heap* heap_;
};
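
With the DEBUG-only optimizer-thread bookkeeping gone, RelocationLock reduces to a conditional scoped lock: acquire in the constructor, release in the destructor, both gated on the same flag. The shape of that pattern as a self-contained sketch, with std::mutex standing in for V8's Mutex:

    #include <mutex>

    class ConditionalLock {
     public:
      ConditionalLock(std::mutex* mutex, bool enabled)
          : mutex_(mutex), enabled_(enabled) {
        if (enabled_) mutex_->lock();    // only engage when the flag is set
      }
      ~ConditionalLock() {
        if (enabled_) mutex_->unlock();  // symmetric release on scope exit
      }

     private:
      std::mutex* mutex_;
      bool enabled_;
    };

    int main() {
      std::mutex mu;
      ConditionalLock lock(&mu, true);   // released when 'lock' goes away
    }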
+ MaybeObject* AddWeakObjectToCodeDependency(Object* obj, DependentCode* dep);
+
+ DependentCode* LookupWeakObjectToCodeDependency(Object* obj);
+
+ void InitializeWeakObjectToCodeTable() {
+ set_weak_object_to_code_table(undefined_value());
+ }
+
+ void EnsureWeakObjectToCodeTable();
+
private:
Heap();
@@ -1902,6 +1878,7 @@ class Heap {
int initial_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t max_executable_size_;
+ intptr_t maximum_committed_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@@ -1934,7 +1911,7 @@ class Heap {
int gc_post_processing_depth_;
// Returns the amount of external memory registered since last global gc.
- intptr_t PromotedExternalMemorySize();
+ int64_t PromotedExternalMemorySize();
unsigned int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@@ -1988,20 +1965,30 @@ class Heap {
// The amount of external memory registered through the API kept alive
// by global handles
- intptr_t amount_of_external_allocated_memory_;
+ int64_t amount_of_external_allocated_memory_;
// Caches the amount of external memory registered at the last global gc.
- intptr_t amount_of_external_allocated_memory_at_last_global_gc_;
+ int64_t amount_of_external_allocated_memory_at_last_global_gc_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;
+ // Indicates that inline bump-pointer allocation has been globally disabled
+ // for all spaces. This is used to disable allocations in generated code.
+ bool inline_allocation_disabled_;
+
// Weak list heads, threaded through the objects.
+ // List heads are initialized lazily and contain the undefined_value at start.
Object* native_contexts_list_;
Object* array_buffers_list_;
Object* allocation_sites_list_;
+ // WeakHashTable that maps objects embedded in optimized code to dependent
+ // code list. It is initialized lazily and contains the undefined_value at
+ // start.
+ Object* weak_object_to_code_table_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
@@ -2032,32 +2019,37 @@ class Heap {
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
struct GCPrologueCallbackPair {
- GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCPrologueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCPrologueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCPrologueCallbackPair> gc_prologue_callbacks_;
struct GCEpilogueCallbackPair {
- GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCEpilogueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCEpilogueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
- GCCallback global_gc_prologue_callback_;
- GCCallback global_gc_epilogue_callback_;
-
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
@@ -2068,10 +2060,23 @@ class Heap {
gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
}
+ // Code that should be run before and after each GC. Includes some
+ // reporting/verification activities when compiled with DEBUG set.
+ void GarbageCollectionPrologue();
+ void GarbageCollectionEpilogue();
+
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
+ // Performs a garbage collection operation.
+ // Returns whether there is a chance that another major GC could
+ // collect more garbage.
+ bool CollectGarbage(AllocationSpace space,
+ GarbageCollector collector,
+ const char* gc_reason,
+ const char* collector_reason);
+
// Performs garbage collection
// Returns whether there is a chance another major GC could
// collect more garbage.
@@ -2080,22 +2085,35 @@ class Heap {
inline void UpdateOldSpaceLimits();
- // Allocate an uninitialized object in map space. The behavior is identical
- // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
- // have to test the allocation space argument and (b) can reduce code size
- // (since both AllocateRaw and AllocateRawMap are inlined).
- MUST_USE_RESULT inline MaybeObject* AllocateRawMap();
+ // Selects the proper allocation space depending on the given object
+ // size, pretenuring decision, and preferred old-space.
+ static AllocationSpace SelectSpace(int object_size,
+ AllocationSpace preferred_old_space,
+ PretenureFlag pretenure) {
+ ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
+ preferred_old_space == OLD_DATA_SPACE);
+ if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
+ }
+
+ // Allocate an uninitialized fixed array.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(
+ int length, PretenureFlag pretenure);
- // Allocate an uninitialized object in the simple cell space.
- MUST_USE_RESULT inline MaybeObject* AllocateRawCell();
+ // Allocate an uninitialized fixed double array.
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
+ int length, PretenureFlag pretenure);
- // Allocate an uninitialized object in the global property cell space.
- MUST_USE_RESULT inline MaybeObject* AllocateRawPropertyCell();
+ // Allocate an initialized fixed array with the given filler value.
+ MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithFiller(
+ int length, PretenureFlag pretenure, Object* filler);
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(JSObject* obj,
FixedArray* properties,
Map* map);
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
bool CreateInitialMaps();
bool CreateInitialObjects();
@@ -2106,6 +2124,7 @@ class Heap {
NO_INLINE(void CreateJSConstructEntryStub());
void CreateFixedStubs();
+ void CreateStubsRequiringBuiltins();
MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
Object* to_number,
@@ -2116,10 +2135,6 @@ class Heap {
ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site);
-
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
@@ -2130,9 +2145,28 @@ class Heap {
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+ // Allocate a tenured simple cell.
+ MUST_USE_RESULT MaybeObject* AllocateCell(Object* value);
+
+ // Allocate a tenured JS global property cell initialized with the hole.
+ MUST_USE_RESULT MaybeObject* AllocatePropertyCell();
+
+ // Allocate Box.
+ MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
+ PretenureFlag pretenure);
+
// Performs a minor collection in new generation.
void Scavenge();
+ // Commits from space if it is uncommitted.
+ void EnsureFromSpaceIsCommitted();
+
+ // Uncommit unused semi space.
+ bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+ // Fill in bogus values in from space
+ void ZapFromSpace();
+
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
Heap* heap,
Object** pointer);
@@ -2286,6 +2320,15 @@ class Heap {
void ClearObjectStats(bool clear_last_time_stats = false);
+ void set_weak_object_to_code_table(Object* value) {
+ ASSERT(!InNewSpace(value));
+ weak_object_to_code_table_ = value;
+ }
+
+ Object** weak_object_to_code_table_address() {
+ return &weak_object_to_code_table_;
+ }
+
static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -2335,13 +2378,17 @@ class Heap {
unsigned int gc_count_at_last_idle_gc_;
int scavenges_since_last_idle_round_;
+ // These two counters are monotonically increasing and never reset.
+ size_t full_codegen_bytes_generated_;
+ size_t crankshaft_codegen_bytes_generated_;
+
// If the --deopt_every_n_garbage_collections flag is set to a positive value,
// this variable holds the number of garbage collections since the last
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
#ifdef VERIFY_HEAP
- int no_weak_embedded_maps_verification_scope_depth_;
+ int no_weak_object_verification_scope_depth_;
#endif
static const int kMaxMarkSweepsInIdleRound = 7;
@@ -2375,7 +2422,7 @@ class Heap {
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
#ifdef VERIFY_HEAP
- friend class NoWeakEmbeddedMapsVerificationScope;
+ friend class NoWeakObjectVerificationScope;
#endif
DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -2439,11 +2486,12 @@ class AlwaysAllocateScope {
DisallowAllocationFailure disallow_allocation_failure_;
};
+
#ifdef VERIFY_HEAP
-class NoWeakEmbeddedMapsVerificationScope {
+class NoWeakObjectVerificationScope {
public:
- inline NoWeakEmbeddedMapsVerificationScope();
- inline ~NoWeakEmbeddedMapsVerificationScope();
+ inline NoWeakObjectVerificationScope();
+ inline ~NoWeakObjectVerificationScope();
};
#endif
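
The SelectSpace helper added above centralizes the allocation-space policy: objects too large for a regular page go to the large-object space, pretenured objects go to their preferred old space, and everything else goes to new space. A minimal standalone sketch of that policy, with simplified enums and a hypothetical kMaxRegularObjectSize constant standing in for Page::kMaxNonCodeHeapObjectSize:

    // Sketch only; names simplified relative to the heap.h diff above.
    enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE, LO_SPACE };
    enum PretenureFlag { NOT_TENURED, TENURED };

    const int kMaxRegularObjectSize = 512 * 1024;  // Hypothetical page limit.

    AllocationSpace SelectSpace(int object_size,
                                AllocationSpace preferred_old_space,
                                PretenureFlag pretenure) {
      // Oversized objects always go to the large-object space.
      if (object_size > kMaxRegularObjectSize) return LO_SPACE;
      // Pretenured objects go straight to the preferred old space.
      return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
    }
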
diff --git a/chromium/v8/src/hydrogen-alias-analysis.h b/chromium/v8/src/hydrogen-alias-analysis.h
index 73e116e63e1..21a54625ff8 100644
--- a/chromium/v8/src/hydrogen-alias-analysis.h
+++ b/chromium/v8/src/hydrogen-alias-analysis.h
@@ -88,15 +88,6 @@ class HAliasAnalyzer : public ZoneObject {
inline bool NoAlias(HValue* a, HValue* b) {
return Query(a, b) == kNoAlias;
}
-
- // Returns the actual value of an instruction. In the case of a chain
- // of informative definitions, return the root of the chain.
- HValue* ActualValue(HValue* obj) {
- while (obj->IsInformativeDefinition()) { // Walk a chain of idefs.
- obj = obj->RedefinedOperand();
- }
- return obj;
- }
};
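
The ActualValue helper removed from HAliasAnalyzer above walked a chain of informative definitions back to the underlying value; other hunks in this commit call the equivalent HValue::ActualValue() instead. A sketch of the chain-walking idiom, using a hypothetical minimal node type:

    // Sketch of the removed chain walk over a hypothetical node type.
    struct Node {
      bool is_informative_definition;  // Node only refines another value.
      Node* redefined_operand;         // The value it refines.
    };

    Node* ActualValue(Node* obj) {
      // Walk the chain of informative definitions back to the real value.
      while (obj->is_informative_definition) obj = obj->redefined_operand;
      return obj;
    }
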
diff --git a/chromium/v8/src/hydrogen-bce.cc b/chromium/v8/src/hydrogen-bce.cc
index 869db54a2f4..e1a28471273 100644
--- a/chromium/v8/src/hydrogen-bce.cc
+++ b/chromium/v8/src/hydrogen-bce.cc
@@ -91,8 +91,8 @@ class BoundsCheckKey : public ZoneObject {
private:
BoundsCheckKey(HValue* index_base, HValue* length)
- : index_base_(index_base),
- length_(length) { }
+ : index_base_(index_base),
+ length_(length) { }
HValue* index_base_;
HValue* length_;
@@ -144,10 +144,7 @@ class BoundsCheckBbData: public ZoneObject {
// (either upper or lower; note that HasSingleCheck() becomes false).
// Otherwise one of the current checks is modified so that it also covers
// new_offset, and new_check is removed.
- //
- // If the check cannot be modified because the context is unknown it
- // returns false, otherwise it returns true.
- bool CoverCheck(HBoundsCheck* new_check,
+ void CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
ASSERT(new_check->index()->representation().IsSmiOrInteger32());
bool keep_new_check = false;
@@ -158,15 +155,7 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
- bool result = BuildOffsetAdd(upper_check_,
- &added_upper_index_,
- &added_upper_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
- upper_check_->ReplaceAllUsesWith(upper_check_->index());
- upper_check_->SetOperandAt(0, added_upper_index_);
+ TightenCheck(upper_check_, new_check);
}
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
@@ -174,32 +163,27 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
lower_check_ = new_check;
} else {
- bool result = BuildOffsetAdd(lower_check_,
- &added_lower_index_,
- &added_lower_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
- lower_check_->ReplaceAllUsesWith(lower_check_->index());
- lower_check_->SetOperandAt(0, added_lower_index_);
+ TightenCheck(lower_check_, new_check);
}
} else {
- ASSERT(false);
+ // Should never have called CoverCheck() in this case.
+ UNREACHABLE();
}
if (!keep_new_check) {
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
+ } else {
+ HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
+ : lower_check_;
+ // The length is guaranteed to be live at first_check.
+ ASSERT(new_check->length() == first_check->length());
+ HInstruction* old_position = new_check->next();
+ new_check->Unlink();
+ new_check->InsertAfter(first_check);
+ MoveIndexIfNecessary(new_check->index(), new_check, old_position);
}
-
- return true;
- }
-
- void RemoveZeroOperations() {
- RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
- RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
}
BoundsCheckBbData(BoundsCheckKey* key,
@@ -210,18 +194,14 @@ class BoundsCheckBbData: public ZoneObject {
HBoundsCheck* upper_check,
BoundsCheckBbData* next_in_bb,
BoundsCheckBbData* father_in_dt)
- : key_(key),
- lower_offset_(lower_offset),
- upper_offset_(upper_offset),
- basic_block_(bb),
- lower_check_(lower_check),
- upper_check_(upper_check),
- added_lower_index_(NULL),
- added_lower_offset_(NULL),
- added_upper_index_(NULL),
- added_upper_offset_(NULL),
- next_in_bb_(next_in_bb),
- father_in_dt_(father_in_dt) { }
+ : key_(key),
+ lower_offset_(lower_offset),
+ upper_offset_(upper_offset),
+ basic_block_(bb),
+ lower_check_(lower_check),
+ upper_check_(upper_check),
+ next_in_bb_(next_in_bb),
+ father_in_dt_(father_in_dt) { }
private:
BoundsCheckKey* key_;
@@ -230,57 +210,56 @@ class BoundsCheckBbData: public ZoneObject {
HBasicBlock* basic_block_;
HBoundsCheck* lower_check_;
HBoundsCheck* upper_check_;
- HInstruction* added_lower_index_;
- HConstant* added_lower_offset_;
- HInstruction* added_upper_index_;
- HConstant* added_upper_offset_;
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;
- // Given an existing add instruction and a bounds check it tries to
- // find the current context (either of the add or of the check index).
- HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
- if (add != NULL && add->IsAdd()) {
- return HAdd::cast(add)->context();
+ void MoveIndexIfNecessary(HValue* index_raw,
+ HBoundsCheck* insert_before,
+ HInstruction* end_of_scan_range) {
+ if (!index_raw->IsAdd() && !index_raw->IsSub()) {
+ // index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
+ // or index_base directly. In the latter case, no need to move anything.
+ return;
}
- if (check->index()->IsBinaryOperation()) {
- return HBinaryOperation::cast(check->index())->context();
+ HArithmeticBinaryOperation* index =
+ HArithmeticBinaryOperation::cast(index_raw);
+ HValue* left_input = index->left();
+ HValue* right_input = index->right();
+ bool must_move_index = false;
+ bool must_move_left_input = false;
+ bool must_move_right_input = false;
+ for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
+ if (cursor == left_input) must_move_left_input = true;
+ if (cursor == right_input) must_move_right_input = true;
+ if (cursor == index) must_move_index = true;
+ if (cursor->previous() == NULL) {
+ cursor = cursor->block()->dominator()->end();
+ } else {
+ cursor = cursor->previous();
+ }
}
- return NULL;
- }
-
- // This function returns false if it cannot build the add because the
- // current context cannot be determined.
- bool BuildOffsetAdd(HBoundsCheck* check,
- HInstruction** add,
- HConstant** constant,
- HValue* original_value,
- Representation representation,
- int32_t new_offset) {
- HValue* index_context = IndexContext(*add, check);
- if (index_context == NULL) return false;
-
- Zone* zone = BasicBlock()->zone();
- HConstant* new_constant = HConstant::New(zone, index_context,
- new_offset, representation);
- if (*add == NULL) {
- new_constant->InsertBefore(check);
- (*add) = HAdd::New(zone, index_context, original_value, new_constant);
- (*add)->AssumeRepresentation(representation);
- (*add)->InsertBefore(check);
- } else {
- new_constant->InsertBefore(*add);
- (*constant)->DeleteAndReplaceWith(new_constant);
+ if (must_move_index) {
+ index->Unlink();
+ index->InsertBefore(insert_before);
+ }
+ // The BCE algorithm only selects mergeable bounds checks that share
+ // the same "index_base", so we'll only ever have to move constants.
+ if (must_move_left_input) {
+ HConstant::cast(left_input)->Unlink();
+ HConstant::cast(left_input)->InsertBefore(index);
+ }
+ if (must_move_right_input) {
+ HConstant::cast(right_input)->Unlink();
+ HConstant::cast(right_input)->InsertBefore(index);
}
- *constant = new_constant;
- return true;
}
- void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
- if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
- (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
- (*constant)->DeleteAndReplaceWith(NULL);
- }
+ void TightenCheck(HBoundsCheck* original_check,
+ HBoundsCheck* tighter_check) {
+ ASSERT(original_check->length() == tighter_check->length());
+ MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
+ original_check->ReplaceAllUsesWith(original_check->index());
+ original_check->SetOperandAt(0, tighter_check->index());
}
DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
@@ -394,11 +373,10 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
check->DeleteAndReplaceWith(check->ActualValue());
- } else if (data->BasicBlock() != bb ||
- !data->CoverCheck(check, offset)) {
- // If the check is in the current BB we try to modify it by calling
- // "CoverCheck", but if also that fails we record the current offsets
- // in a new data instance because from now on they are covered.
+ } else if (data->BasicBlock() == bb) {
+ data->CoverCheck(check, offset);
+ } else if (graph()->use_optimistic_licm() ||
+ bb->IsLoopSuccessorDominator()) {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
: data->LowerOffset();
@@ -424,7 +402,6 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
void HBoundsCheckEliminationPhase::PostProcessBlock(
HBasicBlock* block, BoundsCheckBbData* data) {
while (data != NULL) {
- data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
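
The reworked CoverCheck above folds a new bounds check into the existing covering pair by widening the tracked lower/upper offset interval for a shared index base, then either deletes the new check or tightens and hoists it. A hedged sketch of just the interval bookkeeping, independent of the Hydrogen types:

    #include <algorithm>
    #include <cstdint>

    // Sketch: widest offsets seen so far for one (index_base, length) key.
    // A new check at offset k is redundant iff lower <= k <= upper already.
    struct CoverageInterval {
      int32_t lower = 0;
      int32_t upper = 0;

      // Returns true if the offset was already covered (the check can be
      // deleted); otherwise widens the interval so later checks at this
      // offset become redundant.
      bool Cover(int32_t offset) {
        if (offset >= lower && offset <= upper) return true;
        lower = std::min(lower, offset);
        upper = std::max(upper, offset);
        return false;
      }
    };
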
diff --git a/chromium/v8/src/hydrogen-canonicalize.cc b/chromium/v8/src/hydrogen-canonicalize.cc
index 4d96415e6a8..d3f72e93398 100644
--- a/chromium/v8/src/hydrogen-canonicalize.cc
+++ b/chromium/v8/src/hydrogen-canonicalize.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "hydrogen-canonicalize.h"
+#include "hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
@@ -57,8 +58,15 @@ void HCanonicalizePhase::Run() {
}
}
}
+
// Perform actual Canonicalization pass.
+ HRedundantPhiEliminationPhase redundant_phi_eliminator(graph());
for (int i = 0; i < blocks->length(); ++i) {
+ // Eliminate redundant phis in the block first; changes to their inputs
+ // might have made them redundant, and eliminating them creates more
+ // opportunities for constant folding and strength reduction.
+ redundant_phi_eliminator.ProcessBlock(blocks->at(i));
+ // Now canonicalize each instruction.
for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
HValue* value = instr->Canonicalize();
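
Running redundant-phi elimination block by block before canonicalization, as the new code above does, matters because a phi whose inputs have collapsed to a single value hides that value from the canonicalizer. A sketch of the redundancy test, assuming a bare SSA value type:

    #include <vector>

    struct Value;  // Opaque SSA value.

    // Sketch: a phi is redundant if all inputs (ignoring self-references on
    // loop back edges) are one and the same value; replacing the phi by that
    // value exposes folding opportunities to the canonicalizer.
    Value* RedundantPhiReplacement(Value* phi, const std::vector<Value*>& inputs) {
      Value* unique = nullptr;
      for (Value* input : inputs) {
        if (input == phi) continue;
        if (unique == nullptr) {
          unique = input;
        } else if (input != unique) {
          return nullptr;  // Two distinct inputs: not redundant.
        }
      }
      return unique;  // May be null for a degenerate phi.
    }
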
diff --git a/chromium/v8/src/hydrogen-check-elimination.cc b/chromium/v8/src/hydrogen-check-elimination.cc
new file mode 100644
index 00000000000..bbd3042fb7a
--- /dev/null
+++ b/chromium/v8/src/hydrogen-check-elimination.cc
@@ -0,0 +1,536 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-check-elimination.h"
+#include "hydrogen-alias-analysis.h"
+#include "hydrogen-flow-engine.h"
+
+#define GLOBAL 1
+
+// Only collect stats in debug mode.
+#if DEBUG
+#define INC_STAT(x) phase_->x++
+#else
+#define INC_STAT(x)
+#endif
+
+// For code de-uglification.
+#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
+
+namespace v8 {
+namespace internal {
+
+typedef UniqueSet<Map>* MapSet;
+
+struct HCheckTableEntry {
+ HValue* object_; // The object being approximated. NULL => invalid entry.
+ HValue* check_; // The last check instruction.
+ MapSet maps_; // The set of known maps for the object.
+};
+
+
+// The main data structure used during check elimination, which stores a
+// set of known maps for each object.
+class HCheckTable : public ZoneObject {
+ public:
+ static const int kMaxTrackedObjects = 10;
+
+ explicit HCheckTable(HCheckEliminationPhase* phase)
+ : phase_(phase),
+ cursor_(0),
+ size_(0) {
+ }
+
+ // The main processing of instructions.
+ HCheckTable* Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kCheckMaps: {
+ ReduceCheckMaps(HCheckMaps::cast(instr));
+ break;
+ }
+ case HValue::kCheckValue: {
+ ReduceCheckValue(HCheckValue::cast(instr));
+ break;
+ }
+ case HValue::kLoadNamedField: {
+ ReduceLoadNamedField(HLoadNamedField::cast(instr));
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ ReduceStoreNamedField(HStoreNamedField::cast(instr));
+ break;
+ }
+ case HValue::kCompareMap: {
+ ReduceCompareMap(HCompareMap::cast(instr));
+ break;
+ }
+ case HValue::kTransitionElementsKind: {
+ ReduceTransitionElementsKind(
+ HTransitionElementsKind::cast(instr));
+ break;
+ }
+ case HValue::kCheckMapValue: {
+ ReduceCheckMapValue(HCheckMapValue::cast(instr));
+ break;
+ }
+ case HValue::kCheckHeapObject: {
+ ReduceCheckHeapObject(HCheckHeapObject::cast(instr));
+ break;
+ }
+ default: {
+ // If the instruction changes maps uncontrollably, drop everything.
+ if (instr->CheckGVNFlag(kChangesMaps) ||
+ instr->CheckGVNFlag(kChangesOsrEntries)) {
+ Kill();
+ }
+ }
+ // Improvements possible:
+ // - eliminate redundant HCheckSmi, HCheckInstanceType instructions
+ // - track which values have been HCheckHeapObject'd
+ }
+
+ return this;
+ }
+
+ // Global analysis: Copy state to successor block.
+ HCheckTable* Copy(HBasicBlock* succ, Zone* zone) {
+ HCheckTable* copy = new(phase_->zone()) HCheckTable(phase_);
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* old_entry = &entries_[i];
+ HCheckTableEntry* new_entry = &copy->entries_[i];
+ // TODO(titzer): keep the check if this block dominates the successor?
+ new_entry->object_ = old_entry->object_;
+ new_entry->check_ = NULL;
+ new_entry->maps_ = old_entry->maps_->Copy(phase_->zone());
+ }
+ if (succ->predecessors()->length() == 1) {
+ HControlInstruction* end = succ->predecessors()->at(0)->end();
+ if (end->IsCompareMap() && end->SuccessorAt(0) == succ) {
+ // Learn on the true branch of if(CompareMap(x)).
+ HCompareMap* cmp = HCompareMap::cast(end);
+ HValue* object = cmp->value()->ActualValue();
+ HCheckTableEntry* entry = copy->Find(object);
+ if (entry == NULL) {
+ copy->Insert(object, cmp->map());
+ } else {
+ MapSet list = new(phase_->zone()) UniqueSet<Map>();
+ list->Add(cmp->map(), phase_->zone());
+ entry->maps_ = list;
+ }
+ }
+ // TODO(titzer): is it worthwhile to learn on false branch too?
+ }
+ return copy;
+ }
+
+ // Global analysis: Merge this state with the other incoming state.
+ HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that, Zone* zone) {
+ if (that->size_ == 0) {
+ // If the other state is empty, simply reset.
+ size_ = 0;
+ cursor_ = 0;
+ return this;
+ }
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* this_entry = &entries_[i];
+ HCheckTableEntry* that_entry = that->Find(this_entry->object_);
+ if (that_entry == NULL) {
+ this_entry->object_ = NULL;
+ compact = true;
+ } else {
+ this_entry->maps_ = this_entry->maps_->Union(
+ that_entry->maps_, phase_->zone());
+ if (this_entry->check_ != that_entry->check_) this_entry->check_ = NULL;
+ ASSERT(this_entry->maps_->size() > 0);
+ }
+ }
+ if (compact) Compact();
+ return this;
+ }
+
+ void ReduceCheckMaps(HCheckMaps* instr) {
+ HValue* object = instr->value()->ActualValue();
+ HCheckTableEntry* entry = Find(object);
+ if (entry != NULL) {
+ // Entry found.
+ MapSet a = entry->maps_;
+ MapSet i = instr->map_set().Copy(phase_->zone());
+ if (a->IsSubset(i)) {
+ // The first check is more strict; the second is redundant.
+ if (entry->check_ != NULL) {
+ instr->DeleteAndReplaceWith(entry->check_);
+ INC_STAT(redundant_);
+ } else {
+ instr->DeleteAndReplaceWith(instr->value());
+ INC_STAT(removed_);
+ }
+ return;
+ }
+ i = i->Intersect(a, phase_->zone());
+ if (i->size() == 0) {
+ // Intersection is empty; probably megamorphic, which is likely to
+ // deopt anyway, so just leave things as they are.
+ INC_STAT(empty_);
+ } else {
+ // TODO(titzer): replace the first check with a more strict check
+ INC_STAT(narrowed_);
+ }
+ } else {
+ // No entry; insert a new one.
+ Insert(object, instr, instr->map_set().Copy(phase_->zone()));
+ }
+ }
+
+ void ReduceCheckValue(HCheckValue* instr) {
+ // Canonicalize HCheckValues; they might have their values load-eliminated.
+ HValue* value = instr->Canonicalize();
+ if (value == NULL) {
+ instr->DeleteAndReplaceWith(instr->value());
+ INC_STAT(removed_);
+ } else if (value != instr) {
+ instr->DeleteAndReplaceWith(value);
+ INC_STAT(redundant_);
+ }
+ }
+
+ void ReduceLoadNamedField(HLoadNamedField* instr) {
+ // Reduce a load of the map field when it is known to be a constant.
+ if (!IsMapAccess(instr->access())) return;
+
+ HValue* object = instr->object()->ActualValue();
+ MapSet maps = FindMaps(object);
+ if (maps == NULL || maps->size() != 1) return; // Not a constant.
+
+ Unique<Map> map = maps->at(0);
+ HConstant* constant = HConstant::CreateAndInsertBefore(
+ instr->block()->graph()->zone(), map, true, instr);
+ instr->DeleteAndReplaceWith(constant);
+ INC_STAT(loads_);
+ }
+
+ void ReduceCheckMapValue(HCheckMapValue* instr) {
+ if (!instr->map()->IsConstant()) return; // Nothing to learn.
+
+ HValue* object = instr->value()->ActualValue();
+ // Match a HCheckMapValue(object, HConstant(map))
+ Unique<Map> map = MapConstant(instr->map());
+ MapSet maps = FindMaps(object);
+ if (maps != NULL) {
+ if (maps->Contains(map)) {
+ if (maps->size() == 1) {
+ // Object is known to have exactly this map.
+ instr->DeleteAndReplaceWith(NULL);
+ INC_STAT(removed_);
+ } else {
+ // Only one map survives the check.
+ maps->Clear();
+ maps->Add(map, phase_->zone());
+ }
+ }
+ } else {
+ // No prior information.
+ Insert(object, map);
+ }
+ }
+
+ void ReduceCheckHeapObject(HCheckHeapObject* instr) {
+ if (FindMaps(instr->value()->ActualValue()) != NULL) {
+ // If the object has known maps, it's definitely a heap object.
+ instr->DeleteAndReplaceWith(instr->value());
+ INC_STAT(removed_cho_);
+ }
+ }
+
+ void ReduceStoreNamedField(HStoreNamedField* instr) {
+ HValue* object = instr->object()->ActualValue();
+ if (instr->has_transition()) {
+ // This store transitions the object to a new map.
+ Kill(object);
+ Insert(object, MapConstant(instr->transition()));
+ } else if (IsMapAccess(instr->access())) {
+ // This is a store directly to the map field of the object.
+ Kill(object);
+ if (!instr->value()->IsConstant()) return;
+ Insert(object, MapConstant(instr->value()));
+ } else {
+ // If the instruction changes maps, it should be handled above.
+ CHECK(!instr->CheckGVNFlag(kChangesMaps));
+ }
+ }
+
+ void ReduceCompareMap(HCompareMap* instr) {
+ MapSet maps = FindMaps(instr->value()->ActualValue());
+ if (maps == NULL) return;
+ if (maps->Contains(instr->map())) {
+ if (maps->size() == 1) {
+ // TODO(titzer): replace with goto true branch
+ INC_STAT(compares_true_);
+ }
+ } else {
+ // TODO(titzer): replace with goto false branch
+ INC_STAT(compares_false_);
+ }
+ }
+
+ void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
+ MapSet maps = FindMaps(instr->object()->ActualValue());
+ // Can only learn more about an object that already has a known set of maps.
+ if (maps == NULL) return;
+ if (maps->Contains(instr->original_map())) {
+ // If the object has the original map, it will be transitioned.
+ maps->Remove(instr->original_map());
+ maps->Add(instr->transitioned_map(), phase_->zone());
+ } else {
+ // Object does not have the given map, thus the transition is redundant.
+ instr->DeleteAndReplaceWith(instr->object());
+ INC_STAT(transitions_);
+ }
+ }
+
+ // Kill everything in the table.
+ void Kill() {
+ size_ = 0;
+ cursor_ = 0;
+ }
+
+ // Kill everything in the table that may alias {object}.
+ void Kill(HValue* object) {
+ bool compact = false;
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT(entry->object_ != NULL);
+ if (phase_->aliasing_->MayAlias(entry->object_, object)) {
+ entry->object_ = NULL;
+ compact = true;
+ }
+ }
+ if (compact) Compact();
+ ASSERT(Find(object) == NULL);
+ }
+
+ void Compact() {
+ // First, compact the array in place.
+ int max = size_, dest = 0, old_cursor = cursor_;
+ for (int i = 0; i < max; i++) {
+ if (entries_[i].object_ != NULL) {
+ if (dest != i) entries_[dest] = entries_[i];
+ dest++;
+ } else {
+ if (i < old_cursor) cursor_--;
+ size_--;
+ }
+ }
+ ASSERT(size_ == dest);
+ ASSERT(cursor_ <= size_);
+
+ // Preserve the age of the entries by moving the older entries to the end.
+ if (cursor_ == size_) return; // Cursor already points at end.
+ if (cursor_ != 0) {
+ // | L = oldest | R = newest | |
+ // ^ cursor ^ size ^ MAX
+ HCheckTableEntry tmp_entries[kMaxTrackedObjects];
+ int L = cursor_;
+ int R = size_ - cursor_;
+
+ OS::MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry));
+ OS::MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry));
+ OS::MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry));
+ }
+
+ cursor_ = size_; // Move cursor to end.
+ }
+
+ void Print() {
+ for (int i = 0; i < size_; i++) {
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT(entry->object_ != NULL);
+ PrintF(" checkmaps-table @%d: object #%d ", i, entry->object_->id());
+ if (entry->check_ != NULL) {
+ PrintF("check #%d ", entry->check_->id());
+ }
+ MapSet list = entry->maps_;
+ PrintF("%d maps { ", list->size());
+ for (int j = 0; j < list->size(); j++) {
+ if (j > 0) PrintF(", ");
+ PrintF("%" V8PRIxPTR, list->at(j).Hashcode());
+ }
+ PrintF(" }\n");
+ }
+ }
+
+ private:
+ HCheckTableEntry* Find(HValue* object) {
+ for (int i = size_ - 1; i >= 0; i--) {
+ // Search from most-recently-inserted to least-recently-inserted.
+ HCheckTableEntry* entry = &entries_[i];
+ ASSERT(entry->object_ != NULL);
+ if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry;
+ }
+ return NULL;
+ }
+
+ MapSet FindMaps(HValue* object) {
+ HCheckTableEntry* entry = Find(object);
+ return entry == NULL ? NULL : entry->maps_;
+ }
+
+ void Insert(HValue* object, Unique<Map> map) {
+ MapSet list = new(phase_->zone()) UniqueSet<Map>();
+ list->Add(map, phase_->zone());
+ Insert(object, NULL, list);
+ }
+
+ void Insert(HValue* object, HCheckMaps* check, MapSet maps) {
+ HCheckTableEntry* entry = &entries_[cursor_++];
+ entry->object_ = object;
+ entry->check_ = check;
+ entry->maps_ = maps;
+ // If the table becomes full, wrap around and overwrite older entries.
+ if (cursor_ == kMaxTrackedObjects) cursor_ = 0;
+ if (size_ < kMaxTrackedObjects) size_++;
+ }
+
+ bool IsMapAccess(HObjectAccess access) {
+ return access.IsInobject() && access.offset() == JSObject::kMapOffset;
+ }
+
+ Unique<Map> MapConstant(HValue* value) {
+ return Unique<Map>::cast(HConstant::cast(value)->GetUnique());
+ }
+
+ friend class HCheckMapsEffects;
+
+ HCheckEliminationPhase* phase_;
+ HCheckTableEntry entries_[kMaxTrackedObjects];
+ int16_t cursor_; // Must be <= kMaxTrackedObjects
+ int16_t size_; // Must be <= kMaxTrackedObjects
+ // TODO(titzer): STATIC_ASSERT kMaxTrackedObjects < max(cursor_)
+};
+
+
+// Collects instructions that can cause effects that invalidate information
+// needed for check elimination.
+class HCheckMapsEffects : public ZoneObject {
+ public:
+ explicit HCheckMapsEffects(Zone* zone)
+ : maps_stored_(false),
+ stores_(5, zone) { }
+
+ inline bool Disabled() {
+ return false; // Effects are _not_ disabled.
+ }
+
+ // Process a possibly side-effecting instruction.
+ void Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField: {
+ stores_.Add(HStoreNamedField::cast(instr), zone);
+ break;
+ }
+ case HValue::kOsrEntry: {
+ // Kill everything. Loads must not be hoisted past the OSR entry.
+ maps_stored_ = true;
+ }  // Fall through.
+ default: {
+ maps_stored_ |= (instr->CheckGVNFlag(kChangesMaps) |
+ instr->CheckGVNFlag(kChangesElementsKind));
+ }
+ }
+ }
+
+ // Apply these effects to the given check elimination table.
+ void Apply(HCheckTable* table) {
+ if (maps_stored_) {
+ // Uncontrollable map modifications; kill everything.
+ table->Kill();
+ return;
+ }
+
+ // Kill maps for each store contained in these effects.
+ for (int i = 0; i < stores_.length(); i++) {
+ HStoreNamedField* s = stores_[i];
+ if (table->IsMapAccess(s->access()) || s->has_transition()) {
+ table->Kill(s->object()->ActualValue());
+ }
+ }
+ }
+
+ // Union these effects with the other effects.
+ void Union(HCheckMapsEffects* that, Zone* zone) {
+ maps_stored_ |= that->maps_stored_;
+ for (int i = 0; i < that->stores_.length(); i++) {
+ stores_.Add(that->stores_[i], zone);
+ }
+ }
+
+ private:
+ bool maps_stored_ : 1;
+ ZoneList<HStoreNamedField*> stores_;
+};
+
+
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
+void HCheckEliminationPhase::Run() {
+ HFlowEngine<HCheckTable, HCheckMapsEffects> engine(graph(), zone());
+ HCheckTable* table = new(zone()) HCheckTable(this);
+
+ if (GLOBAL) {
+ // Perform a global analysis.
+ engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+ } else {
+ // Perform only local analysis.
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ table->Kill();
+ engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
+ }
+ }
+
+ if (FLAG_trace_check_elimination) PrintStats();
+}
+
+
+// Print the statistics collected during check elimination (debug only).
+void HCheckEliminationPhase::PrintStats() {
+#if DEBUG
+ #define PRINT_STAT(x) if (x##_ > 0) PrintF(" %-16s = %2d\n", #x, x##_)
+#else
+ #define PRINT_STAT(x)
+#endif
+ PRINT_STAT(redundant);
+ PRINT_STAT(removed);
+ PRINT_STAT(removed_cho);
+ PRINT_STAT(narrowed);
+ PRINT_STAT(loads);
+ PRINT_STAT(empty);
+ PRINT_STAT(compares_true);
+ PRINT_STAT(compares_false);
+ PRINT_STAT(transitions);
+}
+
+} } // namespace v8::internal
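
HCheckTable above bounds its state at kMaxTrackedObjects entries: Insert() advances a cursor that wraps around and overwrites the oldest slot, and Compact() rotates the survivors so insertion age is preserved. A self-contained sketch of that wrap-around replacement policy:

    // Sketch of HCheckTable's bounded insertion policy: a fixed array plus a
    // cursor that wraps, so a full table evicts its oldest entry first.
    template <typename Entry, int kMax>
    class BoundedTable {
     public:
      void Insert(const Entry& e) {
        entries_[cursor_++] = e;
        if (cursor_ == kMax) cursor_ = 0;  // Wrap: next insert overwrites oldest.
        if (size_ < kMax) size_++;
      }
      int size() const { return size_; }

     private:
      Entry entries_[kMax];
      int cursor_ = 0;  // Always < kMax.
      int size_ = 0;    // Always <= kMax.
    };

Compact() in the diff adds the extra rotation step on top of this, so that after deletions the remaining entries still sit in age order.
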
diff --git a/chromium/v8/src/hydrogen-check-elimination.h b/chromium/v8/src/hydrogen-check-elimination.h
new file mode 100644
index 00000000000..b429b174623
--- /dev/null
+++ b/chromium/v8/src/hydrogen-check-elimination.h
@@ -0,0 +1,80 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_
+#define V8_HYDROGEN_CHECK_ELIMINATION_H_
+
+#include "hydrogen.h"
+#include "hydrogen-alias-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Remove CheckMaps instructions through flow- and branch-sensitive analysis.
+class HCheckEliminationPhase : public HPhase {
+ public:
+ explicit HCheckEliminationPhase(HGraph* graph)
+ : HPhase("H_Check Elimination", graph), aliasing_() {
+#ifdef DEBUG
+ redundant_ = 0;
+ removed_ = 0;
+ removed_cho_ = 0;
+ narrowed_ = 0;
+ loads_ = 0;
+ empty_ = 0;
+ compares_true_ = 0;
+ compares_false_ = 0;
+ transitions_ = 0;
+#endif
+ }
+
+ void Run();
+
+ friend class HCheckTable;
+
+ private:
+ void PrintStats();
+
+ HAliasAnalyzer* aliasing_;
+#ifdef DEBUG
+ int redundant_;
+ int removed_;
+ int removed_cho_;
+ int narrowed_;
+ int loads_;
+ int empty_;
+ int compares_true_;
+ int compares_false_;
+ int transitions_;
+#endif
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_CHECK_ELIMINATION_H_
diff --git a/chromium/v8/src/hydrogen-dce.cc b/chromium/v8/src/hydrogen-dce.cc
index 0e7253d5a48..e101ee5bcc5 100644
--- a/chromium/v8/src/hydrogen-dce.cc
+++ b/chromium/v8/src/hydrogen-dce.cc
@@ -31,56 +31,60 @@
namespace v8 {
namespace internal {
-bool HDeadCodeEliminationPhase::MarkLive(HValue* ref, HValue* instr) {
- if (instr->CheckFlag(HValue::kIsLive)) return false;
- instr->SetFlag(HValue::kIsLive);
-
- if (FLAG_trace_dead_code_elimination) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- if (ref != NULL) {
- ref->PrintTo(&stream);
- } else {
- stream.Add("root ");
+void HDeadCodeEliminationPhase::MarkLive(
+ HValue* instr, ZoneList<HValue*>* worklist) {
+ if (instr->CheckFlag(HValue::kIsLive)) return; // Already live.
+
+ if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr);
+
+ // Transitively mark all inputs of live instructions live.
+ worklist->Add(instr, zone());
+ while (!worklist->is_empty()) {
+ HValue* instr = worklist->RemoveLast();
+ instr->SetFlag(HValue::kIsLive);
+ for (int i = 0; i < instr->OperandCount(); ++i) {
+ HValue* input = instr->OperandAt(i);
+ if (!input->CheckFlag(HValue::kIsLive)) {
+ input->SetFlag(HValue::kIsLive);
+ worklist->Add(input, zone());
+ if (FLAG_trace_dead_code_elimination) PrintLive(instr, input);
+ }
}
- stream.Add(" -> ");
- instr->PrintTo(&stream);
- PrintF("[MarkLive %s]\n", *stream.ToCString());
}
+}
+
- return true;
+void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ if (ref != NULL) {
+ ref->PrintTo(&stream);
+ } else {
+ stream.Add("root ");
+ }
+ stream.Add(" -> ");
+ instr->PrintTo(&stream);
+ PrintF("[MarkLive %s]\n", *stream.ToCString());
}
void HDeadCodeEliminationPhase::MarkLiveInstructions() {
- ZoneList<HValue*> worklist(graph()->blocks()->length(), zone());
+ ZoneList<HValue*> worklist(10, zone());
- // Mark initial root instructions for dead code elimination.
+ // Transitively mark all live instructions, starting from roots.
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- if (instr->CannotBeEliminated() && MarkLive(NULL, instr)) {
- worklist.Add(instr, zone());
- }
+ if (instr->CannotBeEliminated()) MarkLive(instr, &worklist);
}
for (int j = 0; j < block->phis()->length(); j++) {
HPhi* phi = block->phis()->at(j);
- if (phi->CannotBeEliminated() && MarkLive(NULL, phi)) {
- worklist.Add(phi, zone());
- }
+ if (phi->CannotBeEliminated()) MarkLive(phi, &worklist);
}
}
- // Transitively mark all inputs of live instructions live.
- while (!worklist.is_empty()) {
- HValue* instr = worklist.RemoveLast();
- for (int i = 0; i < instr->OperandCount(); ++i) {
- if (MarkLive(instr, instr->OperandAt(i))) {
- worklist.Add(instr->OperandAt(i), zone());
- }
- }
- }
+ ASSERT(worklist.is_empty()); // Should have processed everything.
}
@@ -93,10 +97,8 @@ void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (!instr->CheckFlag(HValue::kIsLive)) {
- // Instruction has not been marked live; assume it is dead and remove.
- // TODO(titzer): we don't remove constants because some special ones
- // might be used by later phases and are assumed to be in the graph
- if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL);
+ // Instruction has not been marked live, so remove it.
+ instr->DeleteAndReplaceWith(NULL);
} else {
// Clear the liveness flag to leave the graph clean for the next DCE.
instr->ClearFlag(HValue::kIsLive);
diff --git a/chromium/v8/src/hydrogen-dce.h b/chromium/v8/src/hydrogen-dce.h
index 19749f279a2..2d73b380e40 100644
--- a/chromium/v8/src/hydrogen-dce.h
+++ b/chromium/v8/src/hydrogen-dce.h
@@ -45,7 +45,8 @@ class HDeadCodeEliminationPhase : public HPhase {
}
private:
- bool MarkLive(HValue* ref, HValue* instr);
+ void MarkLive(HValue* instr, ZoneList<HValue*>* worklist);
+ void PrintLive(HValue* ref, HValue* instr);
void MarkLiveInstructions();
void RemoveDeadInstructions();
};
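
The rewritten MarkLive in hydrogen-dce.cc replaces the old push-one-check-one scheme with a single worklist loop: each root is pushed once, and every popped value marks its not-yet-live operands and enqueues them. The same pattern in miniature, with a hypothetical Node type:

    #include <vector>

    struct Node {
      bool live = false;
      std::vector<Node*> operands;
    };

    // Sketch: transitively mark everything reachable from a root as live,
    // marking before enqueueing so no node enters the worklist twice.
    void MarkLive(Node* root, std::vector<Node*>* worklist) {
      if (root->live) return;  // Already processed.
      root->live = true;
      worklist->push_back(root);
      while (!worklist->empty()) {
        Node* n = worklist->back();
        worklist->pop_back();
        for (Node* input : n->operands) {
          if (!input->live) {
            input->live = true;
            worklist->push_back(input);
          }
        }
      }
    }
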
diff --git a/chromium/v8/src/hydrogen-deoptimizing-mark.cc b/chromium/v8/src/hydrogen-deoptimizing-mark.cc
deleted file mode 100644
index 626848e012f..00000000000
--- a/chromium/v8/src/hydrogen-deoptimizing-mark.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-deoptimizing-mark.h"
-
-namespace v8 {
-namespace internal {
-
-void HPropagateDeoptimizingMarkPhase::MarkAsDeoptimizing() {
- HBasicBlock* block = graph()->entry_block();
- ZoneList<HBasicBlock*> stack(graph()->blocks()->length(), zone());
- while (block != NULL) {
- const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
- if (!dominated_blocks->is_empty()) {
- if (block->IsDeoptimizing()) {
- for (int i = 0; i < dominated_blocks->length(); ++i) {
- dominated_blocks->at(i)->MarkAsDeoptimizing();
- }
- }
- for (int i = 1; i < dominated_blocks->length(); ++i) {
- stack.Add(dominated_blocks->at(i), zone());
- }
- block = dominated_blocks->at(0);
- } else if (!stack.is_empty()) {
- // Pop next block from stack.
- block = stack.RemoveLast();
- } else {
- // All blocks processed.
- block = NULL;
- }
- }
-}
-
-
-void HPropagateDeoptimizingMarkPhase::NullifyUnreachableInstructions() {
- if (!FLAG_unreachable_code_elimination) return;
- for (int i = 0; i < graph()->blocks()->length(); ++i) {
- HBasicBlock* block = graph()->blocks()->at(i);
- bool nullify = false;
- const ZoneList<HBasicBlock*>* predecessors = block->predecessors();
- int predecessors_length = predecessors->length();
- bool all_predecessors_deoptimizing = (predecessors_length > 0);
- for (int j = 0; j < predecessors_length; ++j) {
- if (!predecessors->at(j)->IsDeoptimizing()) {
- all_predecessors_deoptimizing = false;
- break;
- }
- }
- if (all_predecessors_deoptimizing) nullify = true;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- // Leave the basic structure of the graph intact.
- if (instr->IsBlockEntry()) continue;
- if (instr->IsControlInstruction()) continue;
- if (instr->IsSimulate()) continue;
- if (instr->IsEnterInlined()) continue;
- if (instr->IsLeaveInlined()) continue;
- if (nullify) {
- HInstruction* last_dummy = NULL;
- for (int j = 0; j < instr->OperandCount(); ++j) {
- HValue* operand = instr->OperandAt(j);
- // Insert an HDummyUse for each operand, unless the operand
- // is an HDummyUse itself. If it's even from the same block,
- // remember it as a potential replacement for the instruction.
- if (operand->IsDummyUse()) {
- if (operand->block() == instr->block() &&
- last_dummy == NULL) {
- last_dummy = HInstruction::cast(operand);
- }
- continue;
- }
- if (operand->IsControlInstruction()) {
- // Inserting a dummy use for a value that's not defined anywhere
- // will fail. Some instructions define fake inputs on such
- // values as control flow dependencies.
- continue;
- }
- HDummyUse* dummy = new(graph()->zone()) HDummyUse(operand);
- dummy->InsertBefore(instr);
- last_dummy = dummy;
- }
- if (last_dummy == NULL) last_dummy = graph()->GetConstant1();
- instr->DeleteAndReplaceWith(last_dummy);
- continue;
- }
- if (instr->IsDeoptimize()) {
- ASSERT(block->IsDeoptimizing());
- nullify = true;
- }
- }
- }
-}
-
-
-void HPropagateDeoptimizingMarkPhase::Run() {
- // Skip this phase if there is nothing to be done anyway.
- if (!graph()->has_soft_deoptimize()) return;
- MarkAsDeoptimizing();
- NullifyUnreachableInstructions();
-}
-
-} } // namespace v8::internal
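
The deleted HPropagateDeoptimizingMarkPhase above walked the dominator tree with an explicit stack, pushing the deoptimizing mark from each marked block down to the blocks it dominates. The traversal idiom, sketched with a minimal block type:

    #include <vector>

    struct Block {
      bool deoptimizing = false;
      std::vector<Block*> dominated;  // Blocks immediately dominated by this one.
    };

    // Sketch of the deleted walk: push the deoptimizing mark down the
    // dominator tree with an explicit stack instead of recursion.
    void MarkAsDeoptimizing(Block* entry) {
      std::vector<Block*> stack;
      stack.push_back(entry);
      while (!stack.empty()) {
        Block* block = stack.back();
        stack.pop_back();
        for (Block* child : block->dominated) {
          if (block->deoptimizing) child->deoptimizing = true;
          stack.push_back(child);
        }
      }
    }
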
diff --git a/chromium/v8/src/hydrogen-environment-liveness.cc b/chromium/v8/src/hydrogen-environment-liveness.cc
index fad9755e5c7..d7501ac49e6 100644
--- a/chromium/v8/src/hydrogen-environment-liveness.cc
+++ b/chromium/v8/src/hydrogen-environment-liveness.cc
@@ -201,7 +201,7 @@ void HEnvironmentLivenessAnalysisPhase::Run() {
HBasicBlock* block = graph()->blocks()->at(block_id);
UpdateLivenessAtBlockEnd(block, &live);
- for (HInstruction* instr = block->last(); instr != NULL;
+ for (HInstruction* instr = block->end(); instr != NULL;
instr = instr->previous()) {
UpdateLivenessAtInstruction(instr, &live);
}
diff --git a/chromium/v8/src/hydrogen-escape-analysis.cc b/chromium/v8/src/hydrogen-escape-analysis.cc
index 997e4f9445f..10230199233 100644
--- a/chromium/v8/src/hydrogen-escape-analysis.cc
+++ b/chromium/v8/src/hydrogen-escape-analysis.cc
@@ -154,9 +154,8 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
HValue* value = state->map_value();
// TODO(mstarzinger): This will narrow a map check against a set of maps
// down to the first element in the set. Revisit and fix this.
- Handle<Map> map_object = mapcheck->map_set()->first();
- UniqueValueId map_id = mapcheck->map_unique_ids()->first();
- HCheckValue* check = HCheckValue::New(zone, NULL, value, map_object, map_id);
+ HCheckValue* check = HCheckValue::New(
+ zone, NULL, value, mapcheck->first_map(), false);
check->InsertBefore(mapcheck);
return check;
}
@@ -307,7 +306,7 @@ void HEscapeAnalysisPhase::PerformScalarReplacement() {
number_of_objects_++;
block_states_.Clear();
- // Perform actual analysis steps.
+ // Perform actual analysis step.
AnalyzeDataFlow(allocate);
cumulative_values_ += number_of_values_;
@@ -321,8 +320,13 @@ void HEscapeAnalysisPhase::Run() {
// TODO(mstarzinger): We disable escape analysis with OSR for now, because
// spill slots might be uninitialized. Needs investigation.
if (graph()->has_osr()) return;
- CollectCapturedValues();
- PerformScalarReplacement();
+ int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
+ for (int i = 0; i < max_fixpoint_iteration_count; i++) {
+ CollectCapturedValues();
+ if (captured_.is_empty()) break;
+ PerformScalarReplacement();
+ captured_.Clear();
+ }
}
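
The escape-analysis Run() above now iterates collect-and-replace to a fixpoint, bounded by FLAG_escape_analysis_iterations, because scalar replacement can expose allocations that were not captured on the previous round. A sketch of just that control shape, with hypothetical stand-ins for the two phase steps:

    #include <vector>

    // Hypothetical stand-ins for CollectCapturedValues() and
    // PerformScalarReplacement(); only the loop shape mirrors the diff.
    struct EscapeAnalysisSketch {
      int pending_rounds = 3;  // Pretend three rounds of captures remain.

      std::vector<int> CollectCaptured() {
        return std::vector<int>(pending_rounds, 0);
      }
      void ReplaceWithScalars(const std::vector<int>&) { --pending_rounds; }

      void RunToFixpoint(int max_iterations) {
        for (int i = 0; i < max_iterations; i++) {
          std::vector<int> captured = CollectCaptured();
          if (captured.empty()) break;   // Fixpoint reached early.
          ReplaceWithScalars(captured);  // May expose new captures next round.
        }
      }
    };
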
diff --git a/chromium/v8/src/hydrogen-flow-engine.h b/chromium/v8/src/hydrogen-flow-engine.h
new file mode 100644
index 00000000000..4e1275546f6
--- /dev/null
+++ b/chromium/v8/src/hydrogen-flow-engine.h
@@ -0,0 +1,242 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_FLOW_ENGINE_H_
+#define V8_HYDROGEN_FLOW_ENGINE_H_
+
+#include "hydrogen.h"
+#include "hydrogen-instructions.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// An example implementation of effects that doesn't collect anything.
+class NoEffects : public ZoneObject {
+ public:
+ explicit NoEffects(Zone* zone) { }
+
+ inline bool Disabled() {
+ return true; // Nothing to do.
+ }
+ template <class State>
+ inline void Apply(State* state) {
+ // do nothing.
+ }
+ inline void Process(HInstruction* value, Zone* zone) {
+ // do nothing.
+ }
+ inline void Union(NoEffects* other, Zone* zone) {
+ // do nothing.
+ }
+};
+
+
+// An example implementation of state that doesn't track anything.
+class NoState {
+ public:
+ inline NoState* Copy(HBasicBlock* succ, Zone* zone) {
+ return this;
+ }
+ inline NoState* Process(HInstruction* value, Zone* zone) {
+ return this;
+ }
+ inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) {
+ return this;
+ }
+};
+
+
+// This class implements an engine that can drive flow-sensitive analyses
+// over a graph of basic blocks, either one block at a time (local analysis)
+// or over the entire graph (global analysis). The flow engine is parameterized
+// by the type of the state and the effects collected while walking over the
+// graph.
+//
+// The "State" collects which facts are known while passing over instructions
+// in control flow order, and the "Effects" collect summary information about
+// which facts could be invalidated on other control flow paths. The effects
+// are necessary to correctly handle loops in the control flow graph without
+// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit
+// each block at most twice: once for state, and optionally once for effects.
+//
+// The flow engine requires the State and Effects classes to implement methods
+// like the example NoState and NoEffects above. It's not necessary to provide
+// an effects implementation for local analysis.
+template <class State, class Effects>
+class HFlowEngine {
+ public:
+ HFlowEngine(HGraph* graph, Zone* zone)
+ : graph_(graph),
+ zone_(zone),
+#if DEBUG
+ pred_counts_(graph->blocks()->length(), zone),
+#endif
+ block_states_(graph->blocks()->length(), zone),
+ loop_effects_(graph->blocks()->length(), zone) {
+ loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone);
+ }
+
+ // Local analysis. Iterates over the instructions in the given block.
+ State* AnalyzeOneBlock(HBasicBlock* block, State* state) {
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
+ return state;
+ }
+
+ // Global analysis. Iterates over all blocks that are dominated by the given
+ // block, starting with the initial state. Computes effects for nested loops.
+ void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) {
+ InitializeStates();
+ SetStateAt(root, initial);
+
+ // Iterate all dominated blocks starting from the given start block.
+ for (int i = root->block_id(); i < graph_->blocks()->length(); i++) {
+ HBasicBlock* block = graph_->blocks()->at(i);
+
+ // Skip blocks not dominated by the root node.
+ if (SkipNonDominatedBlock(root, block)) continue;
+ State* state = StateAt(block);
+
+ if (block->IsLoopHeader()) {
+ // Apply loop effects before analyzing loop body.
+ ComputeLoopEffects(block)->Apply(state);
+ } else {
+ // Must have visited all predecessors before this block.
+ CheckPredecessorCount(block);
+ }
+
+ // Go through all instructions of the current block, updating the state.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ state = state->Process(it.Current(), zone_);
+ }
+
+ // Propagate the block state forward to all successor blocks.
+ int max = block->end()->SuccessorCount();
+ for (int i = 0; i < max; i++) {
+ HBasicBlock* succ = block->end()->SuccessorAt(i);
+ IncrementPredecessorCount(succ);
+ if (StateAt(succ) == NULL) {
+ // This is the first state to reach the successor.
+ if (max == 1 && succ->predecessors()->length() == 1) {
+ // Optimization: successor can inherit this state.
+ SetStateAt(succ, state);
+ } else {
+ // Successor needs a copy of the state.
+ SetStateAt(succ, state->Copy(succ, zone_));
+ }
+ } else {
+ // Merge the current state with the state already at the successor.
+ SetStateAt(succ, state->Merge(succ, StateAt(succ), zone_));
+ }
+ }
+ }
+ }
+
+ private:
+ // Computes and caches the loop effects for the loop which has the given
+ // block as its loop header.
+ Effects* ComputeLoopEffects(HBasicBlock* block) {
+ ASSERT(block->IsLoopHeader());
+ Effects* effects = loop_effects_[block->block_id()];
+ if (effects != NULL) return effects; // Already analyzed this loop.
+
+ effects = new(zone_) Effects(zone_);
+ loop_effects_[block->block_id()] = effects;
+ if (effects->Disabled()) return effects; // No effects for this analysis.
+
+ HLoopInformation* loop = block->loop_information();
+ int end = loop->GetLastBackEdge()->block_id();
+ // Process the blocks between the header and the end.
+ for (int i = block->block_id(); i <= end; i++) {
+ HBasicBlock* member = graph_->blocks()->at(i);
+ if (i != block->block_id() && member->IsLoopHeader()) {
+ // Recursively compute and cache the effects of the nested loop.
+ ASSERT(member->loop_information()->parent_loop() == loop);
+ Effects* nested = ComputeLoopEffects(member);
+ effects->Union(nested, zone_);
+ // Skip the nested loop's blocks.
+ i = member->loop_information()->GetLastBackEdge()->block_id();
+ } else {
+ // Process all the effects of the block.
+ ASSERT(member->current_loop() == loop);
+ for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
+ effects->Process(it.Current(), zone_);
+ }
+ }
+ }
+ return effects;
+ }
+
+ inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) {
+ if (root->block_id() == 0) return false; // Visit the whole graph.
+ if (root == other) return false; // Always visit the root.
+ return !root->Dominates(other); // Only visit dominated blocks.
+ }
+
+ inline State* StateAt(HBasicBlock* block) {
+ return block_states_.at(block->block_id());
+ }
+
+ inline void SetStateAt(HBasicBlock* block, State* state) {
+ block_states_.Set(block->block_id(), state);
+ }
+
+ inline void InitializeStates() {
+#if DEBUG
+ pred_counts_.Rewind(0);
+ pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_);
+#endif
+ block_states_.Rewind(0);
+ block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_);
+ }
+
+ inline void CheckPredecessorCount(HBasicBlock* block) {
+ ASSERT(block->predecessors()->length() == pred_counts_[block->block_id()]);
+ }
+
+ inline void IncrementPredecessorCount(HBasicBlock* block) {
+#if DEBUG
+ pred_counts_[block->block_id()]++;
+#endif
+ }
+
+ HGraph* graph_; // The hydrogen graph.
+ Zone* zone_; // Temporary zone.
+#if DEBUG
+ ZoneList<int> pred_counts_; // Finished predecessors (by block id).
+#endif
+ ZoneList<State*> block_states_; // Block states (by block id).
+ ZoneList<Effects*> loop_effects_; // Loop effects (by block id).
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_FLOW_ENGINE_H_
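
A note on the engine added above, before the next file: it propagates an analysis state forward along the control-flow graph, and the interesting part is the ownership rule at block exits. A successor that is the sole target of its only predecessor inherits the state object outright; any other first-reached successor gets a private copy; a successor reached a second time merges. The following is a minimal standalone sketch of that inherit/copy/merge rule on a diamond CFG. It is not V8 code: Block, State, and the min-lattice Merge are illustrative assumptions.

#include <cstdio>
#include <vector>

// Toy analysis state; Merge() joins two states by taking the lattice
// minimum. Allocations are never freed, mimicking zone allocation.
struct State {
  int lattice_value;
  State* Copy() const { return new State(*this); }
  State* Merge(const State* other) {
    if (other->lattice_value < lattice_value) {
      lattice_value = other->lattice_value;
    }
    return this;
  }
};

struct Block {
  std::vector<Block*> successors;
  std::vector<Block*> predecessors;
  State* state = nullptr;
};

// Propagate |state| at the end of |block| into all successors, using the
// same rule as the engine above: inherit, copy, or merge.
void PropagateForward(Block* block, State* state) {
  const size_t count = block->successors.size();
  for (size_t i = 0; i < count; ++i) {
    Block* succ = block->successors[i];
    if (succ->state == nullptr) {
      // First state to reach the successor.
      succ->state = (count == 1 && succ->predecessors.size() == 1)
                        ? state           // Sole successor/predecessor: inherit.
                        : state->Copy();  // Otherwise: private copy.
    } else {
      // Reached on another path already: merge.
      succ->state = succ->state->Merge(state);
    }
  }
}

int main() {
  // Diamond CFG: a -> {b, c}, b -> d, c -> d.
  Block a, b, c, d;
  a.successors = {&b, &c};
  b.predecessors = {&a}; b.successors = {&d};
  c.predecessors = {&a}; c.successors = {&d};
  d.predecessors = {&b, &c};

  State start{7};
  PropagateForward(&a, &start);   // b and c each receive a copy of {7}.
  b.state->lattice_value = 3;     // Pretend b's instructions refined it.
  PropagateForward(&b, b.state);  // d has two predecessors: gets a copy of {3}.
  PropagateForward(&c, c.state);  // Second arrival at d: merge -> min(3, 7).
  std::printf("state at join: %d\n", d.state->lattice_value);  // Prints 3.
  return 0;
}
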
diff --git a/chromium/v8/src/hydrogen-gvn.cc b/chromium/v8/src/hydrogen-gvn.cc
index 9a02a1dcf4b..7553abe206c 100644
--- a/chromium/v8/src/hydrogen-gvn.cc
+++ b/chromium/v8/src/hydrogen-gvn.cc
@@ -396,30 +396,30 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
- int id = block->block_id();
GVNFlagSet side_effects;
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- side_effects.Add(instr->ChangesFlags());
- if (instr->IsDeoptimize()) {
- block_side_effects_[id].RemoveAll();
- side_effects.RemoveAll();
- break;
+ if (block->IsReachable() && !block->IsDeoptimizing()) {
+ int id = block->block_id();
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ side_effects.Add(instr->ChangesFlags());
}
- }
- block_side_effects_[id].Add(side_effects);
+ block_side_effects_[id].Add(side_effects);
- // Loop headers are part of their loop.
- if (block->IsLoopHeader()) {
- loop_side_effects_[id].Add(side_effects);
- }
+ // Loop headers are part of their loop.
+ if (block->IsLoopHeader()) {
+ loop_side_effects_[id].Add(side_effects);
+ }
- // Propagate loop side effects upwards.
- if (block->HasParentLoopHeader()) {
- int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id].Add(block->IsLoopHeader()
- ? loop_side_effects_[id]
- : side_effects);
+ // Propagate loop side effects upwards.
+ if (block->HasParentLoopHeader()) {
+ HBasicBlock* with_parent = block;
+ if (block->IsLoopHeader()) side_effects = loop_side_effects_[id];
+ do {
+ HBasicBlock* parent_block = with_parent->parent_loop_header();
+ loop_side_effects_[parent_block->block_id()].Add(side_effects);
+ with_parent = parent_block;
+ } while (with_parent->HasParentLoopHeader());
+ }
}
}
}
@@ -436,7 +436,7 @@ SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
uint32_t set_depends_on = 0;
uint32_t set_changes = 0;
for (int bit = 0; bit < kLastFlag; ++bit) {
- if ((flags.ToIntegral() & (1 << bit)) != 0) {
+ if (flags.Contains(static_cast<GVNFlag>(bit))) {
if (bit % 2 == 0) {
set_changes++;
} else {
@@ -453,7 +453,7 @@ SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
offset += OS::SNPrintF(buffer + offset, "changes all except [");
}
for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
+ if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_changes) {
switch (static_cast<GVNFlag>(bit)) {
#define DECLARE_FLAG(type) \
case kChanges##type: \
@@ -482,7 +482,7 @@ GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
offset += OS::SNPrintF(buffer + offset, "depends on all except [");
}
for (int bit = 0; bit < kLastFlag; ++bit) {
- if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
+ if (flags.Contains(static_cast<GVNFlag>(bit)) == positive_depends_on) {
switch (static_cast<GVNFlag>(bit)) {
#define DECLARE_FLAG(type) \
case kDependsOn##type: \
@@ -570,7 +570,8 @@ void HGlobalValueNumberingPhase::ProcessLoopBlock(
}
if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
- TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
+ TRACE_GVN_2("Hoisting loop invariant instruction i%d to block B%d\n",
+ instr->id(), pre_header->block_id());
// Move the instruction out of the loop.
instr->Unlink();
instr->InsertBefore(pre_header->end());
@@ -609,7 +610,8 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
// If we've disabled code motion or we're in a block that unconditionally
// deoptimizes, don't move any instructions.
- return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
+ return AllowCodeMotion() && !instr->block()->IsDeoptimizing() &&
+ instr->block()->IsReachable();
}
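
The Contains() rewrites in GetGVNFlagsString above pair with a typedef change further down (hydrogen-instructions.h widens GVNFlagSet to EnumSet<GVNFlag, int64_t>). Once more than 32 flags exist, the old test `(flags.ToIntegral() & (1 << bit)) != 0` shifts a 32-bit int and is undefined for bit >= 32, while Contains() builds its mask in the width of the backing type. A self-contained sketch of why, using a hypothetical 48-flag enum; this EnumSet is a simplification, not the exact V8 template.

#include <cstdint>
#include <cstdio>

// A hypothetical flag enum with more than 32 members (only the interesting
// values spelled out).
enum GVNFlag { kFlag0 = 0, kFlag40 = 40, kLastFlag = 48 };

// Simplified EnumSet: a bit set over an enum, parameterized on the storage
// type so masks are computed in that type's width.
template <class E, class T = int>
class EnumSet {
 public:
  explicit EnumSet(T bits = 0) : bits_(bits) {}
  void Add(E element) { bits_ |= Mask(element); }
  bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
  T ToIntegral() const { return bits_; }

 private:
  // static_cast<T>(1) << element shifts in the width of T, so bit 40 is
  // fine when T is int64_t; a plain (1 << 40) would shift a 32-bit int.
  static T Mask(E element) { return static_cast<T>(1) << element; }
  T bits_;
};

int main() {
  EnumSet<GVNFlag, int64_t> flags;
  flags.Add(kFlag40);
  for (int bit = 0; bit < kLastFlag; ++bit) {
    // The new form: mask computed in int64_t, valid for bit >= 32.
    if (flags.Contains(static_cast<GVNFlag>(bit))) {
      std::printf("flag %d is set\n", bit);  // Prints once, for bit 40.
    }
    // The old form, (flags.ToIntegral() & (1 << bit)) != 0, would be
    // undefined behavior at bit 40: 1 << 40 overflows a 32-bit int.
  }
  return 0;
}
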
diff --git a/chromium/v8/src/hydrogen-instructions.cc b/chromium/v8/src/hydrogen-instructions.cc
index 833f00b1b91..d418954aad2 100644
--- a/chromium/v8/src/hydrogen-instructions.cc
+++ b/chromium/v8/src/hydrogen-instructions.cc
@@ -509,6 +509,17 @@ const char* HValue::Mnemonic() const {
}
+bool HValue::CanReplaceWithDummyUses() {
+ return FLAG_unreachable_code_elimination &&
+ !(block()->IsReachable() ||
+ IsBlockEntry() ||
+ IsControlInstruction() ||
+ IsSimulate() ||
+ IsEnterInlined() ||
+ IsLeaveInlined());
+}
+
+
bool HValue::IsInteger32Constant() {
return IsConstant() && HConstant::cast(this)->HasInteger32Value();
}
@@ -730,6 +741,10 @@ void HInstruction::InsertBefore(HInstruction* next) {
next_ = next;
previous_ = prev;
SetBlock(next->block());
+ if (position() == RelocInfo::kNoPosition &&
+ next->position() != RelocInfo::kNoPosition) {
+ set_position(next->position());
+ }
}
@@ -764,6 +779,10 @@ void HInstruction::InsertAfter(HInstruction* previous) {
if (block->last() == previous) {
block->set_last(this);
}
+ if (position() == RelocInfo::kNoPosition &&
+ previous->position() != RelocInfo::kNoPosition) {
+ set_position(previous->position());
+ }
}
@@ -928,6 +947,25 @@ void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
}
+Range* HBoundsCheck::InferRange(Zone* zone) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32() && length()->HasRange()) {
+ int upper = length()->range()->upper() - (allow_equality() ? 0 : 1);
+ int lower = 0;
+
+ Range* result = new(zone) Range(lower, upper);
+ if (index()->HasRange()) {
+ result->Intersect(index()->range());
+ }
+
+ // In case of Smi representation, clamp result to Smi::kMaxValue.
+ if (r.IsSmi()) result->ClampToSmi();
+ return result;
+ }
+ return HValue::InferRange(zone);
+}
+
+
void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
stream->Add("base: ");
base_index()->PrintNameTo(stream);
@@ -973,6 +1011,9 @@ void HCallNewArray::PrintDataTo(StringStream* stream) {
void HCallRuntime::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
+ if (save_doubles() == kSaveFPRegs) {
+ stream->Add("[save doubles] ");
+ }
stream->Add("#%d", argument_count());
}
@@ -1050,9 +1091,24 @@ Representation HBranch::observed_input_representation(int index) {
}
+bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ HValue* value = this->value();
+ if (value->EmitAtUses()) {
+ ASSERT(value->IsConstant());
+ ASSERT(!value->representation().IsDouble());
+ *block = HConstant::cast(value)->BooleanValue()
+ ? FirstSuccessor()
+ : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" (%p)", *map());
+ stream->Add(" (%p)", *map().handle());
HControlInstruction::PrintDataTo(stream);
}
@@ -1140,6 +1196,20 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (value()->representation().IsSpecialization()) {
+ if (compares_number_type()) {
+ *block = FirstSuccessor();
+ } else {
+ *block = SecondSuccessor();
+ }
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
void HCheckMapValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
@@ -1211,6 +1281,26 @@ HValue* HBitwise::Canonicalize() {
}
+Representation HAdd::RepresentationFromInputs() {
+ Representation left_rep = left()->representation();
+ if (left_rep.IsExternal()) {
+ return Representation::External();
+ }
+ return HArithmeticBinaryOperation::RepresentationFromInputs();
+}
+
+
+Representation HAdd::RequiredInputRepresentation(int index) {
+ if (index == 2) {
+ Representation left_rep = left()->representation();
+ if (left_rep.IsExternal()) {
+ return Representation::Integer32();
+ }
+ }
+ return HArithmeticBinaryOperation::RequiredInputRepresentation(index);
+}
+
+
static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
return arg1->representation().IsSpecialization() &&
arg2->EqualsInteger32Constant(identity);
@@ -1244,6 +1334,16 @@ HValue* HMul::Canonicalize() {
}
+bool HMul::MulMinusOne() {
+ if (left()->EqualsInteger32Constant(-1) ||
+ right()->EqualsInteger32Constant(-1)) {
+ return true;
+ }
+
+ return false;
+}
+
+
HValue* HMod::Canonicalize() {
return this;
}
@@ -1274,6 +1374,23 @@ void HTypeof::PrintDataTo(StringStream* stream) {
}
+HInstruction* HForceRepresentation::New(Zone* zone, HValue* context,
+ HValue* value, Representation required_representation) {
+ if (FLAG_fold_constants && value->IsConstant()) {
+ HConstant* c = HConstant::cast(value);
+ if (c->HasNumberValue()) {
+ double double_res = c->DoubleValue();
+ if (TypeInfo::IsInt32Double(double_res)) {
+ return HConstant::New(zone, context,
+ static_cast<int32_t>(double_res),
+ required_representation);
+ }
+ }
+ }
+ return new(zone) HForceRepresentation(value, required_representation);
+}
+
+
void HForceRepresentation::PrintDataTo(StringStream* stream) {
stream->Add("%s ", representation().Mnemonic());
value()->PrintNameTo(stream);
@@ -1324,7 +1441,6 @@ HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathFloor) {
HValue* val = value();
- if (val->IsChange()) val = HChange::cast(val)->value();
if (val->IsDiv() && (val->UseCount() == 1)) {
HDiv* hdiv = HDiv::cast(val);
HValue* left = hdiv->left();
@@ -1363,17 +1479,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
}
HMathFloorOfDiv* instr =
HMathFloorOfDiv::New(block()->zone(), context(), new_left, new_right);
- // Replace this HMathFloor instruction by the new HMathFloorOfDiv.
instr->InsertBefore(this);
- ReplaceAllUsesWith(instr);
- Kill();
- // We know the division had no other uses than this HMathFloor. Delete it.
- // Dead code elimination will deal with |left| and |right| if
- // appropriate.
- hdiv->DeleteAndReplaceWith(NULL);
-
- // Return NULL to remove this instruction from the graph.
- return NULL;
+ return instr;
}
}
return this;
@@ -1438,11 +1545,9 @@ void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
HStoreNamedField* store = HStoreNamedField::cast(dominator);
if (!store->has_transition() || store->object() != value()) return;
HConstant* transition = HConstant::cast(store->transition());
- for (int i = 0; i < map_set()->length(); i++) {
- if (transition->UniqueValueIdsMatch(map_unique_ids_.at(i))) {
- DeleteAndReplaceWith(NULL);
- return;
- }
+ if (map_set_.Contains(transition->GetUnique())) {
+ DeleteAndReplaceWith(NULL);
+ return;
}
}
}
@@ -1450,9 +1555,9 @@ void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" [%p", *map_set()->first());
- for (int i = 1; i < map_set()->length(); ++i) {
- stream->Add(",%p", *map_set()->at(i));
+ stream->Add(" [%p", *map_set_.at(0).handle());
+ for (int i = 1; i < map_set_.size(); ++i) {
+ stream->Add(",%p", *map_set_.at(i).handle());
}
stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
}
@@ -1461,13 +1566,13 @@ void HCheckMaps::PrintDataTo(StringStream* stream) {
void HCheckValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
- object()->ShortPrint(stream);
+ object().handle()->ShortPrint(stream);
}
HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->UniqueValueIdsMatch(object_unique_id_))
+ HConstant::cast(value())->GetUnique() == object_)
? NULL
: this;
}
@@ -1562,6 +1667,11 @@ Range* HConstant::InferRange(Zone* zone) {
}
+int HPhi::position() const {
+ return block()->first()->position();
+}
+
+
Range* HPhi::InferRange(Zone* zone) {
Representation r = representation();
if (r.IsSmiOrInteger32()) {
@@ -1631,10 +1741,13 @@ Range* HMul::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
- if (!res->MulAndCheckOverflow(r, b)) {
- // Clearing the kCanOverflow flag when kAllUsesAreTruncatingToInt32
- // would be wrong, because truncated integer multiplication is too
- // precise and therefore not the same as converting to Double and back.
+ if (!res->MulAndCheckOverflow(r, b) ||
+ (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+ (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) &&
+ MulMinusOne())) {
+ // Truncated int multiplication is too precise and therefore not the
+ // same as converting to Double and back.
+      // Handle truncated integer multiplication by -1 specially.
ClearFlag(kCanOverflow);
}
res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
@@ -1656,7 +1769,10 @@ Range* HDiv::InferRange(Zone* zone) {
result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
(a->CanBeMinusZero() ||
(a->CanBeZero() && b->CanBeNegative())));
- if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+ if (!a->Includes(kMinInt) ||
+ !b->Includes(-1) ||
+ CheckFlag(kAllUsesTruncatingToInt32)) {
+ // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32.
ClearFlag(HValue::kCanOverflow);
}
@@ -2334,23 +2450,38 @@ void HSimulate::ReplayEnvironment(HEnvironment* env) {
}
+static void ReplayEnvironmentNested(const ZoneList<HValue*>* values,
+ HCapturedObject* other) {
+ for (int i = 0; i < values->length(); ++i) {
+ HValue* value = values->at(i);
+ if (value->IsCapturedObject()) {
+ if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) {
+ values->at(i) = other;
+ } else {
+ ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other);
+ }
+ }
+ }
+}
+
+
// Replay captured objects by replacing all captured objects with the
// same capture id in the current and all outer environments.
void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
ASSERT(env != NULL);
while (env != NULL) {
- for (int i = 0; i < env->length(); ++i) {
- HValue* value = env->values()->at(i);
- if (value->IsCapturedObject() &&
- HCapturedObject::cast(value)->capture_id() == this->capture_id()) {
- env->SetValueAt(i, this);
- }
- }
+ ReplayEnvironmentNested(env->values(), this);
env = env->outer();
}
}
+void HCapturedObject::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d ", capture_id());
+ HDematerializedObject::PrintDataTo(stream);
+}
+
+
void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
Zone* zone) {
ASSERT(return_target->IsInlineReturnTarget());
@@ -2372,8 +2503,7 @@ static bool IsInteger32(double value) {
HConstant::HConstant(Handle<Object> handle, Representation r)
: HTemplateInstruction<0>(HType::TypeFromValue(handle)),
- handle_(handle),
- unique_id_(),
+ object_(Unique<Object>::CreateUninitialized(handle)),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2382,29 +2512,29 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
is_not_in_new_space_(true),
is_cell_(false),
boolean_value_(handle->BooleanValue()) {
- if (handle_->IsHeapObject()) {
+ if (handle->IsHeapObject()) {
Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
is_not_in_new_space_ = !heap->InNewSpace(*handle);
}
- if (handle_->IsNumber()) {
- double n = handle_->Number();
+ if (handle->IsNumber()) {
+ double n = handle->Number();
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
double_value_ = n;
has_double_value_ = true;
+    // TODO(titzer): if this heap number is in new space, tenure a new one.
} else {
- is_internalized_string_ = handle_->IsInternalizedString();
+ is_internalized_string_ = handle->IsInternalizedString();
}
- is_cell_ = !handle_.is_null() &&
- (handle_->IsCell() || handle_->IsPropertyCell());
+ is_cell_ = !handle.is_null() &&
+ (handle->IsCell() || handle->IsPropertyCell());
Initialize(r);
}
-HConstant::HConstant(Handle<Object> handle,
- UniqueValueId unique_id,
+HConstant::HConstant(Unique<Object> unique,
Representation r,
HType type,
bool is_internalize_string,
@@ -2412,8 +2542,7 @@ HConstant::HConstant(Handle<Object> handle,
bool is_cell,
bool boolean_value)
: HTemplateInstruction<0>(type),
- handle_(handle),
- unique_id_(unique_id),
+ object_(unique),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2422,36 +2551,17 @@ HConstant::HConstant(Handle<Object> handle,
is_not_in_new_space_(is_not_in_new_space),
is_cell_(is_cell),
boolean_value_(boolean_value) {
- ASSERT(!handle.is_null());
+ ASSERT(!unique.handle().is_null());
ASSERT(!type.IsTaggedNumber());
Initialize(r);
}
-HConstant::HConstant(Handle<Map> handle,
- UniqueValueId unique_id)
- : HTemplateInstruction<0>(HType::Tagged()),
- handle_(handle),
- unique_id_(unique_id),
- has_smi_value_(false),
- has_int32_value_(false),
- has_double_value_(false),
- has_external_reference_value_(false),
- is_internalized_string_(false),
- is_not_in_new_space_(true),
- is_cell_(false),
- boolean_value_(false) {
- ASSERT(!handle.is_null());
- Initialize(Representation::Tagged());
-}
-
-
HConstant::HConstant(int32_t integer_value,
Representation r,
bool is_not_in_new_space,
- Handle<Object> optional_handle)
- : handle_(optional_handle),
- unique_id_(),
+ Unique<Object> object)
+ : object_(object),
has_smi_value_(Smi::IsValid(integer_value)),
has_int32_value_(true),
has_double_value_(true),
@@ -2470,9 +2580,8 @@ HConstant::HConstant(int32_t integer_value,
HConstant::HConstant(double double_value,
Representation r,
bool is_not_in_new_space,
- Handle<Object> optional_handle)
- : handle_(optional_handle),
- unique_id_(),
+ Unique<Object> object)
+ : object_(object),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
has_external_reference_value_(false),
@@ -2490,6 +2599,7 @@ HConstant::HConstant(double double_value,
HConstant::HConstant(ExternalReference reference)
: HTemplateInstruction<0>(HType::None()),
+ object_(Unique<Object>(Handle<Object>::null())),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2503,14 +2613,6 @@ HConstant::HConstant(ExternalReference reference)
}
-static void PrepareConstant(Handle<Object> object) {
- if (!object->IsJSObject()) return;
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (!js_object->map()->is_deprecated()) return;
- JSObject::TryMigrateInstance(js_object);
-}
-
-
void HConstant::Initialize(Representation r) {
if (r.IsNone()) {
if (has_smi_value_ && SmiValuesAre31Bits()) {
@@ -2522,7 +2624,14 @@ void HConstant::Initialize(Representation r) {
} else if (has_external_reference_value_) {
r = Representation::External();
} else {
- PrepareConstant(handle_);
+ Handle<Object> object = object_.handle();
+ if (object->IsJSObject()) {
+ // Try to eagerly migrate JSObjects that have deprecated maps.
+ Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ if (js_object->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(js_object);
+ }
+ }
r = Representation::Tagged();
}
}
@@ -2533,9 +2642,12 @@ void HConstant::Initialize(Representation r) {
bool HConstant::EmitAtUses() {
ASSERT(IsLinked());
- if (block()->graph()->has_osr()) {
- return block()->graph()->IsStandardConstant(this);
+ if (block()->graph()->has_osr() &&
+ block()->graph()->IsStandardConstant(this)) {
+ // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
+ return true;
}
+ if (UseCount() == 0) return true;
if (IsCell()) return false;
if (representation().IsDouble()) return false;
return true;
@@ -2548,17 +2660,16 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsDouble() && !has_double_value_) return NULL;
if (r.IsExternal() && !has_external_reference_value_) return NULL;
if (has_int32_value_) {
- return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, handle_);
+ return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, object_);
}
if (has_double_value_) {
- return new(zone) HConstant(double_value_, r, is_not_in_new_space_, handle_);
+ return new(zone) HConstant(double_value_, r, is_not_in_new_space_, object_);
}
if (has_external_reference_value_) {
return new(zone) HConstant(external_reference_value_);
}
- ASSERT(!handle_.is_null());
- return new(zone) HConstant(handle_,
- unique_id_,
+ ASSERT(!object_.handle().is_null());
+ return new(zone) HConstant(object_,
r,
type_,
is_internalized_string_,
@@ -2574,16 +2685,12 @@ Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
res = new(zone) HConstant(int32_value_,
Representation::Integer32(),
is_not_in_new_space_,
- handle_);
+ object_);
} else if (has_double_value_) {
res = new(zone) HConstant(DoubleToInt32(double_value_),
Representation::Integer32(),
is_not_in_new_space_,
- handle_);
- } else {
- ASSERT(!HasNumberValue());
- Maybe<HConstant*> number = CopyToTruncatedNumber(zone);
- if (number.has_value) return number.value->CopyToTruncatedInt32(zone);
+ object_);
}
return Maybe<HConstant*>(res != NULL, res);
}
@@ -2615,6 +2722,9 @@ void HConstant::PrintDataTo(StringStream* stream) {
} else {
handle(Isolate::Current())->ShortPrint(stream);
}
+ if (!is_not_in_new_space_) {
+ stream->Add("[new space] ");
+ }
}
@@ -2631,6 +2741,12 @@ void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
+
+ if (representation().IsSmi() && HasNonSmiUse()) {
+ UpdateRepresentation(
+ Representation::Integer32(), h_infer, "use requirements");
+ }
+
if (observed_output_representation_.IsNone()) {
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
@@ -2638,11 +2754,6 @@ void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
new_rep = RepresentationFromOutput();
UpdateRepresentation(new_rep, h_infer, "output");
}
-
- if (representation().IsSmi() && HasNonSmiUse()) {
- UpdateRepresentation(
- Representation::Integer32(), h_infer, "use requirements");
- }
}
@@ -2669,7 +2780,7 @@ bool HBinaryOperation::IgnoreObservedOutputRepresentation(
return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
(current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
// Mul in Integer32 mode would be too precise.
- !this->IsMul();
+ (!this->IsMul() || HMul::cast(this)->MulMinusOne());
}
@@ -2809,6 +2920,18 @@ Range* HShl::InferRange(Zone* zone) {
Range* HLoadNamedField::InferRange(Zone* zone) {
+ if (access().representation().IsInteger8()) {
+ return new(zone) Range(kMinInt8, kMaxInt8);
+ }
+ if (access().representation().IsUInteger8()) {
+ return new(zone) Range(kMinUInt8, kMaxUInt8);
+ }
+ if (access().representation().IsInteger16()) {
+ return new(zone) Range(kMinInt16, kMaxInt16);
+ }
+ if (access().representation().IsUInteger16()) {
+ return new(zone) Range(kMinUInt16, kMaxUInt16);
+ }
if (access().IsStringLength()) {
return new(zone) Range(0, String::kMaxLength);
}
@@ -2818,16 +2941,15 @@ Range* HLoadNamedField::InferRange(Zone* zone) {
Range* HLoadKeyed::InferRange(Zone* zone) {
switch (elements_kind()) {
- case EXTERNAL_PIXEL_ELEMENTS:
- return new(zone) Range(0, 255);
case EXTERNAL_BYTE_ELEMENTS:
- return new(zone) Range(-128, 127);
+ return new(zone) Range(kMinInt8, kMaxInt8);
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return new(zone) Range(0, 255);
+ case EXTERNAL_PIXEL_ELEMENTS:
+ return new(zone) Range(kMinUInt8, kMaxUInt8);
case EXTERNAL_SHORT_ELEMENTS:
- return new(zone) Range(-32768, 32767);
+ return new(zone) Range(kMinInt16, kMaxInt16);
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return new(zone) Range(0, 65535);
+ return new(zone) Range(kMinUInt16, kMaxUInt16);
default:
return HValue::InferRange(zone);
}
@@ -2866,18 +2988,44 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
}
-void HCompareHoleAndBranch::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
+bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (left()->IsConstant() && right()->IsConstant()) {
+ bool comparison_result =
+ HConstant::cast(left())->Equals(HConstant::cast(right()));
+ *block = comparison_result
+ ? FirstSuccessor()
+ : SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
}
void HCompareHoleAndBranch::InferRepresentation(
HInferRepresentationPhase* h_infer) {
- ChangeRepresentation(object()->representation());
+ ChangeRepresentation(value()->representation());
+}
+
+
+bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+ if (value()->representation().IsSmiOrInteger32()) {
+ // A Smi or Integer32 cannot contain minus zero.
+ *block = SecondSuccessor();
+ return true;
+ }
+ *block = NULL;
+ return false;
+}
+
+
+void HCompareMinusZeroAndBranch::InferRepresentation(
+ HInferRepresentationPhase* h_infer) {
+ ChangeRepresentation(value()->representation());
}
+
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@@ -2944,22 +3092,17 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
HConstant::cast(value)->HasMap(map)) {
- check_map->omit(info);
+ // TODO(titzer): collect dependent map checks into a list.
+ check_map->omit_ = true;
+ if (map->CanTransition()) {
+ map->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ }
}
return check_map;
}
-void HCheckMaps::FinalizeUniqueValueId() {
- if (!map_unique_ids_.is_empty()) return;
- Zone* zone = block()->zone();
- map_unique_ids_.Initialize(map_set_.length(), zone);
- for (int i = 0; i < map_set_.length(); i++) {
- map_unique_ids_.Add(UniqueValueId(map_set_.at(i)), zone);
- }
-}
-
-
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -3155,19 +3298,19 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- ElementsKind from_kind = original_map()->elements_kind();
- ElementsKind to_kind = transitioned_map()->elements_kind();
+ ElementsKind from_kind = original_map().handle()->elements_kind();
+ ElementsKind to_kind = transitioned_map().handle()->elements_kind();
stream->Add(" %p [%s] -> %p [%s]",
- *original_map(),
+ *original_map().handle(),
ElementsAccessor::ForKind(from_kind)->name(),
- *transitioned_map(),
+ *transitioned_map().handle(),
ElementsAccessor::ForKind(to_kind)->name());
if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
}
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p]", *cell());
+ stream->Add("[%p]", *cell().handle());
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
@@ -3195,7 +3338,7 @@ void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p] = ", *cell());
+ stream->Add("[%p] = ", *cell().handle());
value()->PrintNameTo(stream);
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
@@ -3318,7 +3461,7 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- if (new_dominator_size > Page::kMaxNonCodeHeapObjectSize) {
+ if (new_dominator_size > isolate()->heap()->MaxRegularSpaceAllocationSize()) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
id(), Mnemonic(), dominator_allocate->id(),
@@ -3356,7 +3499,7 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HInnerAllocatedObject::New(zone,
context(),
dominator_allocate,
- dominator_size_constant,
+ dominator_size,
type());
dominated_allocate_instr->InsertBefore(this);
DeleteAndReplaceWith(dominated_allocate_instr);
@@ -3452,17 +3595,15 @@ void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
ASSERT(filler_free_space_size_ == NULL);
Zone* zone = block()->zone();
- int32_t dominator_size =
- HConstant::cast(dominating_allocate_->size())->GetInteger32Constant();
HInstruction* free_space_instr =
HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
- dominator_size, type());
+ dominating_allocate_->size(), type());
free_space_instr->InsertBefore(this);
HConstant* filler_map = HConstant::New(
zone,
context(),
- isolate()->factory()->free_space_map(),
- UniqueValueId::free_space_map(isolate()->heap()));
+ isolate()->factory()->free_space_map());
+ filler_map->FinalizeUniqueness(); // TODO(titzer): should be init'd a'ready
filler_map->InsertAfter(free_space_instr);
HInstruction* store_map = HStoreNamedField::New(zone, context(),
free_space_instr, HObjectAccess::ForMap(), filler_map);
@@ -3820,8 +3961,7 @@ HInstruction* HMathMinMax::New(
HInstruction* HMod::New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg) {
+ HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -3840,7 +3980,7 @@ HInstruction* HMod::New(Zone* zone,
}
}
}
- return new(zone) HMod(context, left, right, fixed_right_arg);
+ return new(zone) HMod(context, left, right);
}
@@ -3938,6 +4078,26 @@ HInstruction* HShr::New(
}
+HInstruction* HSeqStringGetChar::New(Zone* zone,
+ HValue* context,
+ String::Encoding encoding,
+ HValue* string,
+ HValue* index) {
+ if (FLAG_fold_constants && string->IsConstant() && index->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ HConstant* c_index = HConstant::cast(index);
+ if (c_string->HasStringValue() && c_index->HasInteger32Value()) {
+ Handle<String> s = c_string->StringValue();
+ int32_t i = c_index->Integer32Value();
+ ASSERT_LE(0, i);
+ ASSERT_LT(i, s->length());
+ return H_CONSTANT_INT(s->Get(i));
+ }
+ }
+ return new(zone) HSeqStringGetChar(encoding, string, index);
+}
+
+
#undef H_CONSTANT_INT
#undef H_CONSTANT_DOUBLE
@@ -4011,7 +4171,7 @@ Representation HValue::RepresentationFromUseRequirements() {
Representation rep = Representation::None();
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
// Ignore the use requirement from never run code
- if (it.value()->block()->IsDeoptimizing()) continue;
+ if (it.value()->block()->IsUnreachable()) continue;
// We check for observed_input_representation elsewhere.
Representation use_rep =
@@ -4136,14 +4296,14 @@ HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
HObjectAccess HObjectAccess::ForField(Handle<Map> map,
LookupResult *lookup, Handle<String> name) {
- ASSERT(lookup->IsField() || lookup->IsTransitionToField(*map));
+ ASSERT(lookup->IsField() || lookup->IsTransitionToField());
int index;
Representation representation;
if (lookup->IsField()) {
index = lookup->GetLocalFieldIndexFromMap(*map);
representation = lookup->representation();
} else {
- Map* transition = lookup->GetTransitionMapFromMap(*map);
+ Map* transition = lookup->GetTransitionTarget();
int descriptor = transition->LastAdded();
index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
map->inobject_properties();
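
Most of the churn in this file replaces HConstant's old handle_/unique_id_ pair (and raw Handle<Map> fields elsewhere, see HCompareMap and HCheckMaps) with a single Unique<T>: a handle bundled with the raw address of the object it points to, captured once at a point where the collector cannot run, so later equality checks compare addresses without touching the heap. Below is a freestanding sketch of the idea, modeling a handle as a plain T** indirection cell. Assumptions: object addresses stay stable while Uniques are compared; this is not the actual v8::internal::Unique template.

#include <cassert>
#include <cstdio>

// A handle in V8 is an indirection cell the GC may rewrite; modeled here as
// a plain T**. Unique dereferences it once, up front, so equality becomes a
// pointer comparison with no heap access.
template <class T>
class Unique {
 public:
  explicit Unique(T** handle) : raw_address_(*handle), handle_(handle) {}

  bool operator==(const Unique<T>& other) const {
    return raw_address_ == other.raw_address_;  // Identity; no dereference.
  }
  bool operator!=(const Unique<T>& other) const { return !(*this == other); }

  T** handle() const { return handle_; }

 private:
  T* raw_address_;  // Snapshot of *handle_ at construction time.
  T** handle_;
};

struct Map { int instance_size; };

int main() {
  Map a{16}, b{16};
  Map* cell_a1 = &a;
  Map* cell_a2 = &a;  // A second handle to the same object.
  Map* cell_b = &b;

  Unique<Map> u1(&cell_a1), u2(&cell_a2), u3(&cell_b);
  assert(u1 == u2);  // Same object through different handles: equal.
  assert(u1 != u3);  // Structurally identical but distinct objects: unequal.
  std::printf("size via handle: %d\n", (*u1.handle())->instance_size);
  return 0;
}
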
diff --git a/chromium/v8/src/hydrogen-instructions.h b/chromium/v8/src/hydrogen-instructions.h
index 7c4c921a851..cf83928529c 100644
--- a/chromium/v8/src/hydrogen-instructions.h
+++ b/chromium/v8/src/hydrogen-instructions.h
@@ -36,6 +36,7 @@
#include "deoptimizer.h"
#include "small-pointer-list.h"
#include "string-stream.h"
+#include "unique.h"
#include "v8conversions.h"
#include "v8utils.h"
#include "zone.h"
@@ -63,6 +64,7 @@ class LChunkBuilder;
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
V(Allocate) \
@@ -98,6 +100,7 @@ class LChunkBuilder;
V(CompareNumericAndBranch) \
V(CompareHoleAndBranch) \
V(CompareGeneric) \
+ V(CompareMinusZeroAndBranch) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(Constant) \
@@ -124,11 +127,9 @@ class LChunkBuilder;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
- V(IsNumberAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -143,6 +144,7 @@ class LChunkBuilder;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
@@ -153,11 +155,11 @@ class LChunkBuilder;
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
V(Ror) \
V(Sar) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(Shl) \
V(Shr) \
@@ -207,7 +209,8 @@ class LChunkBuilder;
V(GlobalVars) \
V(InobjectFields) \
V(OsrEntries) \
- V(ExternalMemory)
+ V(ExternalMemory) \
+ V(StringChars)
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
@@ -305,64 +308,6 @@ class Range V8_FINAL : public ZoneObject {
};
-class UniqueValueId V8_FINAL {
- public:
- UniqueValueId() : raw_address_(NULL) { }
-
- explicit UniqueValueId(Handle<Object> handle) {
- ASSERT(!AllowHeapAllocation::IsAllowed());
- static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1);
- if (handle.is_null()) {
- raw_address_ = kEmptyHandleSentinel;
- } else {
- raw_address_ = reinterpret_cast<Address>(*handle);
- ASSERT_NE(kEmptyHandleSentinel, raw_address_);
- }
- ASSERT(IsInitialized());
- }
-
- bool IsInitialized() const { return raw_address_ != NULL; }
-
- bool operator==(const UniqueValueId& other) const {
- ASSERT(IsInitialized() && other.IsInitialized());
- return raw_address_ == other.raw_address_;
- }
-
- bool operator!=(const UniqueValueId& other) const {
- ASSERT(IsInitialized() && other.IsInitialized());
- return raw_address_ != other.raw_address_;
- }
-
- intptr_t Hashcode() const {
- ASSERT(IsInitialized());
- return reinterpret_cast<intptr_t>(raw_address_);
- }
-
-#define IMMOVABLE_UNIQUE_VALUE_ID(name) \
- static UniqueValueId name(Heap* heap) { return UniqueValueId(heap->name()); }
-
- IMMOVABLE_UNIQUE_VALUE_ID(free_space_map)
- IMMOVABLE_UNIQUE_VALUE_ID(minus_zero_value)
- IMMOVABLE_UNIQUE_VALUE_ID(nan_value)
- IMMOVABLE_UNIQUE_VALUE_ID(undefined_value)
- IMMOVABLE_UNIQUE_VALUE_ID(null_value)
- IMMOVABLE_UNIQUE_VALUE_ID(true_value)
- IMMOVABLE_UNIQUE_VALUE_ID(false_value)
- IMMOVABLE_UNIQUE_VALUE_ID(the_hole_value)
- IMMOVABLE_UNIQUE_VALUE_ID(empty_string)
-
-#undef IMMOVABLE_UNIQUE_VALUE_ID
-
- private:
- Address raw_address_;
-
- explicit UniqueValueId(Object* object) {
- raw_address_ = reinterpret_cast<Address>(object);
- ASSERT(IsInitialized());
- }
-};
-
-
class HType V8_FINAL {
public:
static HType None() { return HType(kNone); }
@@ -599,7 +544,7 @@ class DecompositionResult V8_FINAL BASE_EMBEDDED {
};
-typedef EnumSet<GVNFlag> GVNFlagSet;
+typedef EnumSet<GVNFlag, int64_t> GVNFlagSet;
class HValue : public ZoneObject {
@@ -695,6 +640,9 @@ class HValue : public ZoneObject {
flags_(0) {}
virtual ~HValue() {}
+ virtual int position() const { return RelocInfo::kNoPosition; }
+ virtual int operand_position(int index) const { return position(); }
+
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
int LoopWeight() const;
@@ -777,16 +725,24 @@ class HValue : public ZoneObject {
return index == kNoRedefinedOperand ? NULL : OperandAt(index);
}
+ bool CanReplaceWithDummyUses();
+
+ virtual int argument_delta() const { return 0; }
+
// A purely informative definition is an idef that will not emit code and
// should therefore be removed from the graph in the RestoreActualValues
// phase (so that live ranges will be shorter).
virtual bool IsPurelyInformativeDefinition() { return false; }
- // This method must always return the original HValue SSA definition
- // (regardless of any iDef of this value).
+ // This method must always return the original HValue SSA definition,
+ // regardless of any chain of iDefs of this value.
HValue* ActualValue() {
- int index = RedefinedOperandIndex();
- return index == kNoRedefinedOperand ? this : OperandAt(index);
+ HValue* value = this;
+ int index;
+ while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) {
+ value = value->OperandAt(index);
+ }
+ return value;
}
bool IsInteger32Constant();
@@ -815,6 +771,9 @@ class HValue : public ZoneObject {
void SetFlag(Flag f) { flags_ |= (1 << f); }
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+ void CopyFlag(Flag f, HValue* other) {
+ if (other->CheckFlag(f)) SetFlag(f);
+ }
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f) const;
@@ -898,7 +857,7 @@ class HValue : public ZoneObject {
virtual intptr_t Hashcode();
// Compute unique ids upfront that is safe wrt GC and concurrent compilation.
- virtual void FinalizeUniqueValueId() { }
+ virtual void FinalizeUniqueness() { }
// Printing support.
virtual void PrintTo(StringStream* stream) = 0;
@@ -1104,6 +1063,143 @@ class HValue : public ZoneObject {
return new(zone) I(p1, p2, p3, p4, p5); \
}
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
+ static I* New(Zone* zone, HValue* context) { \
+ return new(zone) I(context); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \
+ static I* New(Zone* zone, HValue* context, P1 p1) { \
+ return new(zone) I(context, p1); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \
+ return new(zone) I(context, p1, p2); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \
+ static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) { \
+ return new(zone) I(context, p1, p2, p3); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4) { \
+ return new(zone) I(context, p1, p2, p3, p4); \
+ }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4, \
+ P5 p5) { \
+ return new(zone) I(context, p1, p2, p3, p4, p5); \
+ }
+
+
+// A helper class to represent per-operand position information attached to
+// the HInstruction in a compact form. Uses tagging to distinguish between
+// the case when only the instruction's position is available and the case
+// when the operands' positions are also available.
+// In the first case it contains the instruction's position as a tagged value.
+// In the second case it points to an array which contains the instruction's
+// position and the operands' positions.
+// TODO(vegorov): what we really want to track here is a combination of
+// source position and a script id because cross script inlining can easily
+// result in optimized functions composed of several scripts.
+class HPositionInfo {
+ public:
+ explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
+
+ int position() const {
+ if (has_operand_positions()) {
+ return static_cast<int>(operand_positions()[kInstructionPosIndex]);
+ }
+ return static_cast<int>(UntagPosition(data_));
+ }
+
+ void set_position(int pos) {
+ if (has_operand_positions()) {
+ operand_positions()[kInstructionPosIndex] = pos;
+ } else {
+ data_ = TagPosition(pos);
+ }
+ }
+
+ void ensure_storage_for_operand_positions(Zone* zone, int operand_count) {
+ if (has_operand_positions()) {
+ return;
+ }
+
+ const int length = kFirstOperandPosIndex + operand_count;
+ intptr_t* positions =
+ zone->NewArray<intptr_t>(length);
+ for (int i = 0; i < length; i++) {
+ positions[i] = RelocInfo::kNoPosition;
+ }
+
+ const int pos = position();
+ data_ = reinterpret_cast<intptr_t>(positions);
+ set_position(pos);
+
+ ASSERT(has_operand_positions());
+ }
+
+ int operand_position(int idx) const {
+ if (!has_operand_positions()) {
+ return position();
+ }
+ return static_cast<int>(*operand_position_slot(idx));
+ }
+
+ void set_operand_position(int idx, int pos) {
+ *operand_position_slot(idx) = pos;
+ }
+
+ private:
+ static const intptr_t kInstructionPosIndex = 0;
+ static const intptr_t kFirstOperandPosIndex = 1;
+
+ intptr_t* operand_position_slot(int idx) const {
+ ASSERT(has_operand_positions());
+ return &(operand_positions()[kFirstOperandPosIndex + idx]);
+ }
+
+ bool has_operand_positions() const {
+ return !IsTaggedPosition(data_);
+ }
+
+ intptr_t* operand_positions() const {
+ ASSERT(has_operand_positions());
+ return reinterpret_cast<intptr_t*>(data_);
+ }
+
+ static const intptr_t kPositionTag = 1;
+ static const intptr_t kPositionShift = 1;
+ static bool IsTaggedPosition(intptr_t val) {
+ return (val & kPositionTag) != 0;
+ }
+ static intptr_t UntagPosition(intptr_t val) {
+ ASSERT(IsTaggedPosition(val));
+ return val >> kPositionShift;
+ }
+ static intptr_t TagPosition(intptr_t val) {
+ const intptr_t result = (val << kPositionShift) | kPositionTag;
+ ASSERT(UntagPosition(result) == val);
+ return result;
+ }
+
+ intptr_t data_;
+};
+
class HInstruction : public HValue {
public:
@@ -1119,12 +1215,26 @@ class HInstruction : public HValue {
void InsertAfter(HInstruction* previous);
// The position is a write-once variable.
- int position() const { return position_; }
- bool has_position() const { return position_ != RelocInfo::kNoPosition; }
+ virtual int position() const V8_OVERRIDE {
+ return position_.position();
+ }
+ bool has_position() const {
+ return position_.position() != RelocInfo::kNoPosition;
+ }
void set_position(int position) {
ASSERT(!has_position());
ASSERT(position != RelocInfo::kNoPosition);
- position_ = position;
+ position_.set_position(position);
+ }
+
+ virtual int operand_position(int index) const V8_OVERRIDE {
+ const int pos = position_.operand_position(index);
+ return (pos != RelocInfo::kNoPosition) ? pos : position();
+ }
+ void set_operand_position(Zone* zone, int index, int pos) {
+ ASSERT(0 <= index && index < OperandCount());
+ position_.ensure_storage_for_operand_positions(zone, OperandCount());
+ position_.set_operand_position(index, pos);
}
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
@@ -1160,7 +1270,7 @@ class HInstruction : public HValue {
HInstruction* next_;
HInstruction* previous_;
- int position_;
+ HPositionInfo position_;
friend class HBasicBlock;
};
@@ -1194,6 +1304,11 @@ class HControlInstruction : public HInstruction {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) {
+ *block = NULL;
+ return false;
+ }
+
HBasicBlock* FirstSuccessor() {
return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
}
@@ -1201,6 +1316,12 @@ class HControlInstruction : public HInstruction {
return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
}
+ void Not() {
+ HBasicBlock* swap = SuccessorAt(0);
+ SetSuccessorAt(0, SuccessorAt(1));
+ SetSuccessorAt(1, swap);
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
};
@@ -1277,53 +1398,74 @@ class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
};
-class HDeoptimize V8_FINAL : public HTemplateInstruction<0> {
+// Inserts an int3/stop break instruction for debugging purposes.
+class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HDeoptimize, const char*,
- Deoptimizer::BailoutType);
+ DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- const char* reason() const { return reason_; }
- Deoptimizer::BailoutType type() { return type_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
-
- private:
- explicit HDeoptimize(const char* reason, Deoptimizer::BailoutType type)
- : reason_(reason), type_(type) {}
-
- const char* reason_;
- Deoptimizer::BailoutType type_;
+ DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
};
-// Inserts an int3/stop break instruction for debugging purposes.
-class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
+class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
+ explicit HGoto(HBasicBlock* target) {
+ SetSuccessorAt(0, target);
+ }
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ *block = FirstSuccessor();
+ return true;
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(Goto)
};
-class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
+class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
- explicit HGoto(HBasicBlock* target) {
- SetSuccessorAt(0, target);
+ static HDeoptimize* New(Zone* zone,
+ HValue* context,
+ const char* reason,
+ Deoptimizer::BailoutType type,
+ HBasicBlock* unreachable_continuation) {
+ return new(zone) HDeoptimize(reason, type, unreachable_continuation);
+ }
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+ *block = NULL;
+ return true;
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ const char* reason() const { return reason_; }
+ Deoptimizer::BailoutType type() { return type_; }
- DECLARE_CONCRETE_INSTRUCTION(Goto)
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+ private:
+ explicit HDeoptimize(const char* reason,
+ Deoptimizer::BailoutType type,
+ HBasicBlock* unreachable_continuation)
+ : reason_(reason), type_(type) {
+ SetSuccessorAt(0, unreachable_continuation);
+ }
+
+ const char* reason_;
+ Deoptimizer::BailoutType type_;
};
@@ -1345,20 +1487,20 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
class HBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HBranch(HValue* value,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- expected_input_types_(expected_input_types) {
- SetFlag(kAllowUndefinedAsNaN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
+ ToBooleanStub::Types);
+ DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
+ ToBooleanStub::Types,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
virtual Representation observed_input_representation(int index) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
}
@@ -1366,24 +1508,28 @@ class HBranch V8_FINAL : public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(Branch)
private:
+ HBranch(HValue* value,
+ ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ expected_input_types_(expected_input_types) {
+ SetFlag(kAllowUndefinedAsNaN);
+ }
+
ToBooleanStub::Types expected_input_types_;
};
class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
- HCompareMap(HValue* value,
- Handle<Map> map,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- map_(map) {
- ASSERT(!map.is_null());
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
+ HBasicBlock*, HBasicBlock*);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> map() const { return map_; }
+ Unique<Map> map() const { return map_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -1395,7 +1541,17 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction {
virtual int RedefinedOperandIndex() { return 0; }
private:
- Handle<Map> map_;
+ HCompareMap(HValue* value,
+ Handle<Map> map,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ map_(Unique<Map>(map)) {
+ ASSERT(!map.is_null());
+ set_representation(Representation::Tagged());
+ }
+
+ Unique<Map> map_;
};
@@ -1426,20 +1582,12 @@ class HContext V8_FINAL : public HTemplateInstruction<0> {
class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
public:
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* value,
- HValue* parameter_count) {
- return new(zone) HReturn(value, context, parameter_count);
- }
-
- static HInstruction* New(Zone* zone,
- HValue* context,
- HValue* value) {
- return new(zone) HReturn(value, context, 0);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ // TODO(titzer): require an Int32 input for faster returns.
+ if (index == 2) return Representation::Smi();
return Representation::Tagged();
}
@@ -1452,7 +1600,7 @@ class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
DECLARE_CONCRETE_INSTRUCTION(Return)
private:
- HReturn(HValue* value, HValue* context, HValue* parameter_count) {
+ HReturn(HValue* context, HValue* value, HValue* parameter_count = 0) {
SetOperandAt(0, value);
SetOperandAt(1, context);
SetOperandAt(2, parameter_count);
@@ -1460,6 +1608,20 @@ class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
};
+class HAbnormalExit V8_FINAL : public HTemplateControlInstruction<0, 0> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
+ private:
+ HAbnormalExit() {}
+};
+
+
class HUnaryOperation : public HTemplateInstruction<1> {
public:
HUnaryOperation(HValue* value, HType type = HType::Tagged())
@@ -1478,11 +1640,7 @@ class HUnaryOperation : public HTemplateInstruction<1> {
class HThrow V8_FINAL : public HTemplateInstruction<2> {
public:
- static HThrow* New(Zone* zone,
- HValue* context,
- HValue* value) {
- return new(zone) HThrow(context, value);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -1519,7 +1677,8 @@ class HUseConst V8_FINAL : public HUnaryOperation {
class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HForceRepresentation, HValue*, Representation);
+ static HInstruction* New(Zone* zone, HValue* context, HValue* value,
+ Representation required_representation);
HValue* value() { return OperandAt(0); }
@@ -1738,8 +1897,7 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
public:
enum Kind { BIND, LOOKUP };
- HEnvironmentMarker(Kind kind, int index)
- : kind_(kind), index_(index), next_simulate_(NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int);
Kind kind() { return kind_; }
int index() { return index_; }
@@ -1766,6 +1924,9 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker);
private:
+ HEnvironmentMarker(Kind kind, int index)
+ : kind_(kind), index_(index), next_simulate_(NULL) { }
+
Kind kind_;
int index_;
HSimulate* next_simulate_;
@@ -1783,7 +1944,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
kBackwardsBranch
};
- DECLARE_INSTRUCTION_FACTORY_P2(HStackCheck, HValue*, Type);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type);
HValue* context() { return OperandAt(0); }
@@ -1898,13 +2059,24 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
public:
- HLeaveInlined() { }
+ HLeaveInlined(HEnterInlined* entry,
+ int drop_count)
+ : entry_(entry),
+ drop_count_(drop_count) { }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
+ virtual int argument_delta() const V8_OVERRIDE {
+ return entry_->arguments_pushed() ? -drop_count_ : 0;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
+
+ private:
+ HEnterInlined* entry_;
+ int drop_count_;
};
@@ -1916,6 +2088,7 @@ class HPushArgument V8_FINAL : public HUnaryOperation {
return Representation::Tagged();
}
+ virtual int argument_delta() const V8_OVERRIDE { return 1; }
HValue* argument() { return OperandAt(0); }
DECLARE_CONCRETE_INSTRUCTION(PushArgument)
@@ -1929,10 +2102,7 @@ class HPushArgument V8_FINAL : public HUnaryOperation {
class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
public:
- HThisFunction() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
@@ -1944,6 +2114,11 @@ class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
+ HThisFunction() {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -1973,22 +2148,9 @@ class HOuterContext V8_FINAL : public HUnaryOperation {
class HDeclareGlobals V8_FINAL : public HUnaryOperation {
public:
- HDeclareGlobals(HValue* context,
- Handle<FixedArray> pairs,
- int flags)
- : HUnaryOperation(context),
- pairs_(pairs),
- flags_(flags) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- static HDeclareGlobals* New(Zone* zone,
- HValue* context,
- Handle<FixedArray> pairs,
- int flags) {
- return new(zone) HDeclareGlobals(context, pairs, flags);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
+ Handle<FixedArray>,
+ int);
HValue* context() { return OperandAt(0); }
Handle<FixedArray> pairs() const { return pairs_; }
@@ -2001,6 +2163,16 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation {
}
private:
+ HDeclareGlobals(HValue* context,
+ Handle<FixedArray> pairs,
+ int flags)
+ : HUnaryOperation(context),
+ pairs_(pairs),
+ flags_(flags) {
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<FixedArray> pairs_;
int flags_;
};
@@ -2008,14 +2180,7 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation {
class HGlobalObject V8_FINAL : public HUnaryOperation {
public:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- static HGlobalObject* New(Zone* zone, HValue* context) {
- return new(zone) HGlobalObject(context);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(HGlobalObject);
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
@@ -2027,6 +2192,11 @@ class HGlobalObject V8_FINAL : public HUnaryOperation {
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
+ explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -2068,7 +2238,13 @@ class HCall : public HTemplateInstruction<V> {
return HType::Tagged();
}
- virtual int argument_count() const { return argument_count_; }
+ virtual int argument_count() const {
+ return argument_count_;
+ }
+
+ virtual int argument_delta() const V8_OVERRIDE {
+ return -argument_count();
+ }
virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; }
@@ -2117,16 +2293,7 @@ class HBinaryCall : public HCall<2> {
class HInvokeFunction V8_FINAL : public HBinaryCall {
public:
- HInvokeFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
-
- static HInvokeFunction* New(Zone* zone,
- HValue* context,
- HValue* function,
- int argument_count) {
- return new(zone) HInvokeFunction(context, function, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
HInvokeFunction(HValue* context,
HValue* function,
@@ -2155,6 +2322,10 @@ class HInvokeFunction V8_FINAL : public HBinaryCall {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
private:
+ HInvokeFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
+ }
+
Handle<JSFunction> known_function_;
int formal_parameter_count_;
};
@@ -2162,10 +2333,9 @@ class HInvokeFunction V8_FINAL : public HBinaryCall {
class HCallConstantFunction V8_FINAL : public HCall<0> {
public:
- HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall<0>(argument_count),
- function_(function),
- formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+ DECLARE_INSTRUCTION_FACTORY_P2(HCallConstantFunction,
+ Handle<JSFunction>,
+ int);
Handle<JSFunction> function() const { return function_; }
int formal_parameter_count() const { return formal_parameter_count_; }
@@ -2184,6 +2354,11 @@ class HCallConstantFunction V8_FINAL : public HCall<0> {
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
private:
+ HCallConstantFunction(Handle<JSFunction> function, int argument_count)
+ : HCall<0>(argument_count),
+ function_(function),
+ formal_parameter_count_(function->shared()->formal_parameter_count()) {}
+
Handle<JSFunction> function_;
int formal_parameter_count_;
};
@@ -2191,22 +2366,23 @@ class HCallConstantFunction V8_FINAL : public HCall<0> {
class HCallKeyed V8_FINAL : public HBinaryCall {
public:
- HCallKeyed(HValue* context, HValue* key, int argument_count)
- : HBinaryCall(context, key, argument_count) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallKeyed, HValue*, int);
HValue* context() { return first(); }
HValue* key() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
+
+ private:
+ HCallKeyed(HValue* context, HValue* key, int argument_count)
+ : HBinaryCall(context, key, argument_count) {
+ }
};
class HCallNamed V8_FINAL : public HUnaryCall {
public:
- HCallNamed(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNamed, Handle<String>, int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2216,42 +2392,52 @@ class HCallNamed V8_FINAL : public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallNamed)
private:
+ HCallNamed(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
+ }
+
Handle<String> name_;
};
+enum CallMode {
+ NORMAL_CALL,
+ TAIL_CALL
+};
+
+
class HCallFunction V8_FINAL : public HBinaryCall {
public:
- HCallFunction(HValue* context, HValue* function, int argument_count)
- : HBinaryCall(context, function, argument_count) {
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
+ HCallFunction, HValue*, int, CallMode);
- static HCallFunction* New(Zone* zone,
- HValue* context,
- HValue* function,
- int argument_count) {
- return new(zone) HCallFunction(context, function, argument_count);
- }
+ bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
HValue* context() { return first(); }
HValue* function() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+
+ virtual int argument_delta() const V8_OVERRIDE {
+ if (IsTailCall()) return 0;
+ return -argument_count();
+ }
+
+ private:
+ HCallFunction(HValue* context,
+ HValue* function,
+ int argument_count,
+ CallMode mode = NORMAL_CALL)
+ : HBinaryCall(context, function, argument_count), call_mode_(mode) {
+ }
+ CallMode call_mode_;
};
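// [Sketch] Assuming the generated factories forward their arguments to the
// private constructor (whose CallMode parameter defaults to NORMAL_CALL),
// the two HCallFunction overloads would be used like this, with zone,
// context, function and argc supplied by the graph builder:
//
//   HCallFunction::New(zone, context, function, argc);             // normal
//   HCallFunction::New(zone, context, function, argc, TAIL_CALL);  // tail
//
// The argument_delta() override encodes the environment effect: a normal
// call pops its arguments, hence -argument_count(), while a tail call
// reuses the current frame, so the simulated stack height is unchanged.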
class HCallGlobal V8_FINAL : public HUnaryCall {
public:
- HCallGlobal(HValue* context, Handle<String> name, int argument_count)
- : HUnaryCall(context, argument_count), name_(name) {
- }
-
- static HCallGlobal* New(Zone* zone,
- HValue* context,
- Handle<String> name,
- int argument_count) {
- return new(zone) HCallGlobal(context, name, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallGlobal, Handle<String>, int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2261,16 +2447,17 @@ class HCallGlobal V8_FINAL : public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
private:
+ HCallGlobal(HValue* context, Handle<String> name, int argument_count)
+ : HUnaryCall(context, argument_count), name_(name) {
+ }
+
Handle<String> name_;
};
class HCallKnownGlobal V8_FINAL : public HCall<0> {
public:
- HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall<0>(argument_count),
- target_(target),
- formal_parameter_count_(target->shared()->formal_parameter_count()) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCallKnownGlobal, Handle<JSFunction>, int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2284,6 +2471,11 @@ class HCallKnownGlobal V8_FINAL : public HCall<0> {
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
private:
+ HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
+ : HCall<0>(argument_count),
+ target_(target),
+ formal_parameter_count_(target->shared()->formal_parameter_count()) { }
+
Handle<JSFunction> target_;
int formal_parameter_count_;
};
@@ -2291,23 +2483,26 @@ class HCallKnownGlobal V8_FINAL : public HCall<0> {
class HCallNew V8_FINAL : public HBinaryCall {
public:
- HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {}
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
HValue* context() { return first(); }
HValue* constructor() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallNew)
+
+ private:
+ HCallNew(HValue* context, HValue* constructor, int argument_count)
+ : HBinaryCall(context, constructor, argument_count) {}
};
class HCallNewArray V8_FINAL : public HBinaryCall {
public:
- HCallNewArray(HValue* context, HValue* constructor, int argument_count,
- Handle<Cell> type_cell, ElementsKind elements_kind)
- : HBinaryCall(context, constructor, argument_count),
- elements_kind_(elements_kind),
- type_cell_(type_cell) {}
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray,
+ HValue*,
+ int,
+ Handle<Cell>,
+ ElementsKind);
HValue* context() { return first(); }
HValue* constructor() { return second(); }
@@ -2323,6 +2518,12 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
+ HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+ Handle<Cell> type_cell, ElementsKind elements_kind)
+ : HBinaryCall(context, constructor, argument_count),
+ elements_kind_(elements_kind),
+ type_cell_(type_cell) {}
+
ElementsKind elements_kind_;
Handle<Cell> type_cell_;
};
@@ -2330,19 +2531,20 @@ class HCallNewArray V8_FINAL : public HBinaryCall {
class HCallRuntime V8_FINAL : public HCall<1> {
public:
- static HCallRuntime* New(Zone* zone,
- HValue* context,
- Handle<String> name,
- const Runtime::Function* c_function,
- int argument_count) {
- return new(zone) HCallRuntime(context, name, c_function, argument_count);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime,
+ Handle<String>,
+ const Runtime::Function*,
+ int);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return OperandAt(0); }
const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
+ SaveFPRegsMode save_doubles() const { return save_doubles_; }
+ void set_save_doubles(SaveFPRegsMode save_doubles) {
+ save_doubles_ = save_doubles;
+ }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -2355,12 +2557,14 @@ class HCallRuntime V8_FINAL : public HCall<1> {
Handle<String> name,
const Runtime::Function* c_function,
int argument_count)
- : HCall<1>(argument_count), c_function_(c_function), name_(name) {
+ : HCall<1>(argument_count), c_function_(c_function), name_(name),
+ save_doubles_(kDontSaveFPRegs) {
SetOperandAt(0, context);
}
const Runtime::Function* c_function_;
Handle<String> name_;
+ SaveFPRegsMode save_doubles_;
};
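// [Sketch] save_doubles_ starts out as kDontSaveFPRegs in the constructor
// above; a call site that can reach the runtime function with live double
// registers would presumably opt in after construction:
//
//   HCallRuntime* call =
//       HCallRuntime::New(zone, context, name, c_function, argc);
//   call->set_save_doubles(kSaveFPRegs);
//
// kSaveFPRegs and kDontSaveFPRegs are the existing SaveFPRegsMode values
// used by the code generators; the call here is illustrative only.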
@@ -2509,6 +2713,40 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
};
+class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
+ }
+
+ Heap::RootListIndex index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HLoadRoot* b = HLoadRoot::cast(other);
+ return index_ == b->index_;
+ }
+
+ private:
+ HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
+ : HTemplateInstruction<0>(type), index_(index) {
+ SetFlag(kUseGVN);
+ // TODO(bmeurer): We'll need kDependsOnRoots once we add the
+ // corresponding HStoreRoot instruction.
+ SetGVNFlag(kDependsOnCalls);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ const Heap::RootListIndex index_;
+};
+
+
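+// [Sketch] HLoadRoot turns a read from the isolate's root list into a pure,
+// GVN-able instruction with no inputs. An illustrative use, assuming the
+// usual Heap::RootListIndex enumerator names:
+//
+//   HLoadRoot::New(zone, context, Heap::kEmptyFixedArrayRootIndex);
+//
+// Per the TODO above, kDependsOnCalls stands in conservatively until an
+// HStoreRoot instruction exists and a dedicated kDependsOnRoots flag pays
+// its way.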
class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
@@ -2553,7 +2791,6 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
for (int i = 0; i < maps->length(); i++) {
check_map->Add(maps->at(i), zone);
}
- check_map->map_set_.Sort();
return check_map;
}
@@ -2568,38 +2805,26 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
- SmallMapList* map_set() { return &map_set_; }
- ZoneList<UniqueValueId>* map_unique_ids() { return &map_unique_ids_; }
- bool has_migration_target() {
+ Unique<Map> first_map() const { return map_set_.at(0); }
+ UniqueSet<Map> map_set() const { return map_set_; }
+
+ bool has_migration_target() const {
return has_migration_target_;
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- ASSERT_EQ(map_set_.length(), map_unique_ids_.length());
- HCheckMaps* b = HCheckMaps::cast(other);
- // Relies on the fact that map_set has been sorted before.
- if (map_unique_ids_.length() != b->map_unique_ids_.length()) {
- return false;
- }
- for (int i = 0; i < map_unique_ids_.length(); i++) {
- if (map_unique_ids_.at(i) != b->map_unique_ids_.at(i)) {
- return false;
- }
- }
- return true;
+ return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_);
}
virtual int RedefinedOperandIndex() { return 0; }
private:
void Add(Handle<Map> map, Zone* zone) {
- map_set_.Add(map, zone);
+ map_set_.Add(Unique<Map>(map), zone);
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
SetGVNFlag(kChangesNewSpacePromotion);
@@ -2609,10 +2834,9 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
: HTemplateInstruction<2>(value->type()),
- omit_(false), has_migration_target_(false), map_unique_ids_(0, zone) {
+ omit_(false), has_migration_target_(false) {
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
- // TODO(titzer): do GVN flags already express this dependency?
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
@@ -2621,36 +2845,33 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
SetGVNFlag(kDependsOnElementsKind);
}
- void omit(CompilationInfo* info) {
- omit_ = true;
- for (int i = 0; i < map_set_.length(); i++) {
- Handle<Map> map = map_set_.at(i);
- if (!map->CanTransition()) continue;
- map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
- info);
- }
- }
-
bool omit_;
bool has_migration_target_;
- SmallMapList map_set_;
- ZoneList<UniqueValueId> map_unique_ids_;
+ UniqueSet<Map> map_set_;
};
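// [Sketch] DataEquals now delegates to UniqueSet<Map>::Equals instead of the
// hand-rolled comparison deleted above, which required map_set_ to be sorted
// beforehand. Assuming UniqueSet keeps its elements ordered as they are
// Add()ed, the equality it performs amounts to:
//
//   if (size() != other->size()) return false;
//   for (int i = 0; i < size(); i++) {
//     if (at(i) != other->at(i)) return false;  // raw-address comparison
//   }
//   return true;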
class HCheckValue V8_FINAL : public HUnaryOperation {
public:
static HCheckValue* New(Zone* zone, HValue* context,
- HValue* value, Handle<JSFunction> target) {
- bool in_new_space = zone->isolate()->heap()->InNewSpace(*target);
+ HValue* value, Handle<JSFunction> func) {
+ bool in_new_space = zone->isolate()->heap()->InNewSpace(*func);
+ // NOTE: We create an uninitialized Unique and initialize it later.
+ // This is because a JSFunction can move due to GC during graph creation.
+ // TODO(titzer): This is a migration crutch. Replace with some kind of
+ // Uniqueness scope later.
+ Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
return check;
}
static HCheckValue* New(Zone* zone, HValue* context,
- HValue* value, Handle<Map> map, UniqueValueId id) {
- HCheckValue* check = new(zone) HCheckValue(value, map, false);
- check->object_unique_id_ = id;
- return check;
+ HValue* value, Unique<HeapObject> target,
+ bool object_in_new_space) {
+ return new(zone) HCheckValue(value, target, object_in_new_space);
+ }
+
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ object_ = Unique<HeapObject>(object_.handle());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -2664,11 +2885,7 @@ class HCheckValue V8_FINAL : public HUnaryOperation {
virtual void Verify() V8_OVERRIDE;
#endif
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- object_unique_id_ = UniqueValueId(object_);
- }
-
- Handle<HeapObject> object() const { return object_; }
+ Unique<HeapObject> object() const { return object_; }
bool object_in_new_space() const { return object_in_new_space_; }
DECLARE_CONCRETE_INSTRUCTION(CheckValue)
@@ -2676,38 +2893,35 @@ class HCheckValue V8_FINAL : public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HCheckValue* b = HCheckValue::cast(other);
- return object_unique_id_ == b->object_unique_id_;
+ return object_ == b->object_;
}
private:
- HCheckValue(HValue* value, Handle<HeapObject> object, bool in_new_space)
+ HCheckValue(HValue* value, Unique<HeapObject> object,
+ bool object_in_new_space)
: HUnaryOperation(value, value->type()),
- object_(object), object_in_new_space_(in_new_space) {
+ object_(object),
+ object_in_new_space_(object_in_new_space) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- Handle<HeapObject> object_;
- UniqueValueId object_unique_id_;
+ Unique<HeapObject> object_;
bool object_in_new_space_;
};
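// [Sketch] The Unique<T> lifecycle this hunk relies on, per the NOTE in
// HCheckValue::New above: during graph construction a heap object may still
// move under GC, so the Unique is created uninitialized (carrying only its
// handle) and the raw address is captured once it is stable:
//
//   Unique<JSFunction> u = Unique<JSFunction>::CreateUninitialized(func);
//   // ... graph construction; GC may move *func ...
//   u = Unique<JSFunction>(u.handle());  // what FinalizeUniqueness() does
//
// Only after finalization do operator== and Hashcode() have a stable
// address to work with, which is what keeps GVN's DataEquals cheap.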
class HCheckInstanceType V8_FINAL : public HUnaryOperation {
public:
- static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
- }
- static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
- }
- static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_STRING);
- }
- static HCheckInstanceType* NewIsInternalizedString(
- HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING);
- }
+ enum Check {
+ IS_SPEC_OBJECT,
+ IS_JS_ARRAY,
+ IS_STRING,
+ IS_INTERNALIZED_STRING,
+ LAST_INTERVAL_CHECK = IS_JS_ARRAY
+ };
+
+ DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -2735,14 +2949,6 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation {
virtual int RedefinedOperandIndex() { return 0; }
private:
- enum Check {
- IS_SPEC_OBJECT,
- IS_JS_ARRAY,
- IS_STRING,
- IS_INTERNALIZED_STRING,
- LAST_INTERVAL_CHECK = IS_JS_ARRAY
- };
-
const char* GetCheckName();
HCheckInstanceType(HValue* value, Check check)
@@ -2784,21 +2990,6 @@ class HCheckSmi V8_FINAL : public HUnaryOperation {
};
-class HIsNumberAndBranch V8_FINAL : public HUnaryControlInstruction {
- public:
- explicit HIsNumberAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
- SetFlag(kFlexibleRepresentation);
- }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch)
-};
-
-
class HCheckHeapObject V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
@@ -3090,6 +3281,8 @@ class HPhi V8_FINAL : public HValue {
bool IsReceiver() const { return merged_index_ == 0; }
bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
+ virtual int position() const V8_OVERRIDE;
+
int merged_index() const { return merged_index_; }
InductionVariableData* induction_variable_data() {
@@ -3200,9 +3393,6 @@ class HDematerializedObject : public HInstruction {
// List of values tracked by this marker.
ZoneList<HValue*> values_;
-
- private:
- virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
};
@@ -3230,6 +3420,8 @@ class HArgumentsObject V8_FINAL : public HDematerializedObject {
set_representation(Representation::Tagged());
SetFlag(kIsArguments);
}
+
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
};
@@ -3260,10 +3452,17 @@ class HCapturedObject V8_FINAL : public HDematerializedObject {
// Replay effects of this instruction on the given environment.
void ReplayEnvironment(HEnvironment* env);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
private:
int capture_id_;
+
+ // Note that we cannot DCE captured objects as they are used to replay
+ // the environment. This method is here as an explicit reminder.
+ // TODO(mstarzinger): Turn HSimulates into full snapshots maybe?
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return false; }
};
@@ -3273,7 +3472,6 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
- DECLARE_INSTRUCTION_FACTORY_P2(HConstant, Handle<Map>, UniqueValueId);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
static HConstant* CreateAndInsertAfter(Zone* zone,
@@ -3298,16 +3496,27 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return new_constant;
}
+ static HConstant* CreateAndInsertBefore(Zone* zone,
+ Unique<Object> unique,
+ bool is_not_in_new_space,
+ HInstruction* instruction) {
+ HConstant* new_constant = new(zone) HConstant(unique,
+ Representation::Tagged(), HType::Tagged(), false, is_not_in_new_space,
+ false, false);
+ new_constant->InsertBefore(instruction);
+ return new_constant;
+ }
+
Handle<Object> handle(Isolate* isolate) {
- if (handle_.is_null()) {
- Factory* factory = isolate->factory();
+ if (object_.handle().is_null()) {
// Default arguments to is_not_in_new_space depend on this heap number
- // to be tenured so that it's guaranteed not be be located in new space.
- handle_ = factory->NewNumber(double_value_, TENURED);
+ // to be tenured so that it's guaranteed not to be located in new space.
+ object_ = Unique<Object>::CreateUninitialized(
+ isolate->factory()->NewNumber(double_value_, TENURED));
}
AllowDeferredHandleDereference smi_check;
- ASSERT(has_int32_value_ || !handle_->IsSmi());
- return handle_;
+ ASSERT(has_int32_value_ || !object_.handle()->IsSmi());
+ return object_.handle();
}
bool HasMap(Handle<Map> map) {
@@ -3341,16 +3550,18 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
return false;
}
- ASSERT(!handle_.is_null());
+ ASSERT(!object_.handle().is_null());
Heap* heap = isolate()->heap();
- ASSERT(unique_id_ != UniqueValueId::minus_zero_value(heap));
- ASSERT(unique_id_ != UniqueValueId::nan_value(heap));
- return unique_id_ == UniqueValueId::undefined_value(heap) ||
- unique_id_ == UniqueValueId::null_value(heap) ||
- unique_id_ == UniqueValueId::true_value(heap) ||
- unique_id_ == UniqueValueId::false_value(heap) ||
- unique_id_ == UniqueValueId::the_hole_value(heap) ||
- unique_id_ == UniqueValueId::empty_string(heap);
+ ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
+ ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
+ return
+ object_.IsKnownGlobal(heap->undefined_value()) ||
+ object_.IsKnownGlobal(heap->null_value()) ||
+ object_.IsKnownGlobal(heap->true_value()) ||
+ object_.IsKnownGlobal(heap->false_value()) ||
+ object_.IsKnownGlobal(heap->the_hole_value()) ||
+ object_.IsKnownGlobal(heap->empty_string()) ||
+ object_.IsKnownGlobal(heap->empty_fixed_array());
}
bool IsCell() const {
@@ -3389,11 +3600,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
return true;
}
- Heap* heap = isolate()->heap();
- if (!handle_.is_null() && *handle_ == heap->the_hole_value()) {
- return true;
- }
- return false;
+ return object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
}
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
@@ -3405,12 +3612,12 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
}
bool HasStringValue() const {
if (has_double_value_ || has_int32_value_) return false;
- ASSERT(!handle_.is_null());
+ ASSERT(!object_.handle().is_null());
return type_.IsString();
}
Handle<String> StringValue() const {
ASSERT(HasStringValue());
- return Handle<String>::cast(handle_);
+ return Handle<String>::cast(object_.handle());
}
bool HasInternalizedStringValue() const {
return HasStringValue() && is_internalized_string_;
@@ -3434,21 +3641,20 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
} else if (has_external_reference_value_) {
return reinterpret_cast<intptr_t>(external_reference_value_.address());
} else {
- ASSERT(!handle_.is_null());
- return unique_id_.Hashcode();
+ ASSERT(!object_.handle().is_null());
+ return object_.Hashcode();
}
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
if (!has_double_value_ && !has_external_reference_value_) {
- ASSERT(!handle_.is_null());
- unique_id_ = UniqueValueId(handle_);
+ ASSERT(!object_.handle().is_null());
+ object_ = Unique<Object>(object_.handle());
}
}
- bool UniqueValueIdsMatch(UniqueValueId other) {
- return !has_double_value_ && !has_external_reference_value_ &&
- unique_id_ == other;
+ Unique<Object> GetUnique() const {
+ return object_;
}
#ifdef DEBUG
@@ -3474,9 +3680,13 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
external_reference_value_ ==
other_constant->external_reference_value_;
} else {
- ASSERT(!handle_.is_null());
- return !other_constant->handle_.is_null() &&
- unique_id_ == other_constant->unique_id_;
+ if (other_constant->has_int32_value_ ||
+ other_constant->has_double_value_ ||
+ other_constant->has_external_reference_value_) {
+ return false;
+ }
+ ASSERT(!object_.handle().is_null());
+ return other_constant->object_ == object_;
}
}
@@ -3486,33 +3696,30 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> {
HConstant(int32_t value,
Representation r = Representation::None(),
bool is_not_in_new_space = true,
- Handle<Object> optional_handle = Handle<Object>::null());
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
HConstant(double value,
Representation r = Representation::None(),
bool is_not_in_new_space = true,
- Handle<Object> optional_handle = Handle<Object>::null());
- HConstant(Handle<Object> handle,
- UniqueValueId unique_id,
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
+ HConstant(Unique<Object> unique,
Representation r,
HType type,
bool is_internalized_string,
bool is_not_in_new_space,
bool is_cell,
bool boolean_value);
- HConstant(Handle<Map> handle,
- UniqueValueId unique_id);
+
explicit HConstant(ExternalReference reference);
void Initialize(Representation r);
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
- // If this is a numerical constant, handle_ either points to to the
+ // If this is a numerical constant, object_ either points to the
// HeapObject the constant originated from or is null. If the
- // constant is non-numeric, handle_ always points to a valid
+ // constant is non-numeric, object_ always points to a valid
// constant HeapObject.
- Handle<Object> handle_;
- UniqueValueId unique_id_;
+ Unique<Object> object_;
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
@@ -3611,6 +3818,11 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
+ void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ set_operand_position(zone, 1, left_pos);
+ set_operand_position(zone, 2, right_pos);
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
@@ -3649,17 +3861,8 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
public:
- HApplyArguments(HValue* function,
- HValue* receiver,
- HValue* length,
- HValue* elements) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, function);
- SetOperandAt(1, receiver);
- SetOperandAt(2, length);
- SetOperandAt(3, elements);
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
+ HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// The length is untagged, all other inputs are tagged.
@@ -3674,6 +3877,19 @@ class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
HValue* elements() { return OperandAt(3); }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
+
+ private:
+ HApplyArguments(HValue* function,
+ HValue* receiver,
+ HValue* length,
+ HValue* elements) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, function);
+ SetOperandAt(1, receiver);
+ SetOperandAt(2, length);
+ SetOperandAt(3, elements);
+ SetAllSideEffects();
+ }
};
@@ -3731,13 +3947,7 @@ class HArgumentsLength V8_FINAL : public HUnaryOperation {
class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
public:
- HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetOperandAt(0, arguments);
- SetOperandAt(1, length);
- SetOperandAt(2, index);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -3754,6 +3964,15 @@ class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
+ private:
+ HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetOperandAt(0, arguments);
+ SetOperandAt(1, length);
+ SetOperandAt(2, index);
+ }
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
@@ -3813,6 +4032,8 @@ class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
protected:
friend class HBoundsCheckBaseIndexInformation;
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
bool skip_check_;
HValue* base_;
@@ -3882,13 +4103,14 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (!to.IsTagged()) {
- ASSERT(to.IsSmiOrInteger32());
- ClearAllSideEffects();
- SetFlag(kUseGVN);
- } else {
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
ClearFlag(kUseGVN);
+ } else {
+ ClearAllSideEffects();
+ SetFlag(kUseGVN);
}
}
@@ -3920,12 +4142,9 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
public:
- static HMathFloorOfDiv* New(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- return new(zone) HMathFloorOfDiv(context, left, right);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
+ HValue*,
+ HValue*);
virtual HValue* EnsureAndPropagateNotMinusZero(
BitVector* visited) V8_OVERRIDE;
@@ -3961,7 +4180,9 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) {
+ if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
SetAllSideEffects();
ClearFlag(kUseGVN);
} else {
@@ -3971,7 +4192,6 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
}
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
-
private:
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -3979,16 +4199,8 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
class HCompareGeneric V8_FINAL : public HBinaryOperation {
public:
- HCompareGeneric(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token)
- : HBinaryOperation(context, left, right, HType::Boolean()),
- token_(token) {
- ASSERT(Token::IsCompareOp(token));
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCompareGeneric, HValue*,
+ HValue*, Token::Value);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
@@ -4002,19 +4214,28 @@ class HCompareGeneric V8_FINAL : public HBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
private:
+ HCompareGeneric(HValue* context,
+ HValue* left,
+ HValue* right,
+ Token::Value token)
+ : HBinaryOperation(context, left, right, HType::Boolean()),
+ token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Token::Value token_;
};
class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token)
- : token_(token) {
- SetFlag(kFlexibleRepresentation);
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value);
+ DECLARE_INSTRUCTION_FACTORY_P5(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value,
+ HBasicBlock*, HBasicBlock*);
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4037,28 +4258,62 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+ set_operand_position(zone, 0, left_pos);
+ set_operand_position(zone, 1, right_pos);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
+ HCompareNumericAndBranch(HValue* left,
+ HValue* right,
+ Token::Value token,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : token_(token) {
+ SetFlag(kFlexibleRepresentation);
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
+ }
+
Representation observed_input_representation_[2];
Token::Value token_;
};
-class HCompareHoleAndBranch V8_FINAL
- : public HTemplateControlInstruction<2, 1> {
+class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- // TODO(danno): make this private when the IfBuilder properly constructs
- // control flow instructions.
- explicit HCompareHoleAndBranch(HValue* object) {
+ DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
+
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return representation();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
+
+ private:
+ HCompareHoleAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {
SetFlag(kFlexibleRepresentation);
SetFlag(kAllowUndefinedAsNaN);
- SetOperandAt(0, object);
}
+};
- DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
- HValue* object() { return OperandAt(0); }
+class HCompareMinusZeroAndBranch V8_FINAL : public HUnaryControlInstruction {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
virtual void InferRepresentation(
HInferRepresentationPhase* h_infer) V8_OVERRIDE;
@@ -4067,23 +4322,42 @@ class HCompareHoleAndBranch V8_FINAL
return representation();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
+
+ private:
+ explicit HCompareMinusZeroAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) {
+ }
};
class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- // TODO(danno): make this private when the IfBuilder properly constructs
- // control flow instructions.
HCompareObjectEqAndBranch(HValue* left,
- HValue* right) {
+ HValue* right,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL) {
+ // TODO(danno): make this private when the IfBuilder properly constructs
+ // control flow instructions.
+ ASSERT(!left->IsConstant() ||
+ (!HConstant::cast(left)->HasInteger32Value() ||
+ HConstant::cast(left)->HasSmiValue()));
+ ASSERT(!right->IsConstant() ||
+ (!HConstant::cast(right)->HasInteger32Value() ||
+ HConstant::cast(right)->HasSmiValue()));
SetOperandAt(0, left);
SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
}
DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
+ HBasicBlock*, HBasicBlock*);
+
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4104,33 +4378,49 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsObjectAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
+
+ private:
+ HIsObjectAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
+
class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsStringAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+
+ private:
+ HIsStringAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsSmiAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
@@ -4140,36 +4430,41 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HIsSmiAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
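// [Sketch] Each *AndBranch instruction in this hunk gains a factory overload
// taking explicit successor blocks, so a builder can materialize a branch
// with its targets in one step instead of patching NULL successors later
// (compare the TODO(danno) notes about the IfBuilder). Illustrative call:
//
//   HIsSmiAndBranch::New(zone, context, value, if_smi, if_not_smi);
//
// The single-value form keeps the old behaviour: both successors start out
// NULL and are set when the instruction comes to terminate a basic block.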
class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsUndetectableAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
+
+ private:
+ HIsUndetectableAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
public:
- HStringCompareAndBranch(HValue* context,
- HValue* left,
- HValue* right,
- Token::Value token)
- : token_(token) {
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch,
+ HValue*,
+ HValue*,
+ Token::Value);
HValue* context() { return OperandAt(0); }
HValue* left() { return OperandAt(1); }
@@ -4189,28 +4484,43 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
private:
+ HStringCompareAndBranch(HValue* context,
+ HValue* left,
+ HValue* right,
+ Token::Value token)
+ : token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
Token::Value token_;
};
class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
public:
+ DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
+ private:
+ HIsConstructCallAndBranch() {}
};
class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
- : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
- HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
- : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
- ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(
+ HHasInstanceTypeAndBranch, HValue*, InstanceType);
+ DECLARE_INSTRUCTION_FACTORY_P3(
+ HHasInstanceTypeAndBranch, HValue*, InstanceType, InstanceType);
InstanceType from() { return from_; }
InstanceType to() { return to_; }
@@ -4224,6 +4534,13 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
private:
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
+ ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
+ }
+
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
};
@@ -4231,23 +4548,22 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HHasCachedArrayIndexAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
+ private:
+ explicit HHasCachedArrayIndexAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
};
class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
public:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -4259,15 +4575,19 @@ class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
+ explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
- : HUnaryControlInstruction(value, NULL, NULL),
- class_name_(class_name) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
+ Handle<String>);
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
@@ -4280,37 +4600,46 @@ class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
Handle<String> class_name() const { return class_name_; }
private:
+ HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ class_name_(class_name) { }
+
Handle<String> class_name_;
};
class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
- : HUnaryControlInstruction(value, NULL, NULL),
- type_literal_(type_literal) { }
+ DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
Handle<String> type_literal() { return type_literal_; }
+ bool compares_number_type() { return compares_number_type_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ return Representation::None();
}
+ virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
private:
+ HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ type_literal_(type_literal) {
+ Heap* heap = type_literal->GetHeap();
+ compares_number_type_ = type_literal->Equals(heap->number_string());
+ }
+
Handle<String> type_literal_;
+ bool compares_number_type_ : 1;
};
class HInstanceOf V8_FINAL : public HBinaryOperation {
public:
- HInstanceOf(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, HType::Boolean()) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -4319,20 +4648,21 @@ class HInstanceOf V8_FINAL : public HBinaryOperation {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
+
+ private:
+ HInstanceOf(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right, HType::Boolean()) {
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
};
class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
public:
- HInstanceOfKnownGlobal(HValue* context,
- HValue* left,
- Handle<JSFunction> right)
- : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal,
+ HValue*,
+ Handle<JSFunction>);
HValue* context() { return OperandAt(0); }
HValue* left() { return OperandAt(1); }
@@ -4345,27 +4675,17 @@ class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
private:
- Handle<JSFunction> function_;
-};
-
-
-// TODO(mstarzinger): This instruction should be modeled as a load of the map
-// field followed by a load of the instance size field once HLoadNamedField is
-// flexible enough to accommodate byte-field loads.
-class HInstanceSize V8_FINAL : public HTemplateInstruction<1> {
- public:
- explicit HInstanceSize(HValue* object) {
- SetOperandAt(0, object);
- set_representation(Representation::Integer32());
- }
-
- HValue* object() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ HInstanceOfKnownGlobal(HValue* context,
+ HValue* left,
+ Handle<JSFunction> right)
+ : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
}
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize)
+ Handle<JSFunction> function_;
};
@@ -4408,26 +4728,6 @@ class HPower V8_FINAL : public HTemplateInstruction<2> {
};
-class HRandom V8_FINAL : public HTemplateInstruction<1> {
- public:
- explicit HRandom(HValue* global_object) {
- SetOperandAt(0, global_object);
- set_representation(Representation::Double());
- }
-
- HValue* global_object() { return OperandAt(0); }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Random)
-
- private:
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
-};
-
-
class HAdd V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
@@ -4437,8 +4737,9 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
+ // We also do not commute (pointer + offset).
virtual bool IsCommutative() const V8_OVERRIDE {
- return !representation().IsTagged();
+ return !representation().IsTagged() && !representation().IsExternal();
}
virtual HValue* EnsureAndPropagateNotMinusZero(
@@ -4459,10 +4760,25 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation {
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (to.IsTagged()) ClearFlag(kAllowUndefinedAsNaN);
- HArithmeticBinaryOperation::RepresentationChanged(to);
+ if (to.IsTagged()) {
+ SetGVNFlag(kChangesNewSpacePromotion);
+ ClearFlag(kAllowUndefinedAsNaN);
+ }
+ if (to.IsTagged() &&
+ (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
+ left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
+ ClearAllSideEffects();
+ SetFlag(kUseGVN);
+ }
}
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE;
+
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
@@ -4522,10 +4838,12 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
HValue* right);
static HInstruction* NewImul(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- HMul* mul = new(zone) HMul(context, left, right);
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ HInstruction* instr = HMul::New(zone, context, left, right);
+ if (!instr->IsMul()) return instr;
+ HMul* mul = HMul::cast(instr);
// TODO(mstarzinger): Prevent bailout on minus zero for imul.
mul->AssumeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
@@ -4548,6 +4866,8 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation {
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
+ bool MulMinusOne();
+
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
@@ -4568,10 +4888,7 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg);
-
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+ HValue* right);
bool HasPowerOf2Divisor() {
if (right()->IsConstant() &&
@@ -4605,15 +4922,10 @@ class HMod V8_FINAL : public HArithmeticBinaryOperation {
private:
HMod(HValue* context,
HValue* left,
- HValue* right,
- Maybe<int> fixed_right_arg)
- : HArithmeticBinaryOperation(context, left, right),
- fixed_right_arg_(fixed_right_arg) {
+ HValue* right) : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
-
- const Maybe<int> fixed_right_arg_;
};
@@ -4884,9 +5196,11 @@ class HSar V8_FINAL : public HBitwiseBinaryOperation {
class HRor V8_FINAL : public HBitwiseBinaryOperation {
public:
- HRor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {
- ChangeRepresentation(Representation::Integer32());
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ return new(zone) HRor(context, left, right);
}
virtual void UpdateRepresentation(Representation new_rep,
@@ -4900,6 +5214,12 @@ class HRor V8_FINAL : public HBitwiseBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HRor(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {
+ ChangeRepresentation(Representation::Integer32());
+ }
};
@@ -4971,12 +5291,7 @@ class HParameter V8_FINAL : public HTemplateInstruction<0> {
class HCallStub V8_FINAL : public HUnaryCall {
public:
- HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
- : HUnaryCall(context, argument_count),
- major_key_(major_key),
- transcendental_type_(TranscendentalCache::kNumberOfCaches) {
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
CodeStub::Major major_key() { return major_key_; }
HValue* context() { return value(); }
@@ -4993,6 +5308,12 @@ class HCallStub V8_FINAL : public HUnaryCall {
DECLARE_CONCRETE_INSTRUCTION(CallStub)
private:
+ HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
+ : HUnaryCall(context, argument_count),
+ major_key_(major_key),
+ transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+ }
+
CodeStub::Major major_key_;
TranscendentalCache::Type transcendental_type_;
};
@@ -5036,24 +5357,20 @@ class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
public:
- HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
- : cell_(cell), details_(details), unique_id_() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnGlobalVars);
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadGlobalCell, Handle<Cell>,
+ PropertyDetails);
- Handle<Cell> cell() const { return cell_; }
+ Unique<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual intptr_t Hashcode() V8_OVERRIDE {
- return unique_id_.Hashcode();
+ return cell_.Hashcode();
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- unique_id_ = UniqueValueId(cell_);
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ cell_ = Unique<Cell>(cell_.handle());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -5064,32 +5381,28 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
- return unique_id_ == b->unique_id_;
+ return cell_ == HLoadGlobalCell::cast(other)->cell_;
}
private:
+ HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
+ : cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnGlobalVars);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
- Handle<Cell> cell_;
+ Unique<Cell> cell_;
PropertyDetails details_;
- UniqueValueId unique_id_;
};
class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
- HLoadGlobalGeneric(HValue* context,
- HValue* global_object,
- Handle<Object> name,
- bool for_typeof)
- : name_(name),
- for_typeof_(for_typeof) {
- SetOperandAt(0, context);
- SetOperandAt(1, global_object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
+ Handle<Object>, bool);
HValue* context() { return OperandAt(0); }
HValue* global_object() { return OperandAt(1); }
@@ -5105,6 +5418,18 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
private:
+ HLoadGlobalGeneric(HValue* context,
+ HValue* global_object,
+ Handle<Object> name,
+ bool for_typeof)
+ : name_(name),
+ for_typeof_(for_typeof) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, global_object);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<Object> name_;
bool for_typeof_;
};
@@ -5117,9 +5442,11 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
HValue* size,
HType type,
PretenureFlag pretenure_flag,
- InstanceType instance_type) {
+ InstanceType instance_type,
+ Handle<AllocationSite> allocation_site =
+ Handle<AllocationSite>::null()) {
return new(zone) HAllocate(context, size, type, pretenure_flag,
- instance_type);
+ instance_type, allocation_site);
}
// Maximum instance size for which allocations will be inlined.
@@ -5192,7 +5519,9 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
HValue* size,
HType type,
PretenureFlag pretenure_flag,
- InstanceType instance_type)
+ InstanceType instance_type,
+ Handle<AllocationSite> allocation_site =
+ Handle<AllocationSite>::null())
: HTemplateInstruction<2>(type),
dominating_allocate_(NULL),
filler_free_space_size_(NULL),
@@ -5220,6 +5549,14 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
}
clear_next_map_word_ = pretenure_flag == NOT_TENURED &&
AllocationSite::CanTrack(instance_type);
+
+ if (FLAG_trace_pretenuring) {
+ PrintF("HAllocate with AllocationSite %p %s\n",
+ allocation_site.is_null()
+ ? static_cast<void*>(NULL)
+ : static_cast<void*>(*allocation_site),
+ pretenure_flag == TENURED ? "tenured" : "not tenured");
+ }
}
void UpdateSize(HValue* size) {
@@ -5275,21 +5612,21 @@ class HStoreCodeEntry V8_FINAL: public HTemplateInstruction<2> {
};
-class HInnerAllocatedObject V8_FINAL: public HTemplateInstruction<1> {
+class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<2> {
public:
static HInnerAllocatedObject* New(Zone* zone,
HValue* context,
HValue* value,
- int offset,
+ HValue* offset,
HType type = HType::Tagged()) {
return new(zone) HInnerAllocatedObject(value, offset, type);
}
HValue* base_object() { return OperandAt(0); }
- int offset() { return offset_; }
+ HValue* offset() { return OperandAt(1); }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
+ return index == 0 ? Representation::Tagged() : Representation::Integer32();
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -5297,15 +5634,16 @@ class HInnerAllocatedObject V8_FINAL: public HTemplateInstruction<1> {
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
private:
- HInnerAllocatedObject(HValue* value, int offset, HType type = HType::Tagged())
- : HTemplateInstruction<1>(type), offset_(offset) {
+ HInnerAllocatedObject(HValue* value,
+ HValue* offset,
+ HType type = HType::Tagged())
+ : HTemplateInstruction<2>(type) {
ASSERT(value->IsAllocate());
SetOperandAt(0, value);
+ SetOperandAt(1, offset);
set_type(type);
set_representation(Representation::Tagged());
}
-
- int offset_;
};
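// [Sketch] Turning the offset from an immediate int into a second operand
// lets inner pointers into a fresh allocation be computed at run time. A
// constant offset would now be wrapped first; an illustrative call site
// (names assumed, not copied from one):
//
//   HValue* offset = HConstant::New(zone, context, FixedArray::kHeaderSize);
//   HInnerAllocatedObject::New(zone, context, allocation, offset);
//
// matching the Integer32 requirement on operand 1 above.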
@@ -5344,7 +5682,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
Handle<PropertyCell>, PropertyDetails);
- Handle<PropertyCell> cell() const { return cell_; }
+ Unique<PropertyCell> cell() const { return cell_; }
bool RequiresHoleCheck() {
return !details_.IsDontDelete() || details_.IsReadOnly();
}
@@ -5352,6 +5690,10 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
return StoringValueNeedsWriteBarrier(value());
}
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
+ cell_ = Unique<PropertyCell>(cell_.handle());
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5364,12 +5706,12 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
Handle<PropertyCell> cell,
PropertyDetails details)
: HUnaryOperation(value),
- cell_(cell),
+ cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
SetGVNFlag(kChangesGlobalVars);
}
- Handle<PropertyCell> cell_;
+ Unique<PropertyCell> cell_;
PropertyDetails details_;
};
@@ -5580,6 +5922,18 @@ class HObjectAccess V8_FINAL {
kDouble, HeapNumber::kValueOffset, Representation::Double());
}
+ static HObjectAccess ForHeapNumberValueLowestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset,
+ Representation::Integer32());
+ }
+
+ static HObjectAccess ForHeapNumberValueHighestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset + kIntSize,
+ Representation::Integer32());
+ }
+
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
@@ -5601,12 +5955,9 @@ class HObjectAccess V8_FINAL {
? Representation::Smi() : Representation::Tagged());
}
- static HObjectAccess ForAllocationSiteTransitionInfo() {
- return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset);
- }
-
- static HObjectAccess ForAllocationSiteWeakNext() {
- return HObjectAccess(kInobject, AllocationSite::kWeakNextOffset);
+ static HObjectAccess ForAllocationSiteOffset(int offset) {
+ ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize);
+ return HObjectAccess(kInobject, offset);
}
static HObjectAccess ForAllocationSiteList() {
@@ -5620,6 +5971,12 @@ class HObjectAccess V8_FINAL {
FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
}
+ static HObjectAccess ForStringHashField() {
+ return HObjectAccess(kInobject,
+ String::kHashFieldOffset,
+ Representation::Integer32());
+ }
+
static HObjectAccess ForStringLength() {
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
return HObjectAccess(
@@ -5628,6 +5985,14 @@ class HObjectAccess V8_FINAL {
FLAG_track_fields ? Representation::Smi() : Representation::Tagged());
}
+ static HObjectAccess ForConsStringFirst() {
+ return HObjectAccess(kInobject, ConsString::kFirstOffset);
+ }
+
+ static HObjectAccess ForConsStringSecond() {
+ return HObjectAccess(kInobject, ConsString::kSecondOffset);
+ }
+
static HObjectAccess ForPropertiesPointer() {
return HObjectAccess(kInobject, JSObject::kPropertiesOffset);
}
@@ -5669,6 +6034,18 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
+ static HObjectAccess ForMapInstanceSize() {
+ return HObjectAccess(kInobject,
+ Map::kInstanceSizeOffset,
+ Representation::UInteger8());
+ }
+
+ static HObjectAccess ForMapInstanceType() {
+ return HObjectAccess(kInobject,
+ Map::kInstanceTypeOffset,
+ Representation::UInteger8());
+ }
+
static HObjectAccess ForPropertyCellValue() {
return HObjectAccess(kInobject, PropertyCell::kValueOffset);
}
@@ -5708,6 +6085,43 @@ class HObjectAccess V8_FINAL {
// Create an access for the payload of a Cell or JSGlobalPropertyCell.
static HObjectAccess ForCellPayload(Isolate* isolate);
+ static HObjectAccess ForJSTypedArrayLength() {
+ return HObjectAccess::ForJSObjectOffset(JSTypedArray::kLengthOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferBackingStore() {
+ return HObjectAccess::ForJSObjectOffset(
+ JSArrayBuffer::kBackingStoreOffset, Representation::External());
+ }
+
+ static HObjectAccess ForExternalArrayExternalPointer() {
+ return HObjectAccess::ForJSObjectOffset(
+ ExternalArray::kExternalPointerOffset, Representation::External());
+ }
+
+ static HObjectAccess ForJSArrayBufferViewWeakNext() {
+ return HObjectAccess::ForJSObjectOffset(JSArrayBufferView::kWeakNextOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferWeakFirstView() {
+ return HObjectAccess::ForJSObjectOffset(
+ JSArrayBuffer::kWeakFirstViewOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferViewBuffer() {
+ return HObjectAccess::ForJSObjectOffset(JSArrayBufferView::kBufferOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferViewByteOffset() {
+ return HObjectAccess::ForJSObjectOffset(
+ JSArrayBufferView::kByteOffsetOffset);
+ }
+
+ static HObjectAccess ForJSArrayBufferViewByteLength() {
+ return HObjectAccess::ForJSObjectOffset(
+ JSArrayBufferView::kByteLengthOffset);
+ }
+
void PrintTo(StringStream* stream);
inline bool Equals(HObjectAccess that) const {
@@ -5744,8 +6158,8 @@ class HObjectAccess V8_FINAL {
}
class PortionField : public BitField<Portion, 0, 3> {};
- class RepresentationField : public BitField<Representation::Kind, 3, 3> {};
- class OffsetField : public BitField<int, 6, 26> {};
+ class RepresentationField : public BitField<Representation::Kind, 3, 4> {};
+ class OffsetField : public BitField<int, 7, 25> {};
uint32_t value_; // encodes portion, representation, and offset
Handle<String> name_;
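
[Editorial sketch, not part of the patch.] The hunk above widens RepresentationField from 3 bits to 4 and narrows OffsetField to 25 bits, so portion (3) + representation (4) + offset (25) still pack into the single 32-bit value_ word, presumably to make room for the additional representation kinds (e.g. UInteger8) introduced elsewhere in this patch. A minimal standalone sketch of this style of bit packing, assuming a simplified stand-in for V8's actual BitField template (all names below are illustrative):

#include <cassert>
#include <cstdint>

// Illustrative reimplementation of a BitField<T, shift, size> encoder.
template <typename T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    assert(static_cast<uint32_t>(value) < (1u << size));
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

// Mirrors the layout after the change: 3 portion bits, 4 representation
// bits, 25 offset bits: 32 bits total.
typedef BitFieldSketch<int, 0, 3> PortionSketch;
typedef BitFieldSketch<int, 3, 4> RepresentationSketch;
typedef BitFieldSketch<int, 7, 25> OffsetSketch;

int main() {
  uint32_t word = PortionSketch::encode(2) |
                  RepresentationSketch::encode(9) |
                  OffsetSketch::encode(0x10);
  assert(PortionSketch::decode(word) == 2);
  assert(RepresentationSketch::decode(word) == 9);
  assert(OffsetSketch::decode(word) == 0x10);
  return 0;
}
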
@@ -5798,7 +6212,12 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
SetOperandAt(0, object);
Representation representation = access.representation();
- if (representation.IsSmi()) {
+ if (representation.IsInteger8() ||
+ representation.IsUInteger8() ||
+ representation.IsInteger16() ||
+ representation.IsUInteger16()) {
+ set_representation(Representation::Integer32());
+ } else if (representation.IsSmi()) {
set_type(HType::Smi());
set_representation(representation);
} else if (representation.IsDouble() ||
@@ -5823,13 +6242,8 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
- : name_(name) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*,
+ Handle<Object>);
HValue* context() { return OperandAt(0); }
HValue* object() { return OperandAt(1); }
@@ -5844,18 +6258,21 @@ class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
private:
+ HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
+ : name_(name) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, object);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
Handle<Object> name_;
};
class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
public:
- explicit HLoadFunctionPrototype(HValue* function)
- : HUnaryOperation(function) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnCalls);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
HValue* function() { return OperandAt(0); }
@@ -5867,6 +6284,14 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ explicit HLoadFunctionPrototype(HValue* function)
+ : HUnaryOperation(function) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnCalls);
+ }
};
class ArrayInstructionInterface {
@@ -6058,14 +6483,8 @@ class HLoadKeyed V8_FINAL
class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
- HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*,
+ HValue*);
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
@@ -6080,6 +6499,15 @@ class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
virtual HValue* Canonicalize() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
+
+ private:
+ HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, obj);
+ SetOperandAt(1, key);
+ SetOperandAt(2, context);
+ SetAllSideEffects();
+ }
};
@@ -6100,11 +6528,19 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
- } else if (index == 1 &&
- (field_representation().IsDouble() ||
- field_representation().IsSmi() ||
- field_representation().IsInteger32())) {
- return field_representation();
+ } else if (index == 1) {
+ if (field_representation().IsInteger8() ||
+ field_representation().IsUInteger8() ||
+ field_representation().IsInteger16() ||
+ field_representation().IsUInteger16() ||
+ field_representation().IsInteger32()) {
+ return Representation::Integer32();
+ } else if (field_representation().IsDouble() ||
+ field_representation().IsSmi()) {
+ return field_representation();
+ } else if (field_representation().IsExternal()) {
+ return Representation::External();
+ }
}
return Representation::Tagged();
}
@@ -6195,19 +6631,9 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
- HStoreNamedGeneric(HValue* context,
- HValue* object,
- Handle<String> name,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : name_(name),
- strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, object);
- SetOperandAt(1, value);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
+ Handle<String>, HValue*,
+ StrictModeFlag);
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
@@ -6223,6 +6649,19 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
private:
+ HStoreNamedGeneric(HValue* context,
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ StrictModeFlag strict_mode_flag)
+ : name_(name),
+ strict_mode_flag_(strict_mode_flag) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, value);
+ SetOperandAt(2, context);
+ SetAllSideEffects();
+ }
+
Handle<String> name_;
StrictModeFlag strict_mode_flag_;
};
@@ -6374,18 +6813,8 @@ class HStoreKeyed V8_FINAL
class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
- HStoreKeyedGeneric(HValue* context,
- HValue* object,
- HValue* key,
- HValue* value,
- StrictModeFlag strict_mode_flag)
- : strict_mode_flag_(strict_mode_flag) {
- SetOperandAt(0, object);
- SetOperandAt(1, key);
- SetOperandAt(2, value);
- SetOperandAt(3, context);
- SetAllSideEffects();
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
+ HValue*, HValue*, StrictModeFlag);
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
@@ -6403,6 +6832,19 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
private:
+ HStoreKeyedGeneric(HValue* context,
+ HValue* object,
+ HValue* key,
+ HValue* value,
+ StrictModeFlag strict_mode_flag)
+ : strict_mode_flag_(strict_mode_flag) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, key);
+ SetOperandAt(2, value);
+ SetOperandAt(3, context);
+ SetAllSideEffects();
+ }
+
StrictModeFlag strict_mode_flag_;
};
@@ -6424,25 +6866,20 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* context() { return OperandAt(1); }
- Handle<Map> original_map() { return original_map_; }
- Handle<Map> transitioned_map() { return transitioned_map_; }
+ Unique<Map> original_map() { return original_map_; }
+ Unique<Map> transitioned_map() { return transitioned_map_; }
ElementsKind from_kind() { return from_kind_; }
ElementsKind to_kind() { return to_kind_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- original_map_unique_id_ = UniqueValueId(original_map_);
- transitioned_map_unique_id_ = UniqueValueId(transitioned_map_);
- }
-
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
- return original_map_unique_id_ == instr->original_map_unique_id_ &&
- transitioned_map_unique_id_ == instr->transitioned_map_unique_id_;
+ return original_map_ == instr->original_map_ &&
+ transitioned_map_ == instr->transitioned_map_;
}
private:
@@ -6450,10 +6887,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
HValue* object,
Handle<Map> original_map,
Handle<Map> transitioned_map)
- : original_map_(original_map),
- transitioned_map_(transitioned_map),
- original_map_unique_id_(),
- transitioned_map_unique_id_(),
+ : original_map_(Unique<Map>(original_map)),
+ transitioned_map_(Unique<Map>(transitioned_map)),
from_kind_(original_map->elements_kind()),
to_kind_(transitioned_map->elements_kind()) {
SetOperandAt(0, object);
@@ -6467,10 +6902,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
}
- Handle<Map> original_map_;
- Handle<Map> transitioned_map_;
- UniqueValueId original_map_unique_id_;
- UniqueValueId transitioned_map_unique_id_;
+ Unique<Map> original_map_;
+ Unique<Map> transitioned_map_;
ElementsKind from_kind_;
ElementsKind to_kind_;
};
@@ -6499,14 +6932,26 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
: HBinaryOperation(context, left, right, HType::String()), flags_(flags) {
set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
+ if (MightHaveSideEffects()) {
+ SetAllSideEffects();
+ } else {
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
}
- // No side-effects except possible allocation.
- // NOTE: this instruction _does not_ call ToString() on its inputs.
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ bool MightHaveSideEffects() const {
+ return flags_ != STRING_ADD_CHECK_NONE &&
+ (left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved());
+ }
+
+  // No side-effects except possible allocation:
+  // NOTE: when flags_ is set to STRING_ADD_CHECK_NONE, this instruction does
+  // not call ToString() on its inputs.
+ virtual bool IsDeletable() const V8_OVERRIDE {
+ return !MightHaveSideEffects();
+ }
const StringAddFlags flags_;
};
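
[Editorial sketch, not part of the patch.] With the constructor change above, HStringAdd participates in GVN only when MightHaveSideEffects() is false, i.e. when no observable ToString() conversion can run. A small truth-table sketch of that predicate, with an illustrative enum standing in for V8's StringAddFlags and plain booleans standing in for ToStringCanBeObserved():

#include <cstdio>

enum AddFlagsSketch { CHECK_NONE, CHECK_LEFT, CHECK_RIGHT, CHECK_BOTH };

// Mirrors HStringAdd::MightHaveSideEffects: a user-visible toString()
// can only run when a conversion check is requested and some operand's
// conversion is observable.
bool MightHaveSideEffects(AddFlagsSketch flags,
                          bool left_observable,
                          bool right_observable) {
  return flags != CHECK_NONE && (left_observable || right_observable);
}

int main() {
  printf("%d\n", MightHaveSideEffects(CHECK_NONE, true, true));    // 0
  printf("%d\n", MightHaveSideEffects(CHECK_BOTH, false, false));  // 0
  printf("%d\n", MightHaveSideEffects(CHECK_LEFT, true, false));   // 1
  return 0;
}
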
@@ -6514,12 +6959,9 @@ class HStringAdd V8_FINAL : public HBinaryOperation {
class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
public:
- static HStringCharCodeAt* New(Zone* zone,
- HValue* context,
- HValue* string,
- HValue* index) {
- return new(zone) HStringCharCodeAt(context, string, index);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
+ HValue*,
+ HValue*);
virtual Representation RequiredInputRepresentation(int index) {
// The index is supposed to be Integer32.
@@ -6549,6 +6991,7 @@ class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnStringChars);
SetGVNFlag(kChangesNewSpacePromotion);
}
@@ -6623,6 +7066,24 @@ class HMaterializedLiteral : public HTemplateInstruction<V> {
class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
+ Handle<FixedArray>,
+ Handle<String>,
+ Handle<String>,
+ int);
+
+ HValue* context() { return OperandAt(0); }
+ Handle<FixedArray> literals() { return literals_; }
+ Handle<String> pattern() { return pattern_; }
+ Handle<String> flags() { return flags_; }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
+
+ private:
HRegExpLiteral(HValue* context,
Handle<FixedArray> literals,
Handle<String> pattern,
@@ -6637,18 +7098,6 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
set_type(HType::JSObject());
}
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> literals() { return literals_; }
- Handle<String> pattern() { return pattern_; }
- Handle<String> flags() { return flags_; }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
-
- private:
Handle<FixedArray> literals_;
Handle<String> pattern_;
Handle<String> flags_;
@@ -6657,20 +7106,9 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
public:
- HFunctionLiteral(HValue* context,
- Handle<SharedFunctionInfo> shared,
- bool pretenure)
- : HTemplateInstruction<1>(HType::JSObject()),
- shared_info_(shared),
- pretenure_(pretenure),
- has_no_literals_(shared->num_literals() == 0),
- is_generator_(shared->is_generator()),
- language_mode_(shared->language_mode()) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral,
+ Handle<SharedFunctionInfo>,
+ bool);
HValue* context() { return OperandAt(0); }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -6686,6 +7124,20 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
LanguageMode language_mode() const { return language_mode_; }
private:
+ HFunctionLiteral(HValue* context,
+ Handle<SharedFunctionInfo> shared,
+ bool pretenure)
+ : HTemplateInstruction<1>(HType::JSObject()),
+ shared_info_(shared),
+ pretenure_(pretenure),
+ has_no_literals_(shared->num_literals() == 0),
+ is_generator_(shared->is_generator()),
+ language_mode_(shared->language_mode()) {
+ SetOperandAt(0, context);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
Handle<SharedFunctionInfo> shared_info_;
@@ -6698,11 +7150,7 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
class HTypeof V8_FINAL : public HTemplateInstruction<2> {
public:
- explicit HTypeof(HValue* context, HValue* value) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
@@ -6716,6 +7164,12 @@ class HTypeof V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(Typeof)
private:
+ explicit HTypeof(HValue* context, HValue* value) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ set_representation(Representation::Tagged());
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -6760,8 +7214,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
const Runtime::Function* function = HCallRuntime::cast(value)->function();
- ASSERT(function->function_id == Runtime::kCreateObjectLiteral ||
- function->function_id == Runtime::kCreateObjectLiteralShallow);
+ ASSERT(function->function_id == Runtime::kCreateObjectLiteral);
#endif
}
@@ -6771,9 +7224,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation {
class HValueOf V8_FINAL : public HUnaryOperation {
public:
- explicit HValueOf(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HValueOf, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -6782,16 +7233,17 @@ class HValueOf V8_FINAL : public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(ValueOf)
private:
+ explicit HValueOf(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ }
+
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
class HDateField V8_FINAL : public HUnaryOperation {
public:
- HDateField(HValue* date, Smi* index)
- : HUnaryOperation(date), index_(index) {
- set_representation(Representation::Tagged());
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
Smi* index() const { return index_; }
@@ -6802,35 +7254,98 @@ class HDateField V8_FINAL : public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(DateField)
private:
+ HDateField(HValue* date, Smi* index)
+ : HUnaryOperation(date), index_(index) {
+ set_representation(Representation::Tagged());
+ }
+
Smi* index_;
};
-class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> {
+class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
public:
- HSeqStringSetChar(String::Encoding encoding,
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ String::Encoding encoding,
+ HValue* string,
+ HValue* index);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return (index == 0) ? Representation::Tagged()
+ : Representation::Integer32();
+ }
+
+ String::Encoding encoding() const { return encoding_; }
+ HValue* string() const { return OperandAt(0); }
+ HValue* index() const { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ return encoding() == HSeqStringGetChar::cast(other)->encoding();
+ }
+
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
+ if (encoding() == String::ONE_BYTE_ENCODING) {
+ return new(zone) Range(0, String::kMaxOneByteCharCode);
+ } else {
+ ASSERT_EQ(String::TWO_BYTE_ENCODING, encoding());
+ return new(zone) Range(0, String::kMaxUtf16CodeUnit);
+ }
+ }
+
+ private:
+ HSeqStringGetChar(String::Encoding encoding,
HValue* string,
- HValue* index,
- HValue* value) : encoding_(encoding) {
+ HValue* index) : encoding_(encoding) {
SetOperandAt(0, string);
SetOperandAt(1, index);
- SetOperandAt(2, value);
- set_representation(Representation::Tagged());
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnStringChars);
}
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ String::Encoding encoding_;
+};
+
+
+class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(
+ HSeqStringSetChar, String::Encoding,
+ HValue*, HValue*, HValue*);
+
String::Encoding encoding() { return encoding_; }
- HValue* string() { return OperandAt(0); }
- HValue* index() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
+ HValue* context() { return OperandAt(0); }
+ HValue* string() { return OperandAt(1); }
+ HValue* index() { return OperandAt(2); }
+ HValue* value() { return OperandAt(3); }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return (index == 0) ? Representation::Tagged()
+ return (index <= 1) ? Representation::Tagged()
: Representation::Integer32();
}
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
private:
+ HSeqStringSetChar(HValue* context,
+ String::Encoding encoding,
+ HValue* string,
+ HValue* index,
+ HValue* value) : encoding_(encoding) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, string);
+ SetOperandAt(2, index);
+ SetOperandAt(3, value);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesStringChars);
+ }
+
String::Encoding encoding_;
};
@@ -6855,6 +7370,8 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
protected:
+ virtual int RedefinedOperandIndex() { return 0; }
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return true;
}
@@ -6874,11 +7391,7 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
public:
- static HForInPrepareMap* New(Zone* zone,
- HValue* context,
- HValue* object) {
- return new(zone) HForInPrepareMap(context, object);
- }
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
diff --git a/chromium/v8/src/hydrogen-load-elimination.cc b/chromium/v8/src/hydrogen-load-elimination.cc
new file mode 100644
index 00000000000..f3b574847f8
--- /dev/null
+++ b/chromium/v8/src/hydrogen-load-elimination.cc
@@ -0,0 +1,510 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-alias-analysis.h"
+#include "hydrogen-load-elimination.h"
+#include "hydrogen-instructions.h"
+#include "hydrogen-flow-engine.h"
+
+namespace v8 {
+namespace internal {
+
+#define GLOBAL true
+#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
+
+static const int kMaxTrackedFields = 16;
+static const int kMaxTrackedObjects = 5;
+
+// An element in the field approximation list.
+class HFieldApproximation : public ZoneObject {
+ public: // Just a data blob.
+ HValue* object_;
+ HLoadNamedField* last_load_;
+ HValue* last_value_;
+ HFieldApproximation* next_;
+
+ // Recursively copy the entire linked list of field approximations.
+ HFieldApproximation* Copy(Zone* zone) {
+ if (this == NULL) return NULL;
+ HFieldApproximation* copy = new(zone) HFieldApproximation();
+ copy->object_ = this->object_;
+ copy->last_load_ = this->last_load_;
+ copy->last_value_ = this->last_value_;
+ copy->next_ = this->next_->Copy(zone);
+ return copy;
+ }
+};
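
[Editorial sketch, not part of the patch.] Note that Copy above is invoked through a possibly-NULL this (the `this == NULL` test on its first line), a shortcut this codebase relies on but which is formally undefined behavior in standard C++. A minimal sketch of the same recursive list copy with the null test moved to the argument; plain heap allocation stands in for zone allocation, and all names are illustrative:

#include <cstddef>

struct Approximation {
  int object_id;       // stands in for HValue* object_
  int last_value_id;   // stands in for HValue* last_value_
  Approximation* next;
};

// The same recursive copy as HFieldApproximation::Copy, but with the
// NULL test on the argument instead of a call through a NULL `this`.
static Approximation* CopyList(const Approximation* list) {
  if (list == NULL) return NULL;
  Approximation* copy = new Approximation(*list);
  copy->next = CopyList(list->next);
  return copy;
}

int main() {
  Approximation tail = { 2, 20, NULL };
  Approximation head = { 1, 10, &tail };
  // Copies leak in this sketch; the real code zone-allocates.
  Approximation* copy = CopyList(&head);
  return copy->next->object_id == 2 ? 0 : 1;
}
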
+
+
+// The main data structure used during load/store elimination. Each in-object
+// field is tracked separately. For each field, store a list of known field
+// values for known objects.
+class HLoadEliminationTable : public ZoneObject {
+ public:
+ HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
+ : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
+
+ // The main processing of instructions.
+ HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kLoadNamedField: {
+ HLoadNamedField* l = HLoadNamedField::cast(instr);
+ TRACE((" process L%d field %d (o%d)\n",
+ instr->id(),
+ FieldOf(l->access()),
+ l->object()->ActualValue()->id()));
+ HValue* result = load(l);
+ if (result != instr) {
+ // The load can be replaced with a previous load or a value.
+ TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
+ instr->DeleteAndReplaceWith(result);
+ }
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ HStoreNamedField* s = HStoreNamedField::cast(instr);
+ TRACE((" process S%d field %d (o%d) = v%d\n",
+ instr->id(),
+ FieldOf(s->access()),
+ s->object()->ActualValue()->id(),
+ s->value()->id()));
+ HValue* result = store(s);
+ if (result == NULL) {
+ // The store is redundant. Remove it.
+ TRACE((" remove S%d\n", instr->id()));
+ instr->DeleteAndReplaceWith(NULL);
+ }
+ break;
+ }
+ default: {
+ if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ TRACE((" kill-all i%d\n", instr->id()));
+ Kill();
+ break;
+ }
+ if (instr->CheckGVNFlag(kChangesMaps)) {
+ TRACE((" kill-maps i%d\n", instr->id()));
+ KillOffset(JSObject::kMapOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ TRACE((" kill-elements-kind i%d\n", instr->id()));
+ KillOffset(JSObject::kMapOffset);
+ KillOffset(JSObject::kElementsOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ TRACE((" kill-elements i%d\n", instr->id()));
+ KillOffset(JSObject::kElementsOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesOsrEntries)) {
+ TRACE((" kill-osr i%d\n", instr->id()));
+ Kill();
+ }
+ }
+ // Improvements possible:
+ // - learn from HCheckMaps for field 0
+ // - remove unobservable stores (write-after-write)
+ // - track cells
+ // - track globals
+ // - track roots
+ }
+ return this;
+ }
+
+  // Support for global analysis with HFlowEngine: Copy state to successor block.
+ HLoadEliminationTable* Copy(HBasicBlock* succ, Zone* zone) {
+ HLoadEliminationTable* copy =
+ new(zone) HLoadEliminationTable(zone, aliasing_);
+ copy->EnsureFields(fields_.length());
+ for (int i = 0; i < fields_.length(); i++) {
+ copy->fields_[i] = fields_[i]->Copy(zone);
+ }
+ if (FLAG_trace_load_elimination) {
+ TRACE((" copy-to B%d\n", succ->block_id()));
+ copy->Print();
+ }
+ return copy;
+ }
+
+ // Support for global analysis with HFlowEngine: Merge this state with
+ // the other incoming state.
+ HLoadEliminationTable* Merge(HBasicBlock* succ,
+ HLoadEliminationTable* that, Zone* zone) {
+ if (that->fields_.length() < fields_.length()) {
+ // Drop fields not in the other table.
+ fields_.Rewind(that->fields_.length());
+ }
+ for (int i = 0; i < fields_.length(); i++) {
+ // Merge the field approximations for like fields.
+ HFieldApproximation* approx = fields_[i];
+ HFieldApproximation* prev = NULL;
+ while (approx != NULL) {
+ // TODO(titzer): Merging is O(N * M); sort?
+ HFieldApproximation* other = that->Find(approx->object_, i);
+ if (other == NULL || !Equal(approx->last_value_, other->last_value_)) {
+ // Kill an entry that doesn't agree with the other value.
+ if (prev != NULL) {
+ prev->next_ = approx->next_;
+ } else {
+ fields_[i] = approx->next_;
+ }
+ approx = approx->next_;
+ continue;
+ }
+ prev = approx;
+ approx = approx->next_;
+ }
+ }
+ return this;
+ }
+
+ friend class HLoadEliminationEffects; // Calls Kill() and others.
+ friend class HLoadEliminationPhase;
+
+ private:
+ // Process a load instruction, updating internal table state. If a previous
+ // load or store for this object and field exists, return the new value with
+ // which the load should be replaced. Otherwise, return {instr}.
+ HValue* load(HLoadNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return instr;
+
+ HValue* object = instr->object()->ActualValue();
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (approx->last_value_ == NULL) {
+ // Load is not redundant. Fill out a new entry.
+ approx->last_load_ = instr;
+ approx->last_value_ = instr;
+ return instr;
+ } else {
+ // Eliminate the load. Reuse previously stored value or load instruction.
+ return approx->last_value_;
+ }
+ }
+
+  // Process a store instruction, updating internal table state. If a previous
+  // store to the same object and field makes this store redundant (e.g.
+  // because the stored values are the same), return NULL to indicate that the
+  // store can be removed. Otherwise, return {instr}.
+ HValue* store(HStoreNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return KillIfMisaligned(instr);
+
+ HValue* object = instr->object()->ActualValue();
+ HValue* value = instr->value();
+
+ // Kill non-equivalent may-alias entries.
+ KillFieldInternal(object, field, value);
+ if (instr->has_transition()) {
+ // A transition store alters the map of the object.
+ // TODO(titzer): remember the new map (a constant) for the object.
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ }
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (Equal(approx->last_value_, value)) {
+ // The store is redundant because the field already has this value.
+ return NULL;
+ } else {
+ // The store is not redundant. Update the entry.
+ approx->last_load_ = NULL;
+ approx->last_value_ = value;
+ return instr;
+ }
+ }
+
+ // Kill everything in this table.
+ void Kill() {
+ fields_.Rewind(0);
+ }
+
+ // Kill all entries matching the given offset.
+ void KillOffset(int offset) {
+ int field = FieldOf(offset);
+ if (field >= 0 && field < fields_.length()) {
+ fields_[field] = NULL;
+ }
+ }
+
+ // Kill all entries aliasing the given store.
+ void KillStore(HStoreNamedField* s) {
+ int field = FieldOf(s->access());
+ if (field >= 0) {
+ KillFieldInternal(s->object()->ActualValue(), field, s->value());
+ } else {
+ KillIfMisaligned(s);
+ }
+ }
+
+ // Kill multiple entries in the case of a misaligned store.
+ HValue* KillIfMisaligned(HStoreNamedField* instr) {
+ HObjectAccess access = instr->access();
+ if (access.IsInobject()) {
+ int offset = access.offset();
+ if ((offset % kPointerSize) != 0) {
+ // Kill the field containing the first word of the access.
+ HValue* object = instr->object()->ActualValue();
+ int field = offset / kPointerSize;
+ KillFieldInternal(object, field, NULL);
+
+ // Kill the next field in case of overlap.
+ int size = access.representation().size();
+ int next_field = (offset + size - 1) / kPointerSize;
+ if (next_field != field) KillFieldInternal(object, next_field, NULL);
+ }
+ }
+ return instr;
+ }
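
[Editorial sketch, not part of the patch.] KillIfMisaligned above conservatively kills the first and the last pointer-size word overlapped by an unaligned in-object access. A short standalone sketch of that arithmetic, assuming 4-byte heap words (the constant and function names are illustrative):

#include <cstdio>

static const int kPointerSizeSketch = 4;  // assume a 32-bit heap word

// Mirrors KillIfMisaligned's arithmetic: the first and last heap words
// overlapped by an unaligned in-object access of `size` bytes.
void OverlappedFields(int offset, int size, int* first, int* last) {
  *first = offset / kPointerSizeSketch;
  *last = (offset + size - 1) / kPointerSizeSketch;
}

int main() {
  int first, last;
  // A 4-byte access at offset 6 spans bytes 6..9, so it straddles
  // words 1 and 2: both tracked fields must be invalidated.
  OverlappedFields(6, 4, &first, &last);
  printf("kill fields %d and %d\n", first, last);
  return 0;
}
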
+
+ // Find an entry for the given object and field pair.
+ HFieldApproximation* Find(HValue* object, int field) {
+ // Search for a field approximation for this object.
+ HFieldApproximation* approx = fields_[field];
+ while (approx != NULL) {
+ if (aliasing_->MustAlias(object, approx->object_)) return approx;
+ approx = approx->next_;
+ }
+ return NULL;
+ }
+
+ // Find or create an entry for the given object and field pair.
+ HFieldApproximation* FindOrCreate(HValue* object, int field) {
+ EnsureFields(field + 1);
+
+ // Search for a field approximation for this object.
+ HFieldApproximation* approx = fields_[field];
+ int count = 0;
+ while (approx != NULL) {
+ if (aliasing_->MustAlias(object, approx->object_)) return approx;
+ count++;
+ approx = approx->next_;
+ }
+
+ if (count >= kMaxTrackedObjects) {
+ // Pull the last entry off the end and repurpose it for this object.
+ approx = ReuseLastApproximation(field);
+ } else {
+ // Allocate a new entry.
+ approx = new(zone_) HFieldApproximation();
+ }
+
+ // Insert the entry at the head of the list.
+ approx->object_ = object;
+ approx->last_load_ = NULL;
+ approx->last_value_ = NULL;
+ approx->next_ = fields_[field];
+ fields_[field] = approx;
+
+ return approx;
+ }
+
+ // Kill all entries for a given field that _may_ alias the given object
+ // and do _not_ have the given value.
+ void KillFieldInternal(HValue* object, int field, HValue* value) {
+ if (field >= fields_.length()) return; // Nothing to do.
+
+ HFieldApproximation* approx = fields_[field];
+ HFieldApproximation* prev = NULL;
+ while (approx != NULL) {
+ if (aliasing_->MayAlias(object, approx->object_)) {
+ if (!Equal(approx->last_value_, value)) {
+ // Kill an aliasing entry that doesn't agree on the value.
+ if (prev != NULL) {
+ prev->next_ = approx->next_;
+ } else {
+ fields_[field] = approx->next_;
+ }
+ approx = approx->next_;
+ continue;
+ }
+ }
+ prev = approx;
+ approx = approx->next_;
+ }
+ }
+
+ bool Equal(HValue* a, HValue* b) {
+ if (a == b) return true;
+ if (a != NULL && b != NULL && a->CheckFlag(HValue::kUseGVN)) {
+ return a->Equals(b);
+ }
+ return false;
+ }
+
+ // Remove the last approximation for a field so that it can be reused.
+ // We reuse the last entry because it was the first inserted and is thus
+ // farthest away from the current instruction.
+ HFieldApproximation* ReuseLastApproximation(int field) {
+ HFieldApproximation* approx = fields_[field];
+ ASSERT(approx != NULL);
+
+ HFieldApproximation* prev = NULL;
+ while (approx->next_ != NULL) {
+ prev = approx;
+ approx = approx->next_;
+ }
+ if (prev != NULL) prev->next_ = NULL;
+ return approx;
+ }
+
+ // Compute the field index for the given object access; -1 if not tracked.
+ int FieldOf(HObjectAccess access) {
+ return access.IsInobject() ? FieldOf(access.offset()) : -1;
+ }
+
+ // Compute the field index for the given in-object offset; -1 if not tracked.
+ int FieldOf(int offset) {
+ if (offset >= kMaxTrackedFields * kPointerSize) return -1;
+ // TODO(titzer): track misaligned loads in a separate list?
+ if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses.
+ return offset / kPointerSize;
+ }
+
+ // Ensure internal storage for the given number of fields.
+ void EnsureFields(int num_fields) {
+ if (fields_.length() < num_fields) {
+ fields_.AddBlock(NULL, num_fields - fields_.length(), zone_);
+ }
+ }
+
+ // Print this table to stdout.
+ void Print() {
+ for (int i = 0; i < fields_.length(); i++) {
+ PrintF(" field %d: ", i);
+ for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
+ PrintF("[o%d =", a->object_->id());
+ if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
+ if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
+ PrintF("] ");
+ }
+ PrintF("\n");
+ }
+ }
+
+ Zone* zone_;
+ ZoneList<HFieldApproximation*> fields_;
+ HAliasAnalyzer* aliasing_;
+};
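
[Editorial sketch, not part of the patch.] Read together, load() and store() above make the table a value cache keyed by (object, field): a load of a field whose value is already known is replaced by that value, and a store of the value already recorded is dropped. A self-contained sketch of that policy over straight-line code, using a std::map in place of the zone-allocated approximation lists and omitting the may-alias kills (all names are illustrative):

#include <cstdio>
#include <map>
#include <utility>

// Key: (object id, field index). Value: id of the last known value.
typedef std::map<std::pair<int, int>, int> FieldTable;

// Returns the id that a load of (object, field) can be replaced with,
// or -1 if the load must stay and now defines the field's known value.
int ProcessLoad(FieldTable* table, int object, int field, int load_id) {
  std::pair<int, int> key(object, field);
  FieldTable::iterator it = table->find(key);
  if (it != table->end()) return it->second;  // redundant load
  (*table)[key] = load_id;
  return -1;
}

// Returns true if the store is redundant (field already holds value_id).
bool ProcessStore(FieldTable* table, int object, int field, int value_id) {
  std::pair<int, int> key(object, field);
  FieldTable::iterator it = table->find(key);
  if (it != table->end() && it->second == value_id) return true;
  (*table)[key] = value_id;
  return false;
}

int main() {
  FieldTable table;
  ProcessLoad(&table, /* object */ 1, /* field */ 0, /* load id */ 10);
  // A second load of the same field is replaced by value 10.
  printf("replace with v%d\n", ProcessLoad(&table, 1, 0, 11));
  // Storing the value just loaded writes what is already there.
  printf("store redundant: %d\n", ProcessStore(&table, 1, 0, 10));
  return 0;
}
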
+
+
+// Support for HFlowEngine: collect store effects within loops.
+class HLoadEliminationEffects : public ZoneObject {
+ public:
+ explicit HLoadEliminationEffects(Zone* zone)
+ : zone_(zone),
+ maps_stored_(false),
+ fields_stored_(false),
+ elements_stored_(false),
+ stores_(5, zone) { }
+
+ inline bool Disabled() {
+ return false; // Effects are _not_ disabled.
+ }
+
+ // Process a possibly side-effecting instruction.
+ void Process(HInstruction* instr, Zone* zone) {
+ switch (instr->opcode()) {
+ case HValue::kStoreNamedField: {
+ stores_.Add(HStoreNamedField::cast(instr), zone_);
+ break;
+ }
+ case HValue::kOsrEntry: {
+ // Kill everything. Loads must not be hoisted past the OSR entry.
+ maps_stored_ = true;
+ fields_stored_ = true;
+ elements_stored_ = true;
+ }
+ default: {
+ fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesMaps);
+ maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind);
+ elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer);
+ }
+ }
+ }
+
+ // Apply these effects to the given load elimination table.
+ void Apply(HLoadEliminationTable* table) {
+ if (fields_stored_) {
+ table->Kill();
+ return;
+ }
+ if (maps_stored_) {
+ table->KillOffset(JSObject::kMapOffset);
+ }
+ if (elements_stored_) {
+ table->KillOffset(JSObject::kElementsOffset);
+ }
+
+ // Kill non-agreeing fields for each store contained in these effects.
+ for (int i = 0; i < stores_.length(); i++) {
+ table->KillStore(stores_[i]);
+ }
+ }
+
+ // Union these effects with the other effects.
+ void Union(HLoadEliminationEffects* that, Zone* zone) {
+ maps_stored_ |= that->maps_stored_;
+ fields_stored_ |= that->fields_stored_;
+ elements_stored_ |= that->elements_stored_;
+ for (int i = 0; i < that->stores_.length(); i++) {
+ stores_.Add(that->stores_[i], zone);
+ }
+ }
+
+ private:
+ Zone* zone_;
+ bool maps_stored_ : 1;
+ bool fields_stored_ : 1;
+ bool elements_stored_ : 1;
+ ZoneList<HStoreNamedField*> stores_;
+};
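
[Editorial sketch, not part of the patch.] HLoadEliminationEffects above summarizes a loop body so the table at the loop header can be pre-killed without iterating the body to a fixed point. A sketch of the Apply step under the same three coarse categories; the per-store KillStore replay is omitted and all names are illustrative:

#include <cstdio>

struct EffectsSketch {
  bool maps_stored;
  bool fields_stored;
  bool elements_stored;
};

struct TableSketch {
  bool knows_map;
  bool knows_elements;
  bool knows_other_fields;
};

// Mirrors HLoadEliminationEffects::Apply: an arbitrary in-object field
// store anywhere in the loop invalidates everything; otherwise only the
// map and elements entries touched by the loop are killed.
void Apply(const EffectsSketch& e, TableSketch* t) {
  if (e.fields_stored) {
    t->knows_map = t->knows_elements = t->knows_other_fields = false;
    return;
  }
  if (e.maps_stored) t->knows_map = false;
  if (e.elements_stored) t->knows_elements = false;
}

int main() {
  TableSketch t = { true, true, true };
  EffectsSketch loop_body = { true, false, false };  // a map write only
  Apply(loop_body, &t);
  printf("map:%d elements:%d other:%d\n",
         t.knows_map, t.knows_elements, t.knows_other_fields);
  return 0;
}
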
+
+
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
+void HLoadEliminationPhase::Run() {
+ HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects>
+ engine(graph(), zone());
+ HAliasAnalyzer aliasing;
+ HLoadEliminationTable* table =
+ new(zone()) HLoadEliminationTable(zone(), &aliasing);
+
+ if (GLOBAL) {
+ // Perform a global analysis.
+ engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+ } else {
+ // Perform only local analysis.
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ table->Kill();
+ engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
+ }
+ }
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/allocation-inl.h b/chromium/v8/src/hydrogen-load-elimination.h
index d32db4b17fc..ef6f71fa113 100644
--- a/chromium/v8/src/allocation-inl.h
+++ b/chromium/v8/src/hydrogen-load-elimination.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,25 +25,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_ALLOCATION_INL_H_
-#define V8_ALLOCATION_INL_H_
+#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_
+#define V8_HYDROGEN_LOAD_ELIMINATION_H_
-#include "allocation.h"
+#include "hydrogen.h"
namespace v8 {
namespace internal {
+class HLoadEliminationPhase : public HPhase {
+ public:
+ explicit HLoadEliminationPhase(HGraph* graph)
+ : HPhase("H_Load elimination", graph) { }
-void* PreallocatedStorageAllocationPolicy::New(size_t size) {
- return Isolate::Current()->PreallocatedStorageNew(size);
-}
+ void Run();
-
-void PreallocatedStorageAllocationPolicy::Delete(void* p) {
- return Isolate::Current()->PreallocatedStorageDelete(p);
-}
+ private:
+ void EliminateLoads(HBasicBlock* block);
+};
} } // namespace v8::internal
-#endif // V8_ALLOCATION_INL_H_
+#endif // V8_HYDROGEN_LOAD_ELIMINATION_H_
diff --git a/chromium/v8/src/marking-thread.cc b/chromium/v8/src/hydrogen-mark-unreachable.cc
index 58bca3662dd..d7c5ed2b180 100644
--- a/chromium/v8/src/marking-thread.cc
+++ b/chromium/v8/src/hydrogen-mark-unreachable.cc
@@ -25,65 +25,53 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "marking-thread.h"
-
-#include "v8.h"
-
-#include "isolate.h"
-#include "v8threads.h"
+#include "hydrogen-mark-unreachable.h"
namespace v8 {
namespace internal {
-MarkingThread::MarkingThread(Isolate* isolate)
- : Thread("MarkingThread"),
- isolate_(isolate),
- heap_(isolate->heap()),
- start_marking_semaphore_(0),
- end_marking_semaphore_(0),
- stop_semaphore_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
- id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
-}
-
-
-Atomic32 MarkingThread::id_counter_ = -1;
-
-
-void MarkingThread::Run() {
- Isolate::SetIsolateThreadLocals(isolate_, NULL);
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- while (true) {
- start_marking_semaphore_.Wait();
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_.Signal();
- return;
+void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() {
+ // If there is unreachable code in the graph, propagate the unreachable marks
+ // using a fixed-point iteration.
+ bool changed = true;
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+ while (changed) {
+ changed = false;
+ for (int i = 0; i < blocks->length(); i++) {
+ HBasicBlock* block = blocks->at(i);
+ if (!block->IsReachable()) continue;
+ bool is_reachable = blocks->at(0) == block;
+ for (HPredecessorIterator it(block); !it.Done(); it.Advance()) {
+ HBasicBlock* predecessor = it.Current();
+ // A block is reachable if one of its predecessors is reachable,
+      // doesn't deoptimize, and either is known to transfer control to the
+ // block or has a control flow instruction for which the next block
+ // cannot be determined.
+ if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) {
+ HBasicBlock* pred_succ;
+ bool known_pred_succ =
+ predecessor->end()->KnownSuccessorBlock(&pred_succ);
+ if (!known_pred_succ || pred_succ == block) {
+ is_reachable = true;
+ break;
+ }
+ }
+ if (block->is_osr_entry()) {
+ is_reachable = true;
+ }
+ }
+ if (!is_reachable) {
+ block->MarkUnreachable();
+ changed = true;
+ }
}
-
- end_marking_semaphore_.Signal();
}
}
-void MarkingThread::Stop() {
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_marking_semaphore_.Signal();
- stop_semaphore_.Wait();
- Join();
-}
-
-
-void MarkingThread::StartMarking() {
- start_marking_semaphore_.Signal();
-}
-
-
-void MarkingThread::WaitForMarkingThread() {
- end_marking_semaphore_.Wait();
+void HMarkUnreachableBlocksPhase::Run() {
+ MarkUnreachableBlocks();
}
} } // namespace v8::internal
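
[Editorial sketch, not part of the patch.] The pass above computes reachability as a fixed point: a block stays reachable only if some reachable, non-deoptimizing predecessor can transfer control to it, or it is an OSR entry. A standalone sketch that reaches the same fixed point on plain graphs by propagating forward from the entry block, leaving out the deoptimization and OSR special cases (graph shape and names are illustrative):

#include <cstdio>
#include <vector>

// One fixed-point iteration: block 0 is the entry; any other block is
// reachable if some reachable predecessor lists it as a successor.
std::vector<bool> MarkReachable(
    const std::vector<std::vector<int> >& succ) {
  std::vector<bool> reachable(succ.size(), false);
  reachable[0] = true;
  bool changed = true;
  while (changed) {
    changed = false;
    for (size_t b = 0; b < succ.size(); ++b) {
      if (!reachable[b]) continue;
      for (size_t i = 0; i < succ[b].size(); ++i) {
        if (!reachable[succ[b][i]]) {
          reachable[succ[b][i]] = true;
          changed = true;
        }
      }
    }
  }
  return reachable;
}

int main() {
  // 0 -> 1, 1 -> 1 (a loop); block 2 has no incoming edge.
  std::vector<std::vector<int> > succ(3);
  succ[0].push_back(1);
  succ[1].push_back(1);
  std::vector<bool> reachable = MarkReachable(succ);
  for (size_t b = 0; b < reachable.size(); ++b) {
    printf("B%d %s\n", static_cast<int>(b),
           reachable[b] ? "reachable" : "unreachable");
  }
  return 0;
}
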
diff --git a/chromium/v8/src/hydrogen-deoptimizing-mark.h b/chromium/v8/src/hydrogen-mark-unreachable.h
index 7d6e6e4bda3..9ecc6e9f164 100644
--- a/chromium/v8/src/hydrogen-deoptimizing-mark.h
+++ b/chromium/v8/src/hydrogen-mark-unreachable.h
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_HYDROGEN_DEOPTIMIZING_MARK_H_
-#define V8_HYDROGEN_DEOPTIMIZING_MARK_H_
+#ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_
+#define V8_HYDROGEN_MARK_UNREACHABLE_H_
#include "hydrogen.h"
@@ -34,23 +34,20 @@ namespace v8 {
namespace internal {
-// Mark all blocks that are dominated by an unconditional soft deoptimize to
-// prevent code motion across those blocks.
-class HPropagateDeoptimizingMarkPhase : public HPhase {
+class HMarkUnreachableBlocksPhase : public HPhase {
public:
- explicit HPropagateDeoptimizingMarkPhase(HGraph* graph)
- : HPhase("H_Propagate deoptimizing mark", graph) { }
+ explicit HMarkUnreachableBlocksPhase(HGraph* graph)
+ : HPhase("H_Mark unreachable blocks", graph) { }
void Run();
private:
- void MarkAsDeoptimizing();
- void NullifyUnreachableInstructions();
+ void MarkUnreachableBlocks();
- DISALLOW_COPY_AND_ASSIGN(HPropagateDeoptimizingMarkPhase);
+ DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase);
};
} } // namespace v8::internal
-#endif // V8_HYDROGEN_DEOPTIMIZING_MARK_H_
+#endif // V8_HYDROGEN_MARK_UNREACHABLE_H_
diff --git a/chromium/v8/src/hydrogen-minus-zero.cc b/chromium/v8/src/hydrogen-minus-zero.cc
index 28ae6eba401..316e0f5077c 100644
--- a/chromium/v8/src/hydrogen-minus-zero.cc
+++ b/chromium/v8/src/hydrogen-minus-zero.cc
@@ -49,6 +49,14 @@ void HComputeMinusZeroChecksPhase::Run() {
PropagateMinusZeroChecks(change->value());
visited_.Clear();
}
+ } else if (current->IsCompareMinusZeroAndBranch()) {
+ HCompareMinusZeroAndBranch* check =
+ HCompareMinusZeroAndBranch::cast(current);
+ if (check->value()->representation().IsSmiOrInteger32()) {
+ ASSERT(visited_.IsEmpty());
+ PropagateMinusZeroChecks(check->value());
+ visited_.Clear();
+ }
}
}
}
diff --git a/chromium/v8/src/hydrogen-osr.cc b/chromium/v8/src/hydrogen-osr.cc
index 6b1df1e7a5b..6e39df6aa95 100644
--- a/chromium/v8/src/hydrogen-osr.cc
+++ b/chromium/v8/src/hydrogen-osr.cc
@@ -37,19 +37,8 @@ bool HOsrBuilder::HasOsrEntryAt(IterationStatement* statement) {
}
-// Build a new loop header block and set it as the current block.
-HBasicBlock *HOsrBuilder::BuildLoopEntry() {
- HBasicBlock* loop_entry = builder_->CreateLoopHeaderBlock();
- builder_->current_block()->Goto(loop_entry);
- builder_->set_current_block(loop_entry);
- return loop_entry;
-}
-
-
-HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
- IterationStatement* statement) {
- // Check if there is an OSR here first.
- if (!HasOsrEntryAt(statement)) return BuildLoopEntry();
+HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
+ ASSERT(HasOsrEntryAt(statement));
Zone* zone = builder_->zone();
HGraph* graph = builder_->graph();
@@ -63,12 +52,12 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
osr_entry_ = graph->CreateBasicBlock();
HValue* true_value = graph->GetConstantTrue();
- HBranch* test = new(zone) HBranch(true_value, ToBooleanStub::Types(),
- non_osr_entry, osr_entry_);
- builder_->current_block()->Finish(test);
+ HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+ non_osr_entry, osr_entry_);
+ builder_->FinishCurrentBlock(test);
HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
- non_osr_entry->Goto(loop_predecessor);
+ builder_->Goto(non_osr_entry, loop_predecessor);
builder_->set_current_block(osr_entry_);
osr_entry_->set_osr_entry();
@@ -108,12 +97,12 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
builder_->Add<HOsrEntry>(osr_entry_id);
HContext* context = builder_->Add<HContext>();
environment->BindContext(context);
- builder_->current_block()->Goto(loop_predecessor);
+ builder_->Goto(loop_predecessor);
loop_predecessor->SetJoinId(statement->EntryId());
builder_->set_current_block(loop_predecessor);
// Create the final loop entry
- osr_loop_entry_ = BuildLoopEntry();
+ osr_loop_entry_ = builder_->BuildLoopEntry();
return osr_loop_entry_;
}
diff --git a/chromium/v8/src/hydrogen-osr.h b/chromium/v8/src/hydrogen-osr.h
index 5014a75bdaf..ae72ce650c5 100644
--- a/chromium/v8/src/hydrogen-osr.h
+++ b/chromium/v8/src/hydrogen-osr.h
@@ -45,9 +45,10 @@ class HOsrBuilder : public ZoneObject {
osr_entry_(NULL),
osr_loop_entry_(NULL),
osr_values_(NULL) { }
+
// Creates the loop entry block for the given statement, setting up OSR
// entries as necessary, and sets the current block to the new block.
- HBasicBlock* BuildPossibleOsrLoopEntry(IterationStatement* statement);
+ HBasicBlock* BuildOsrLoopEntry(IterationStatement* statement);
// Process the hydrogen graph after it has been completed, performing
// any OSR-specific cleanups or changes.
@@ -61,10 +62,9 @@ class HOsrBuilder : public ZoneObject {
return unoptimized_frame_slots_;
}
- private:
- HBasicBlock* BuildLoopEntry();
bool HasOsrEntryAt(IterationStatement* statement);
+ private:
int unoptimized_frame_slots_;
HOptimizedGraphBuilder* builder_;
HBasicBlock* osr_entry_;
diff --git a/chromium/v8/src/hydrogen-redundant-phi.cc b/chromium/v8/src/hydrogen-redundant-phi.cc
index 9c38200577d..1263833dac9 100644
--- a/chromium/v8/src/hydrogen-redundant-phi.cc
+++ b/chromium/v8/src/hydrogen-redundant-phi.cc
@@ -31,37 +31,18 @@ namespace v8 {
namespace internal {
void HRedundantPhiEliminationPhase::Run() {
- // We do a simple fixed point iteration without any work list, because
- // machine-generated JavaScript can lead to a very dense Hydrogen graph with
- // an enormous work list and will consequently result in OOM. Experiments
- // showed that this simple algorithm is good enough, and even e.g. tracking
- // the set or range of blocks to consider is not a real improvement.
- bool need_another_iteration;
+ // Gather all phis from all blocks first.
const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- ZoneList<HPhi*> redundant_phis(blocks->length(), zone());
- do {
- need_another_iteration = false;
- for (int i = 0; i < blocks->length(); ++i) {
- HBasicBlock* block = blocks->at(i);
- for (int j = 0; j < block->phis()->length(); j++) {
- HPhi* phi = block->phis()->at(j);
- HValue* replacement = phi->GetRedundantReplacement();
- if (replacement != NULL) {
- // Remember phi to avoid concurrent modification of the block's phis.
- redundant_phis.Add(phi, zone());
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- value->SetOperandAt(it.index(), replacement);
- need_another_iteration |= value->IsPhi();
- }
- }
- }
- for (int i = 0; i < redundant_phis.length(); i++) {
- block->RemovePhi(redundant_phis[i]);
- }
- redundant_phis.Clear();
+ ZoneList<HPhi*> all_phis(blocks->length(), zone());
+ for (int i = 0; i < blocks->length(); ++i) {
+ HBasicBlock* block = blocks->at(i);
+ for (int j = 0; j < block->phis()->length(); j++) {
+ all_phis.Add(block->phis()->at(j), zone());
}
- } while (need_another_iteration);
+ }
+
+ // Iteratively reduce all phis in the list.
+ ProcessPhis(&all_phis);
#if DEBUG
// Make sure that we *really* removed all redundant phis.
@@ -73,4 +54,35 @@ void HRedundantPhiEliminationPhase::Run() {
#endif
}
+
+void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) {
+ ProcessPhis(block->phis());
+}
+
+
+void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) {
+ bool updated;
+ do {
+    // Iteratively replace all redundant phis in the given list.
+ updated = false;
+ for (int i = 0; i < phis->length(); i++) {
+ HPhi* phi = phis->at(i);
+ if (phi->CheckFlag(HValue::kIsDead)) continue; // Already replaced.
+
+ HValue* replacement = phi->GetRedundantReplacement();
+ if (replacement != NULL) {
+ phi->SetFlag(HValue::kIsDead);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* value = it.value();
+ value->SetOperandAt(it.index(), replacement);
+ // Iterate again if used in another non-dead phi.
+ updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead);
+ }
+ phi->block()->RemovePhi(phi);
+ }
+ }
+ } while (updated);
+}
+
+
} } // namespace v8::internal
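
[Editorial sketch, not part of the patch.] ProcessPhis above loops until no replacement feeds another live phi. The test it leans on, HPhi::GetRedundantReplacement, reduces a phi to its single non-self input when one exists; a minimal sketch of that reduction over integer value ids (the -1 sentinel and all names are illustrative):

#include <cstdio>
#include <vector>

// A phi is redundant if all inputs are one other value or the phi itself.
// Returns that value, or -1 if the phi is genuinely needed.
int RedundantReplacement(int phi_id, const std::vector<int>& inputs) {
  int replacement = -1;
  for (size_t i = 0; i < inputs.size(); ++i) {
    if (inputs[i] == phi_id) continue;        // ignore self-references
    if (replacement == -1) {
      replacement = inputs[i];
    } else if (inputs[i] != replacement) {
      return -1;                              // two distinct real inputs
    }
  }
  return replacement;
}

int main() {
  std::vector<int> inputs;
  inputs.push_back(7);
  inputs.push_back(7);
  inputs.push_back(3);  // the phi's own id
  // A phi v3 with inputs [v7, v7, v3] collapses to v7.
  printf("replace v3 with v%d\n", RedundantReplacement(3, inputs));
  return 0;
}
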
diff --git a/chromium/v8/src/hydrogen-redundant-phi.h b/chromium/v8/src/hydrogen-redundant-phi.h
index 6291fa5b787..960ae69c95f 100644
--- a/chromium/v8/src/hydrogen-redundant-phi.h
+++ b/chromium/v8/src/hydrogen-redundant-phi.h
@@ -42,8 +42,11 @@ class HRedundantPhiEliminationPhase : public HPhase {
: HPhase("H_Redundant phi elimination", graph) { }
void Run();
+ void ProcessBlock(HBasicBlock* block);
private:
+ void ProcessPhis(const ZoneList<HPhi*>* phis);
+
DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase);
};
diff --git a/chromium/v8/src/hydrogen-representation-changes.cc b/chromium/v8/src/hydrogen-representation-changes.cc
index 960113782f8..07fc8be38c0 100644
--- a/chromium/v8/src/hydrogen-representation-changes.cc
+++ b/chromium/v8/src/hydrogen-representation-changes.cc
@@ -61,6 +61,11 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
if (new_value == NULL) {
new_value = new(graph()->zone()) HChange(
value, to, is_truncating_to_smi, is_truncating_to_int);
+ if (use_value->operand_position(use_index) != RelocInfo::kNoPosition) {
+ new_value->set_position(use_value->operand_position(use_index));
+ } else {
+ ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
+ }
}
new_value->InsertBefore(next);
diff --git a/chromium/v8/src/hydrogen.cc b/chromium/v8/src/hydrogen.cc
index 0b4fb26c199..cdf69e7c72c 100644
--- a/chromium/v8/src/hydrogen.cc
+++ b/chromium/v8/src/hydrogen.cc
@@ -30,21 +30,24 @@
#include <algorithm>
#include "v8.h"
+#include "allocation-site-scopes.h"
#include "codegen.h"
#include "full-codegen.h"
#include "hashmap.h"
#include "hydrogen-bce.h"
#include "hydrogen-bch.h"
#include "hydrogen-canonicalize.h"
+#include "hydrogen-check-elimination.h"
#include "hydrogen-dce.h"
#include "hydrogen-dehoist.h"
-#include "hydrogen-deoptimizing-mark.h"
#include "hydrogen-environment-liveness.h"
#include "hydrogen-escape-analysis.h"
#include "hydrogen-infer-representation.h"
#include "hydrogen-infer-types.h"
+#include "hydrogen-load-elimination.h"
#include "hydrogen-gvn.h"
#include "hydrogen-mark-deoptimize.h"
+#include "hydrogen-mark-unreachable.h"
#include "hydrogen-minus-zero.h"
#include "hydrogen-osr.h"
#include "hydrogen-range-analysis.h"
@@ -55,6 +58,7 @@
#include "hydrogen-uint32-analysis.h"
#include "lithium-allocator.h"
#include "parser.h"
+#include "runtime.h"
#include "scopeinfo.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -94,7 +98,7 @@ HBasicBlock::HBasicBlock(HGraph* graph)
parent_loop_header_(NULL),
inlined_entry_block_(NULL),
is_inline_return_target_(false),
- is_deoptimizing_(false),
+ is_reachable_(true),
dominates_loop_successors_(false),
is_osr_entry_(false) { }
@@ -104,6 +108,11 @@ Isolate* HBasicBlock::isolate() const {
}
+void HBasicBlock::MarkUnreachable() {
+ is_reachable_ = false;
+}
+
+
void HBasicBlock::AttachLoopInformation() {
ASSERT(!IsLoopHeader());
loop_information_ = new(zone()) HLoopInformation(this, zone());
@@ -132,16 +141,25 @@ void HBasicBlock::RemovePhi(HPhi* phi) {
}
-void HBasicBlock::AddInstruction(HInstruction* instr) {
+void HBasicBlock::AddInstruction(HInstruction* instr, int position) {
ASSERT(!IsStartBlock() || !IsFinished());
ASSERT(!instr->IsLinked());
ASSERT(!IsFinished());
+ if (position != RelocInfo::kNoPosition) {
+ instr->set_position(position);
+ }
if (first_ == NULL) {
ASSERT(last_environment() != NULL);
ASSERT(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
+ if (position != RelocInfo::kNoPosition) {
+ entry->set_position(position);
+ } else {
+ ASSERT(!FLAG_emit_opt_code_positions ||
+ !graph()->info()->IsOptimizing());
+ }
first_ = last_ = entry;
}
instr->InsertAfter(last_);
@@ -192,9 +210,9 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
}
-void HBasicBlock::Finish(HControlInstruction* end) {
+void HBasicBlock::Finish(HControlInstruction* end, int position) {
ASSERT(!IsFinished());
- AddInstruction(end);
+ AddInstruction(end, position);
end_ = end;
for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
it.Current()->RegisterPredecessor(this);
@@ -203,35 +221,44 @@ void HBasicBlock::Finish(HControlInstruction* end) {
void HBasicBlock::Goto(HBasicBlock* block,
+ int position,
FunctionState* state,
bool add_simulate) {
bool drop_extra = state != NULL &&
state->inlining_kind() == DROP_EXTRA_ON_RETURN;
if (block->IsInlineReturnTarget()) {
- AddInstruction(new(zone()) HLeaveInlined());
+ HEnvironment* env = last_environment();
+ int argument_count = env->arguments_environment()->parameter_count();
+ AddInstruction(new(zone())
+ HLeaveInlined(state->entry(), argument_count),
+ position);
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
}
- if (add_simulate) AddNewSimulate(BailoutId::None());
+ if (add_simulate) AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(block);
- Finish(instr);
+ Finish(instr, position);
}
void HBasicBlock::AddLeaveInlined(HValue* return_value,
- FunctionState* state) {
+ FunctionState* state,
+ int position) {
HBasicBlock* target = state->function_return();
bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN;
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
- AddInstruction(new(zone()) HLeaveInlined());
+ HEnvironment* env = last_environment();
+ int argument_count = env->arguments_environment()->parameter_count();
+ AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count),
+ position);
UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
last_environment()->Push(return_value);
- AddNewSimulate(BailoutId::None());
+ AddNewSimulate(BailoutId::None(), position);
HGoto* instr = new(zone()) HGoto(target);
- Finish(instr);
+ Finish(instr, position);
}
@@ -579,7 +606,8 @@ void HGraph::Verify(bool do_full_verify) const {
block->predecessors()->first()->last_environment()->ast_id();
for (int k = 0; k < block->predecessors()->length(); k++) {
HBasicBlock* predecessor = block->predecessors()->at(k);
- ASSERT(predecessor->end()->IsGoto());
+ ASSERT(predecessor->end()->IsGoto() ||
+ predecessor->end()->IsDeoptimize());
ASSERT(predecessor->last_environment()->ast_id() == id);
}
}
@@ -622,10 +650,21 @@ HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
// Can't pass GetInvalidContext() to HConstant::New, because that will
// recursively call GetConstant
HConstant* constant = HConstant::New(zone(), NULL, value);
- constant->InsertAfter(GetConstantUndefined());
+ constant->InsertAfter(entry_block()->first());
pointer->set(constant);
+ return constant;
+ }
+ return ReinsertConstantIfNecessary(pointer->get());
+}
+
+
+HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) {
+ if (!constant->IsLinked()) {
+ // The constant was removed from the graph. Reinsert.
+ constant->ClearFlag(HValue::kIsDead);
+ constant->InsertAfter(entry_block()->first());
}
- return pointer->get();
+ return constant;
}
@@ -648,21 +687,21 @@ HConstant* HGraph::GetConstantMinus1() {
HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
- isolate()->factory()->name##_value(), \
- UniqueValueId::name##_value(isolate()->heap()), \
+ Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
Representation::Tagged(), \
htype, \
false, \
true, \
false, \
boolean_value); \
- constant->InsertAfter(GetConstantUndefined()); \
+ constant->InsertAfter(entry_block()->first()); \
constant_##name##_.set(constant); \
} \
- return constant_##name##_.get(); \
+ return ReinsertConstantIfNecessary(constant_##name##_.get()); \
}
+DEFINE_GET_CONSTANT(Undefined, undefined, HType::Tagged(), false)
DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true)
DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false)
DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false)
@@ -705,23 +744,23 @@ bool HGraph::IsStandardConstant(HConstant* constant) {
}
-HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position)
+HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
: builder_(builder),
- position_(position),
finished_(false),
- deopt_then_(false),
- deopt_else_(false),
did_then_(false),
did_else_(false),
+ did_else_if_(false),
did_and_(false),
did_or_(false),
captured_(false),
needs_compare_(true),
+ pending_merge_block_(false),
split_edge_merge_block_(NULL),
- merge_block_(NULL) {
+ merge_at_join_blocks_(NULL),
+ normal_merge_at_join_block_count_(0),
+ deopt_merge_at_join_block_count_(0) {
HEnvironment* env = builder->environment();
first_true_block_ = builder->CreateBasicBlock(env->Copy());
- last_true_block_ = NULL;
first_false_block_ = builder->CreateBasicBlock(env->Copy());
}
@@ -730,27 +769,42 @@ HGraphBuilder::IfBuilder::IfBuilder(
HGraphBuilder* builder,
HIfContinuation* continuation)
: builder_(builder),
- position_(RelocInfo::kNoPosition),
finished_(false),
- deopt_then_(false),
- deopt_else_(false),
did_then_(false),
did_else_(false),
+ did_else_if_(false),
did_and_(false),
did_or_(false),
captured_(false),
needs_compare_(false),
+ pending_merge_block_(false),
first_true_block_(NULL),
first_false_block_(NULL),
split_edge_merge_block_(NULL),
- merge_block_(NULL) {
+ merge_at_join_blocks_(NULL),
+ normal_merge_at_join_block_count_(0),
+ deopt_merge_at_join_block_count_(0) {
continuation->Continue(&first_true_block_,
- &first_false_block_,
- &position_);
+ &first_false_block_);
}
-void HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) {
+HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
+ HControlInstruction* compare) {
+ ASSERT(did_then_ == did_else_);
+ if (did_else_) {
+    // Handle if-then-else-if chains by starting a fresh clause.
+ did_else_if_ = true;
+ did_else_ = false;
+ did_then_ = false;
+ did_and_ = false;
+ did_or_ = false;
+ pending_merge_block_ = false;
+ split_edge_merge_block_ = NULL;
+ HEnvironment* env = builder_->environment();
+ first_true_block_ = builder_->CreateBasicBlock(env->Copy());
+ first_false_block_ = builder_->CreateBasicBlock(env->Copy());
+ }
if (split_edge_merge_block_ != NULL) {
HEnvironment* env = first_false_block_->last_environment();
HBasicBlock* split_edge =
@@ -762,24 +816,26 @@ void HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) {
compare->SetSuccessorAt(0, first_true_block_);
compare->SetSuccessorAt(1, split_edge);
}
- split_edge->GotoNoSimulate(split_edge_merge_block_);
+ builder_->GotoNoSimulate(split_edge, split_edge_merge_block_);
} else {
compare->SetSuccessorAt(0, first_true_block_);
compare->SetSuccessorAt(1, first_false_block_);
}
- builder_->current_block()->Finish(compare);
+ builder_->FinishCurrentBlock(compare);
needs_compare_ = false;
+ return compare;
}
void HGraphBuilder::IfBuilder::Or() {
+ ASSERT(!needs_compare_);
ASSERT(!did_and_);
did_or_ = true;
HEnvironment* env = first_false_block_->last_environment();
if (split_edge_merge_block_ == NULL) {
split_edge_merge_block_ =
builder_->CreateBasicBlock(env->Copy());
- first_true_block_->GotoNoSimulate(split_edge_merge_block_);
+ builder_->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
first_true_block_ = split_edge_merge_block_;
}
builder_->set_current_block(first_false_block_);
@@ -788,12 +844,13 @@ void HGraphBuilder::IfBuilder::Or() {
void HGraphBuilder::IfBuilder::And() {
+ ASSERT(!needs_compare_);
ASSERT(!did_or_);
did_and_ = true;
HEnvironment* env = first_false_block_->last_environment();
if (split_edge_merge_block_ == NULL) {
split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy());
- first_false_block_->GotoNoSimulate(split_edge_merge_block_);
+ builder_->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
first_false_block_ = split_edge_merge_block_;
}
builder_->set_current_block(first_true_block_);
@@ -803,15 +860,38 @@ void HGraphBuilder::IfBuilder::And() {
void HGraphBuilder::IfBuilder::CaptureContinuation(
HIfContinuation* continuation) {
+ ASSERT(!did_else_if_);
ASSERT(!finished_);
ASSERT(!captured_);
- HBasicBlock* true_block = last_true_block_ == NULL
- ? first_true_block_
- : last_true_block_;
- HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
- ? builder_->current_block()
- : first_false_block_;
- continuation->Capture(true_block, false_block, position_);
+
+ HBasicBlock* true_block = NULL;
+ HBasicBlock* false_block = NULL;
+ Finish(&true_block, &false_block);
+ ASSERT(true_block != NULL);
+ ASSERT(false_block != NULL);
+ continuation->Capture(true_block, false_block);
+ captured_ = true;
+ builder_->set_current_block(NULL);
+ End();
+}
+
+
+void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
+ ASSERT(!did_else_if_);
+ ASSERT(!finished_);
+ ASSERT(!captured_);
+ HBasicBlock* true_block = NULL;
+ HBasicBlock* false_block = NULL;
+ Finish(&true_block, &false_block);
+ merge_at_join_blocks_ = NULL;
+ if (true_block != NULL && !true_block->IsFinished()) {
+ ASSERT(continuation->IsTrueReachable());
+ builder_->GotoNoSimulate(true_block, continuation->true_branch());
+ }
+ if (false_block != NULL && !false_block->IsFinished()) {
+ ASSERT(continuation->IsFalseReachable());
+ builder_->GotoNoSimulate(false_block, continuation->false_branch());
+ }
captured_ = true;
End();
}
@@ -829,12 +909,12 @@ void HGraphBuilder::IfBuilder::Then() {
HConstant* constant_false = builder_->graph()->GetConstantFalse();
ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
boolean_type.Add(ToBooleanStub::BOOLEAN);
- HBranch* branch =
- new(zone()) HBranch(constant_false, boolean_type, first_true_block_,
- first_false_block_);
- builder_->current_block()->Finish(branch);
+ HBranch* branch = builder()->New<HBranch>(
+ constant_false, boolean_type, first_true_block_, first_false_block_);
+ builder_->FinishCurrentBlock(branch);
}
builder_->set_current_block(first_true_block_);
+ pending_merge_block_ = true;
}
@@ -842,73 +922,117 @@ void HGraphBuilder::IfBuilder::Else() {
ASSERT(did_then_);
ASSERT(!captured_);
ASSERT(!finished_);
- last_true_block_ = builder_->current_block();
+ AddMergeAtJoinBlock(false);
builder_->set_current_block(first_false_block_);
+ pending_merge_block_ = true;
did_else_ = true;
}
void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
ASSERT(did_then_);
- if (did_else_) {
- deopt_else_ = true;
- } else {
- deopt_then_ = true;
- }
builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+ AddMergeAtJoinBlock(true);
}
void HGraphBuilder::IfBuilder::Return(HValue* value) {
- HBasicBlock* block = builder_->current_block();
HValue* parameter_count = builder_->graph()->GetConstantMinus1();
- block->FinishExit(builder_->New<HReturn>(value, parameter_count));
+ builder_->FinishExitCurrentBlock(
+ builder_->New<HReturn>(value, parameter_count));
+ AddMergeAtJoinBlock(false);
+}
+
+
+void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) {
+ if (!pending_merge_block_) return;
+ HBasicBlock* block = builder_->current_block();
+ ASSERT(block == NULL || !block->IsFinished());
+ MergeAtJoinBlock* record =
+ new(builder_->zone()) MergeAtJoinBlock(block, deopt,
+ merge_at_join_blocks_);
+ merge_at_join_blocks_ = record;
+ if (block != NULL) {
+ ASSERT(block->end() == NULL);
+ if (deopt) {
+      deopt_merge_at_join_block_count_++;
+    } else {
+      normal_merge_at_join_block_count_++;
+ }
+ }
builder_->set_current_block(NULL);
- if (did_else_) {
- first_false_block_ = NULL;
- } else {
- first_true_block_ = NULL;
+ pending_merge_block_ = false;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish() {
+ ASSERT(!finished_);
+ if (!did_then_) {
+ Then();
+ }
+ AddMergeAtJoinBlock(false);
+ if (!did_else_) {
+ Else();
+ AddMergeAtJoinBlock(false);
+ }
+ finished_ = true;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation,
+ HBasicBlock** else_continuation) {
+ Finish();
+
+ MergeAtJoinBlock* else_record = merge_at_join_blocks_;
+ if (else_continuation != NULL) {
+ *else_continuation = else_record->block_;
+ }
+ MergeAtJoinBlock* then_record = else_record->next_;
+ if (then_continuation != NULL) {
+ *then_continuation = then_record->block_;
}
+ ASSERT(then_record->next_ == NULL);
}
void HGraphBuilder::IfBuilder::End() {
- if (!captured_) {
- ASSERT(did_then_);
- if (!did_else_) {
- last_true_block_ = builder_->current_block();
- }
- if (last_true_block_ == NULL || last_true_block_->IsFinished()) {
- ASSERT(did_else_);
- // Return on true. Nothing to do, just continue the false block.
- } else if (first_false_block_ == NULL ||
- (did_else_ && builder_->current_block()->IsFinished())) {
- // Deopt on false. Nothing to do except switching to the true block.
- builder_->set_current_block(last_true_block_);
- } else {
- merge_block_ = builder_->graph()->CreateBasicBlock();
- ASSERT(!finished_);
- if (!did_else_) Else();
- ASSERT(!last_true_block_->IsFinished());
- HBasicBlock* last_false_block = builder_->current_block();
- ASSERT(!last_false_block->IsFinished());
- if (deopt_then_) {
- last_false_block->GotoNoSimulate(merge_block_);
- builder_->PadEnvironmentForContinuation(last_true_block_,
- merge_block_);
- last_true_block_->GotoNoSimulate(merge_block_);
- } else {
- last_true_block_->GotoNoSimulate(merge_block_);
- if (deopt_else_) {
- builder_->PadEnvironmentForContinuation(last_false_block,
- merge_block_);
- }
- last_false_block->GotoNoSimulate(merge_block_);
+ if (captured_) return;
+ Finish();
+
+ int total_merged_blocks = normal_merge_at_join_block_count_ +
+ deopt_merge_at_join_block_count_;
+ ASSERT(total_merged_blocks >= 1);
+ HBasicBlock* merge_block = total_merged_blocks == 1
+ ? NULL : builder_->graph()->CreateBasicBlock();
+
+  // Merge non-deopt blocks first to ensure the environment has the right
+  // size for padding.
+ MergeAtJoinBlock* current = merge_at_join_blocks_;
+ while (current != NULL) {
+ if (!current->deopt_ && current->block_ != NULL) {
+ // If there is only one block that makes it through to the end of the
+      // if, then just set it as the current block and continue rather than
+ // creating an unnecessary merge block.
+ if (total_merged_blocks == 1) {
+ builder_->set_current_block(current->block_);
+ return;
}
- builder_->set_current_block(merge_block_);
+ builder_->GotoNoSimulate(current->block_, merge_block);
}
+ current = current->next_;
}
- finished_ = true;
+
+ // Merge deopt blocks, padding when necessary.
+ current = merge_at_join_blocks_;
+ while (current != NULL) {
+ if (current->deopt_ && current->block_ != NULL) {
+ builder_->PadEnvironmentForContinuation(current->block_,
+ merge_block);
+ builder_->GotoNoSimulate(current->block_, merge_block);
+ }
+ current = current->next_;
+ }
+ builder_->set_current_block(merge_block);
}
@@ -951,7 +1075,7 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
phi_ = header_block_->AddNewPhi(env->values()->length());
phi_->AddInput(initial);
env->Push(initial);
- builder_->current_block()->GotoNoSimulate(header_block_);
+ builder_->GotoNoSimulate(header_block_);
HEnvironment* body_env = env->Copy();
HEnvironment* exit_env = env->Copy();
@@ -963,11 +1087,8 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
builder_->set_current_block(header_block_);
env->Pop();
- HCompareNumericAndBranch* compare =
- new(zone()) HCompareNumericAndBranch(phi_, terminating, token);
- compare->SetSuccessorAt(0, body_block_);
- compare->SetSuccessorAt(1, exit_block_);
- builder_->current_block()->Finish(compare);
+ builder_->FinishCurrentBlock(builder_->New<HCompareNumericAndBranch>(
+ phi_, terminating, token, body_block_, exit_block_));
builder_->set_current_block(body_block_);
if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
@@ -991,10 +1112,11 @@ void HGraphBuilder::LoopBuilder::Break() {
    // It's the first time we've seen a break.
HEnvironment* env = exit_block_->last_environment()->Copy();
exit_trampoline_block_ = builder_->CreateBasicBlock(env);
- exit_block_->GotoNoSimulate(exit_trampoline_block_);
+ builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
}
- builder_->current_block()->GotoNoSimulate(exit_trampoline_block_);
+ builder_->GotoNoSimulate(exit_trampoline_block_);
+ builder_->set_current_block(NULL);
}
@@ -1014,7 +1136,7 @@ void HGraphBuilder::LoopBuilder::EndBody() {
// Push the new increment value on the expression stack to merge into the phi.
builder_->environment()->Push(increment_);
HBasicBlock* last_block = builder_->current_block();
- last_block->GotoNoSimulate(header_block_);
+ builder_->GotoNoSimulate(last_block, header_block_);
header_block_->loop_information()->RegisterBackEdge(last_block);
if (exit_trampoline_block_ != NULL) {
@@ -1032,14 +1154,16 @@ HGraph* HGraphBuilder::CreateGraph() {
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
- graph()->FinalizeUniqueValueIds();
+ graph()->FinalizeUniqueness();
return graph_;
}
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
+ ASSERT(!FLAG_emit_opt_code_positions ||
+ position_ != RelocInfo::kNoPosition || !info_->IsOptimizing());
+ current_block()->AddInstruction(instr, position_);
if (graph()->IsInsideNoSideEffectsScope()) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
@@ -1047,13 +1171,32 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
}
-void HGraphBuilder::AddIncrementCounter(StatsCounter* counter,
- HValue* context) {
+void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->Finish(last, position_);
+ if (last->IsReturn() || last->IsAbnormalExit()) {
+ set_current_block(NULL);
+ }
+}
+
+
+void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
+ ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() ||
+ position_ != RelocInfo::kNoPosition);
+ current_block()->FinishExit(instruction, position_);
+ if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
+ set_current_block(NULL);
+ }
+}
+
+
+void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
if (FLAG_native_code_counters && counter->Enabled()) {
HValue* reference = Add<HConstant>(ExternalReference(counter));
HValue* old_value = Add<HLoadNamedField>(reference,
HObjectAccess::ForCounter());
- HValue* new_value = Add<HAdd>(old_value, graph()->GetConstant1());
+ HValue* new_value = AddUncasted<HAdd>(old_value, graph()->GetConstant1());
new_value->ClearFlag(HValue::kCanOverflow); // Ignore counter overflow
Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
new_value);
@@ -1096,9 +1239,9 @@ void HGraphBuilder::FinishExitWithHardDeoptimization(
PadEnvironmentForContinuation(current_block(), continuation);
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
if (graph()->IsInsideNoSideEffectsScope()) {
- current_block()->GotoNoSimulate(continuation);
+ GotoNoSimulate(continuation);
} else {
- current_block()->Goto(continuation);
+ Goto(continuation);
}
}
@@ -1131,6 +1274,17 @@ HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) {
}
+HValue* HGraphBuilder::BuildCheckString(HValue* string) {
+ if (!string->type().IsString()) {
+ ASSERT(!string->IsConstant() ||
+ !HConstant::cast(string)->HasStringValue());
+ BuildCheckHeapObject(string);
+ return Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
+ }
+ return string;
+}
+
+
HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
if (object->type().IsJSObject()) return object;
return Add<HWrapReceiver>(object, function);
@@ -1143,7 +1297,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HValue* length,
HValue* key,
bool is_js_array) {
- Zone* zone = this->zone();
IfBuilder length_checker(this);
Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
@@ -1159,10 +1312,8 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
Token::GTE);
capacity_checker.Then();
- HValue* context = environment()->context();
-
HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
- HValue* max_capacity = Add<HAdd>(current_capacity, max_gap);
+ HValue* max_capacity = AddUncasted<HAdd>(current_capacity, max_gap);
IfBuilder key_checker(this);
key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
key_checker.Then();
@@ -1181,8 +1332,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
capacity_checker.End();
if (is_js_array) {
- HValue* new_length = AddInstruction(
- HAdd::New(zone, context, key, graph_->GetConstant1()));
+ HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1());
new_length->ClearFlag(HValue::kCanOverflow);
Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(kind),
@@ -1267,6 +1417,608 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
}
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
+ HValue* elements,
+ HValue* key,
+ HValue* hash,
+ HValue* mask,
+ int current_probe) {
+ if (current_probe == kNumberDictionaryProbes) {
+ return NULL;
+ }
+
+ int32_t offset = SeededNumberDictionary::GetProbeOffset(current_probe);
+ HValue* raw_index = (current_probe == 0)
+ ? hash
+ : AddUncasted<HAdd>(hash, Add<HConstant>(offset));
+ raw_index = AddUncasted<HBitwise>(Token::BIT_AND, raw_index, mask);
+ int32_t entry_size = SeededNumberDictionary::kEntrySize;
+ raw_index = AddUncasted<HMul>(raw_index, Add<HConstant>(entry_size));
+ raw_index->ClearFlag(HValue::kCanOverflow);
+
+ int32_t base_offset = SeededNumberDictionary::kElementsStartIndex;
+ HValue* key_index = AddUncasted<HAdd>(raw_index, Add<HConstant>(base_offset));
+ key_index->ClearFlag(HValue::kCanOverflow);
+
+ HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_SMI_ELEMENTS);
+
+ IfBuilder key_compare(this);
+ key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
+ key_compare.Then();
+ {
+    // Key at the current probe doesn't match; try the next probe.
+ HValue* result = BuildUncheckedDictionaryElementLoadHelper(
+ elements, key, hash, mask, current_probe + 1);
+ if (result == NULL) {
+ key_compare.Deopt("probes exhausted in keyed load dictionary lookup");
+ result = graph()->GetConstantUndefined();
+ } else {
+ Push(result);
+ }
+ }
+ key_compare.Else();
+ {
+    // Key at the current probe matches. Details must be zero; otherwise the
+ // dictionary element requires special handling.
+ HValue* details_index = AddUncasted<HAdd>(
+ raw_index, Add<HConstant>(base_offset + 2));
+ details_index->ClearFlag(HValue::kCanOverflow);
+
+ HValue* details = Add<HLoadKeyed>(elements, details_index,
+ static_cast<HValue*>(NULL),
+ FAST_SMI_ELEMENTS);
+ IfBuilder details_compare(this);
+ details_compare.If<HCompareNumericAndBranch>(details,
+ graph()->GetConstant0(),
+ Token::NE);
+ details_compare.ThenDeopt("keyed load dictionary element not fast case");
+
+ details_compare.Else();
+ {
+ // Key matches and details are zero --> fast case. Load and return the
+ // value.
+ HValue* result_index = AddUncasted<HAdd>(
+ raw_index, Add<HConstant>(base_offset + 1));
+ result_index->ClearFlag(HValue::kCanOverflow);
+
+ Push(Add<HLoadKeyed>(elements, result_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS));
+ }
+ details_compare.End();
+ }
+ key_compare.End();
+
+ return Pop();
+}
+
+
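
For reference, the recursion above unrolls a fixed number of open-addressing
probes over a flat array whose entries are (key, value, details) triples. A
plain C++ model of the same lookup; the probe count of 4 and the triangular
probe offsets are assumptions about kNumberDictionaryProbes and
SeededNumberDictionary::GetProbeOffset(), and the other names are
illustrative:

    #include <stdint.h>

    static const int kEntrySize = 3;  // key, value, details
    static const int kMaxProbes = 4;  // assumed value of kNumberDictionaryProbes

    int FindNumberDictionaryEntry(const int32_t* elements, int elements_start,
                                  int32_t key, uint32_t hash, uint32_t mask) {
      for (int probe = 0; probe < kMaxProbes; probe++) {
        uint32_t offset = (probe + probe * probe) >> 1;  // assumed probe offsets
        uint32_t raw_index = ((hash + offset) & mask) * kEntrySize;
        int key_index = elements_start + static_cast<int>(raw_index);
        if (elements[key_index] == key) {
          return key_index;  // value lives at +1, details at +2
        }
      }
      return -1;  // probes exhausted; the builder deopts in this case
    }
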
+HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
+ int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
+ HValue* seed = Add<HConstant>(seed_value);
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, index, seed);
+
+ // hash = ~hash + (hash << 15);
+ HValue* shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(15));
+ HValue* not_hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash,
+ graph()->GetConstantMinus1());
+ hash = AddUncasted<HAdd>(shifted_hash, not_hash);
+
+ // hash = hash ^ (hash >> 12);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(12));
+ hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+ // hash = hash + (hash << 2);
+ shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(2));
+ hash = AddUncasted<HAdd>(hash, shifted_hash);
+
+ // hash = hash ^ (hash >> 4);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(4));
+ hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+ // hash = hash * 2057;
+ hash = AddUncasted<HMul>(hash, Add<HConstant>(2057));
+ hash->ClearFlag(HValue::kCanOverflow);
+
+ // hash = hash ^ (hash >> 16);
+ shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(16));
+ return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+}
+
+
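
The step comments above spell out the underlying integer hash; in scalar C++
the whole node sequence reduces to:

    #include <stdint.h>

    uint32_t ElementIndexHash(uint32_t index, uint32_t seed) {
      uint32_t hash = index ^ seed;
      hash = ~hash + (hash << 15);  // BIT_XOR with -1 is the same as ~hash
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }
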
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
+ HValue* key) {
+ HValue* elements = AddLoadElements(receiver);
+
+ HValue* hash = BuildElementIndexHash(key);
+
+ HValue* capacity = Add<HLoadKeyed>(
+ elements,
+ Add<HConstant>(NameDictionary::kCapacityIndex),
+ static_cast<HValue*>(NULL),
+ FAST_SMI_ELEMENTS);
+
+ HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
+ mask->ChangeRepresentation(Representation::Integer32());
+ mask->ClearFlag(HValue::kCanOverflow);
+
+ return BuildUncheckedDictionaryElementLoadHelper(elements, key,
+ hash, mask, 0);
+}
+
+
+HValue* HGraphBuilder::BuildNumberToString(HValue* object,
+ Handle<Type> type) {
+ NoObservableSideEffectsScope scope(this);
+
+ // Convert constant numbers at compile time.
+ if (object->IsConstant() && HConstant::cast(object)->HasNumberValue()) {
+ Handle<Object> number = HConstant::cast(object)->handle(isolate());
+ Handle<String> result = isolate()->factory()->NumberToString(number);
+ return Add<HConstant>(result);
+ }
+
+ // Create a joinable continuation.
+ HIfContinuation found(graph()->CreateBasicBlock(),
+ graph()->CreateBasicBlock());
+
+ // Load the number string cache.
+ HValue* number_string_cache =
+ Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ HValue* mask = AddLoadFixedArrayLength(number_string_cache);
+ mask->set_type(HType::Smi());
+ mask = AddUncasted<HSar>(mask, graph()->GetConstant1());
+ mask = AddUncasted<HSub>(mask, graph()->GetConstant1());
+
+ // Check whether object is a smi.
+ IfBuilder if_objectissmi(this);
+ if_objectissmi.If<HIsSmiAndBranch>(object);
+ if_objectissmi.Then();
+ {
+ // Compute hash for smi similar to smi_get_hash().
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_AND, object, mask);
+
+ // Load the key.
+ HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
+ HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+ // Check if object == key.
+ IfBuilder if_objectiskey(this);
+ if_objectiskey.If<HCompareObjectEqAndBranch>(object, key);
+ if_objectiskey.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_objectiskey.JoinContinuation(&found);
+ }
+ if_objectissmi.Else();
+ {
+ if (type->Is(Type::Smi())) {
+ if_objectissmi.Deopt("Expected smi");
+ } else {
+ // Check if the object is a heap number.
+ IfBuilder if_objectisnumber(this);
+ if_objectisnumber.If<HCompareMap>(
+ object, isolate()->factory()->heap_number_map());
+ if_objectisnumber.Then();
+ {
+ // Compute hash for heap number similar to double_get_hash().
+ HValue* low = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueLowestBits());
+ HValue* high = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueHighestBits());
+ HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, low, high);
+ hash = AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
+
+ // Load the key.
+ HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
+ HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+ // Check if key is a heap number (the number string cache contains only
+      // SMIs and heap numbers, so it is sufficient to do an SMI check here).
+ IfBuilder if_keyisnotsmi(this);
+ if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
+ if_keyisnotsmi.Then();
+ {
+ // Check if values of key and object match.
+ IfBuilder if_keyeqobject(this);
+ if_keyeqobject.If<HCompareNumericAndBranch>(
+ Add<HLoadNamedField>(key, HObjectAccess::ForHeapNumberValue()),
+ Add<HLoadNamedField>(object, HObjectAccess::ForHeapNumberValue()),
+ Token::EQ);
+ if_keyeqobject.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_keyeqobject.JoinContinuation(&found);
+ }
+ if_keyisnotsmi.JoinContinuation(&found);
+ }
+ if_objectisnumber.Else();
+ {
+ if (type->Is(Type::Number())) {
+ if_objectisnumber.Deopt("Expected heap number");
+ }
+ }
+ if_objectisnumber.JoinContinuation(&found);
+ }
+ }
+ if_objectissmi.JoinContinuation(&found);
+
+ // Check for cache hit.
+ IfBuilder if_found(this, &found);
+ if_found.Then();
+ {
+ // Count number to string operation in native code.
+ AddIncrementCounter(isolate()->counters()->number_to_string_native());
+
+ // Load the value in case of cache hit.
+ HValue* key_index = Pop();
+ HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
+ Push(Add<HLoadKeyed>(number_string_cache, value_index,
+ static_cast<HValue*>(NULL),
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE));
+ }
+ if_found.Else();
+ {
+ // Cache miss, fallback to runtime.
+ Add<HPushArgument>(object);
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+ 1));
+ }
+ if_found.End();
+
+ return Pop();
+}
+
+
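
The cache probed above is one flat array of interleaved (number, string)
pairs, which is why the mask is half the array length minus one and the value
index is the key index plus one. A scalar sketch of the smi fast path (names
are illustrative):

    #include <stdint.h>

    // cache layout: [number0, string0, number1, string1, ...]
    int LookupSmiInNumberStringCache(const int32_t* cache_keys,
                                     int cache_length, int32_t smi) {
      int32_t mask = (cache_length >> 1) - 1;  // two slots per entry
      int32_t hash = smi & mask;               // mirrors smi_get_hash()
      int32_t key_index = hash << 1;
      if (cache_keys[key_index] == smi) return key_index + 1;  // value slot
      return -1;  // cache miss: the builder falls back to the runtime
    }
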
+HValue* HGraphBuilder::BuildSeqStringSizeFor(HValue* length,
+ String::Encoding encoding) {
+ STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
+ HValue* size = length;
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ size = AddUncasted<HShl>(length, graph()->GetConstant1());
+ size->ClearFlag(HValue::kCanOverflow);
+ size->SetFlag(HValue::kUint32);
+ }
+ size = AddUncasted<HAdd>(size, Add<HConstant>(static_cast<int32_t>(
+ SeqString::kHeaderSize + kObjectAlignmentMask)));
+ size->ClearFlag(HValue::kCanOverflow);
+ size = AddUncasted<HBitwise>(
+ Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
+ ~kObjectAlignmentMask)));
+ return size;
+}
+
+
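
The computation above is a standard align-up: scale the character count by
the encoding width, add the header plus the alignment mask, then clear the
low bits. In scalar form, with a worked example (the header size and
alignment below are placeholders, not V8's actual values):

    #include <stddef.h>

    size_t SeqStringSize(size_t length, bool two_byte,
                         size_t header_size, size_t alignment_mask) {
      size_t size = two_byte ? (length << 1) : length;
      return (size + header_size + alignment_mask) & ~alignment_mask;
    }

    // E.g. a 5-character two-byte string with a 16-byte header and 8-byte
    // alignment (mask 7) needs 10 + 16 = 26 bytes, rounded up to 32:
    // (10 + 16 + 7) & ~7 == 32.
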
+void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
+ HValue* src_offset,
+ String::Encoding src_encoding,
+ HValue* dst,
+ HValue* dst_offset,
+ String::Encoding dst_encoding,
+ HValue* length) {
+ ASSERT(dst_encoding != String::ONE_BYTE_ENCODING ||
+ src_encoding == String::ONE_BYTE_ENCODING);
+ LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
+ HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
+ {
+ HValue* src_index = AddUncasted<HAdd>(src_offset, index);
+ HValue* value =
+ AddUncasted<HSeqStringGetChar>(src_encoding, src, src_index);
+ HValue* dst_index = AddUncasted<HAdd>(dst_offset, index);
+ Add<HSeqStringSetChar>(dst_encoding, dst, dst_index, value);
+ }
+ loop.EndBody();
+}
+
+
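
The copy itself is a single post-increment loop over a shared index, with
only the offsets differing between source and destination. A scalar
equivalent, with the encoding-specific get/set nodes reduced to plain
indexing:

    #include <stdint.h>

    // As the ASSERT above notes, a two-byte source may only feed a
    // two-byte destination; one-byte copies fit in uint16_t as well.
    void CopySeqStringChars(const uint16_t* src, int src_offset,
                            uint16_t* dst, int dst_offset, int length) {
      for (int i = 0; i < length; i++) {
        dst[dst_offset + i] = src[src_offset + i];
      }
    }
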
+HValue* HGraphBuilder::BuildUncheckedStringAdd(HValue* left,
+ HValue* right,
+ PretenureFlag pretenure_flag) {
+ // Determine the string lengths.
+ HValue* left_length = Add<HLoadNamedField>(
+ left, HObjectAccess::ForStringLength());
+ HValue* right_length = Add<HLoadNamedField>(
+ right, HObjectAccess::ForStringLength());
+
+  // Compute the combined string length. If the result is larger than the max
+  // supported string length, we bail out to the runtime. This is done
+  // implicitly when converting the result back to a smi in case the max
+  // string length equals the max smi value. Otherwise, for platforms with
+  // 32-bit smis, we do an explicit check against String::kMaxLength below.
+ HValue* length = AddUncasted<HAdd>(left_length, right_length);
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (String::kMaxLength != Smi::kMaxValue) {
+ IfBuilder if_nooverflow(this);
+ if_nooverflow.If<HCompareNumericAndBranch>(
+ length, Add<HConstant>(String::kMaxLength), Token::LTE);
+ if_nooverflow.Then();
+ if_nooverflow.ElseDeopt("String length exceeds limit");
+ }
+
+ // Determine the string instance types.
+ HLoadNamedField* left_instance_type = Add<HLoadNamedField>(
+ Add<HLoadNamedField>(left, HObjectAccess::ForMap()),
+ HObjectAccess::ForMapInstanceType());
+ HLoadNamedField* right_instance_type = Add<HLoadNamedField>(
+ Add<HLoadNamedField>(right, HObjectAccess::ForMap()),
+ HObjectAccess::ForMapInstanceType());
+
+ // Compute difference of instance types.
+ HValue* xored_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_XOR, left_instance_type, right_instance_type);
+
+ // Check if we should create a cons string.
+ IfBuilder if_createcons(this);
+ if_createcons.If<HCompareNumericAndBranch>(
+ length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
+ if_createcons.Then();
+ {
+ // Allocate the cons string object. HAllocate does not care whether we
+ // pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE here, so we just use
+    // CONS_STRING_TYPE. Below we decide whether the cons string is
+ // one-byte or two-byte and set the appropriate map.
+ HAllocate* string = Add<HAllocate>(Add<HConstant>(ConsString::kSize),
+ HType::String(), pretenure_flag,
+ CONS_STRING_TYPE);
+
+ // Compute the intersection of instance types.
+ HValue* anded_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_AND, left_instance_type, right_instance_type);
+
+ // We create a one-byte cons string if
+ // 1. both strings are one-byte, or
+ // 2. at least one of the strings is two-byte, but happens to contain only
+ // one-byte characters.
+ // To do this, we check
+ // 1. if both strings are one-byte, or if the one-byte data hint is set in
+ // both strings, or
+ // 2. if one of the strings has the one-byte data hint set and the other
+ // string is one-byte.
+ IfBuilder if_onebyte(this);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kOneByteDataHintMask != 0);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, anded_instance_types,
+ Add<HConstant>(static_cast<int32_t>(
+ kStringEncodingMask | kOneByteDataHintMask))),
+ graph()->GetConstant0(), Token::NE);
+ if_onebyte.Or();
+ STATIC_ASSERT(kOneByteStringTag != 0 &&
+ kOneByteDataHintTag != 0 &&
+ kOneByteDataHintTag != kOneByteStringTag);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, xored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(
+ kOneByteStringTag | kOneByteDataHintTag))),
+ Add<HConstant>(static_cast<int32_t>(
+ kOneByteStringTag | kOneByteDataHintTag)), Token::EQ);
+ if_onebyte.Then();
+ {
+ // We can safely skip the write barrier for storing the map here.
+ Handle<Map> map = isolate()->factory()->cons_ascii_string_map();
+ AddStoreMapConstantNoWriteBarrier(string, map);
+ }
+ if_onebyte.Else();
+ {
+ // We can safely skip the write barrier for storing the map here.
+ Handle<Map> map = isolate()->factory()->cons_string_map();
+ AddStoreMapConstantNoWriteBarrier(string, map);
+ }
+ if_onebyte.End();
+
+ // Initialize the cons string fields.
+ Add<HStoreNamedField>(string, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+ Add<HStoreNamedField>(string, HObjectAccess::ForStringLength(), length);
+ Add<HStoreNamedField>(string, HObjectAccess::ForConsStringFirst(), left);
+ Add<HStoreNamedField>(string, HObjectAccess::ForConsStringSecond(),
+ right);
+
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ // Cons string is result.
+ Push(string);
+ }
+ if_createcons.Else();
+ {
+ // Compute union of instance types.
+ HValue* ored_instance_types = AddUncasted<HBitwise>(
+ Token::BIT_OR, left_instance_type, right_instance_type);
+
+ // Check if both strings have the same encoding and both are
+ // sequential.
+ IfBuilder if_sameencodingandsequential(this);
+ if_sameencodingandsequential.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, xored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
+ graph()->GetConstant0(), Token::EQ);
+ if_sameencodingandsequential.And();
+ STATIC_ASSERT(kSeqStringTag == 0);
+ if_sameencodingandsequential.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, ored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(kStringRepresentationMask))),
+ graph()->GetConstant0(), Token::EQ);
+ if_sameencodingandsequential.Then();
+ {
+ // Check if the result is a one-byte string.
+ IfBuilder if_onebyte(this);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ if_onebyte.If<HCompareNumericAndBranch>(
+ AddUncasted<HBitwise>(
+ Token::BIT_AND, ored_instance_types,
+ Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
+ graph()->GetConstant0(), Token::NE);
+ if_onebyte.Then();
+ {
+ // Calculate the number of bytes needed for the characters in the
+ // string while observing object alignment.
+ HValue* size = BuildSeqStringSizeFor(
+ length, String::ONE_BYTE_ENCODING);
+
+ // Allocate the ASCII string object.
+ Handle<Map> map = isolate()->factory()->ascii_string_map();
+ HAllocate* string = Add<HAllocate>(size, HType::String(),
+ pretenure_flag, ASCII_STRING_TYPE);
+ string->set_known_initial_map(map);
+
+ // We can safely skip the write barrier for storing map here.
+ AddStoreMapConstantNoWriteBarrier(string, map);
+
+ // Length must be stored into the string before we copy characters to
+ // make debug verification code happy.
+ Add<HStoreNamedField>(string, HObjectAccess::ForStringLength(),
+ length);
+
+ // Copy bytes from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ string, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ left_length);
+
+ // Copy bytes from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
+ string, left_length, String::ONE_BYTE_ENCODING,
+ right_length);
+
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ // Return the string.
+ Push(string);
+ }
+ if_onebyte.Else();
+ {
+ // Calculate the number of bytes needed for the characters in the
+ // string while observing object alignment.
+ HValue* size = BuildSeqStringSizeFor(
+ length, String::TWO_BYTE_ENCODING);
+
+ // Allocate the two-byte string object.
+ Handle<Map> map = isolate()->factory()->string_map();
+ HAllocate* string = Add<HAllocate>(size, HType::String(),
+ pretenure_flag, STRING_TYPE);
+ string->set_known_initial_map(map);
+
+ // We can safely skip the write barrier for storing map here.
+ AddStoreMapConstantNoWriteBarrier(string, map);
+
+ // Length must be stored into the string before we copy characters to
+ // make debug verification code happy.
+ Add<HStoreNamedField>(string, HObjectAccess::ForStringLength(),
+ length);
+
+ // Copy bytes from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
+ string, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
+ left_length);
+
+ // Copy bytes from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
+ string, left_length, String::TWO_BYTE_ENCODING,
+ right_length);
+
+ // Return the string.
+ Push(string);
+ }
+ if_onebyte.End();
+
+ // Initialize the (common) string fields.
+ HValue* string = Pop();
+ Add<HStoreNamedField>(string, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ Push(string);
+ }
+ if_sameencodingandsequential.Else();
+ {
+ // Fallback to the runtime to add the two strings.
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kStringAdd),
+ 2));
+ }
+ if_sameencodingandsequential.End();
+ }
+ if_createcons.End();
+
+ return Pop();
+}
+
+
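
The AND/XOR trick above tests both one-byte conditions without reloading the
instance types: the intersection catches a flag set in both strings, the
exclusive-or catches a tag set in exactly one of them. A scalar rendering
(the bit values below are illustrative stand-ins for V8's flag constants):

    #include <stdint.h>

    static const uint32_t kOneByteStringTag    = 1u << 2;  // stand-in value
    static const uint32_t kStringEncodingMask  = 1u << 2;  // stand-in value
    static const uint32_t kOneByteDataHintTag  = 1u << 3;  // stand-in value
    static const uint32_t kOneByteDataHintMask = 1u << 3;  // stand-in value

    bool ConsStringIsOneByte(uint32_t left_type, uint32_t right_type) {
      uint32_t anded = left_type & right_type;
      uint32_t xored = left_type ^ right_type;
      // 1. Both strings one-byte, or the one-byte data hint set in both.
      if ((anded & (kStringEncodingMask | kOneByteDataHintMask)) != 0) {
        return true;
      }
      // 2. One string one-byte, the other carrying the one-byte data hint.
      return (xored & (kOneByteStringTag | kOneByteDataHintTag)) ==
             (kOneByteStringTag | kOneByteDataHintTag);
    }
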
+HValue* HGraphBuilder::BuildStringAdd(HValue* left,
+ HValue* right,
+ PretenureFlag pretenure_flag) {
+ // Determine the string lengths.
+ HValue* left_length = Add<HLoadNamedField>(
+ left, HObjectAccess::ForStringLength());
+ HValue* right_length = Add<HLoadNamedField>(
+ right, HObjectAccess::ForStringLength());
+
+ // Check if left string is empty.
+ IfBuilder if_leftisempty(this);
+ if_leftisempty.If<HCompareNumericAndBranch>(
+ left_length, graph()->GetConstant0(), Token::EQ);
+ if_leftisempty.Then();
+ {
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ // Just return the right string.
+ Push(right);
+ }
+ if_leftisempty.Else();
+ {
+ // Check if right string is empty.
+ IfBuilder if_rightisempty(this);
+ if_rightisempty.If<HCompareNumericAndBranch>(
+ right_length, graph()->GetConstant0(), Token::EQ);
+ if_rightisempty.Then();
+ {
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
+
+ // Just return the left string.
+ Push(left);
+ }
+ if_rightisempty.Else();
+ {
+ // Concatenate the two non-empty strings.
+ Push(BuildUncheckedStringAdd(left, right, pretenure_flag));
+ }
+ if_rightisempty.End();
+ }
+ if_leftisempty.End();
+
+ return Pop();
+}
+
+
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
@@ -1318,9 +2070,10 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
- HInstruction* result = AddExternalArrayElementAccess(
+ HInstruction* result = AddElementAccess(
external_elements, key, val, bounds_check, elements_kind, is_store);
negative_checker.ElseDeopt("Negative key encountered");
+ negative_checker.End();
length_checker.End();
return result;
} else {
@@ -1328,7 +2081,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
checked_key = Add<HBoundsCheck>(key, length);
HLoadExternalArrayPointer* external_elements =
Add<HLoadExternalArrayPointer>(elements);
- return AddExternalArrayElementAccess(
+ return AddElementAccess(
external_elements, checked_key, val,
checked_object, elements_kind, is_store);
}
@@ -1342,7 +2095,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// deopt, leaving the backing store in an invalid state.
if (is_store && IsFastSmiElementsKind(elements_kind) &&
!val->type().IsSmi()) {
- val = Add<HForceRepresentation>(val, Representation::Smi());
+ val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
}
if (IsGrowStoreMode(store_mode)) {
@@ -1361,16 +2114,58 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
elements_kind, length);
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map(),
- top_info());
+ elements, isolate()->factory()->fixed_array_map(), top_info());
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
}
}
}
- return AddFastElementAccess(elements, checked_key, val, checked_object,
- elements_kind, is_store, load_mode, store_mode);
-}
+ return AddElementAccess(elements, checked_key, val, checked_object,
+ elements_kind, is_store, load_mode);
+}
+
+
+HValue* HGraphBuilder::BuildAllocateArrayFromLength(
+ JSArrayBuilder* array_builder,
+ HValue* length_argument) {
+ if (length_argument->IsConstant() &&
+ HConstant::cast(length_argument)->HasSmiValue()) {
+ int array_length = HConstant::cast(length_argument)->Integer32Value();
+ HValue* new_object = array_length == 0
+ ? array_builder->AllocateEmptyArray()
+ : array_builder->AllocateArray(length_argument, length_argument);
+ return new_object;
+ }
+
+ HValue* constant_zero = graph()->GetConstant0();
+ HConstant* max_alloc_length =
+ Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
+ max_alloc_length);
+ IfBuilder if_builder(this);
+ if_builder.If<HCompareNumericAndBranch>(checked_length, constant_zero,
+ Token::EQ);
+ if_builder.Then();
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
+ Push(initial_capacity_node); // capacity
+ Push(constant_zero); // length
+ if_builder.Else();
+ if (!(top_info()->IsStub()) &&
+ IsFastPackedElementsKind(array_builder->kind())) {
+ // We'll come back later with better (holey) feedback.
+ if_builder.Deopt("Holey array despite packed elements_kind feedback");
+ } else {
+ Push(checked_length); // capacity
+ Push(checked_length); // length
+ }
+ if_builder.End();
+  // Figure out the total size.
+ HValue* length = Pop();
+ HValue* capacity = Pop();
+ return array_builder->AllocateArray(capacity, length);
+}
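
Stripped of the graph plumbing, the branch structure above is a small
decision table: zero length takes the preallocated capacity, any other
in-range length serves as both capacity and length, and over-limit or
feedback-violating lengths deopt before reaching the allocation. A
hypothetical scalar model (the preallocated capacity is passed in rather
than read from JSArray::kPreallocatedArrayElements):

    struct ArrayShape { int capacity; int length; };

    // Lengths above JSObject::kInitialMaxFastElementArray never get here:
    // the HBoundsCheck above deopts first.
    ArrayShape ChooseArrayShape(int length_argument, int preallocated_capacity) {
      if (length_argument == 0) {
        ArrayShape shape = { preallocated_capacity, 0 };
        return shape;
      }
      ArrayShape shape = { length_argument, length_argument };
      return shape;
    }
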
HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
HValue* capacity) {
@@ -1386,11 +2181,11 @@ HValue* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
}
HConstant* elements_size_value = Add<HConstant>(elements_size);
- HValue* mul = Add<HMul>(capacity, elements_size_value);
+ HValue* mul = AddUncasted<HMul>(capacity, elements_size_value);
mul->ClearFlag(HValue::kCanOverflow);
HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
- HValue* total_size = Add<HAdd>(mul, header_size);
+ HValue* total_size = AddUncasted<HAdd>(mul, header_size);
total_size->ClearFlag(HValue::kCanOverflow);
return Add<HAllocate>(total_size, HType::JSArray(),
@@ -1417,7 +2212,7 @@ HValue* HGraphBuilder::BuildAllocateElementsAndInitializeElementsHeader(
HValue* capacity) {
// The HForceRepresentation is to prevent possible deopt on int-smi
// conversion after allocation but before the new object fields are set.
- capacity = Add<HForceRepresentation>(capacity, Representation::Smi());
+ capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
HValue* new_elements = BuildAllocateElements(kind, capacity);
BuildInitializeElementsHeader(new_elements, kind, capacity);
return new_elements;
@@ -1442,9 +2237,8 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
length_field);
if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationMemento(array,
- JSArray::kSize,
- allocation_site_payload);
+ BuildCreateAllocationMemento(
+ array, Add<HConstant>(JSArray::kSize), allocation_site_payload);
}
int elements_location = JSArray::kSize;
@@ -1452,91 +2246,38 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array,
elements_location += AllocationMemento::kSize;
}
- HValue* elements = Add<HInnerAllocatedObject>(array, elements_location);
+ HInnerAllocatedObject* elements = Add<HInnerAllocatedObject>(
+ array, Add<HConstant>(elements_location));
Add<HStoreNamedField>(array, HObjectAccess::ForElementsPointer(), elements);
- return static_cast<HInnerAllocatedObject*>(elements);
+ return elements;
}
-HInstruction* HGraphBuilder::AddExternalArrayElementAccess(
- HValue* external_elements,
+HInstruction* HGraphBuilder::AddElementAccess(
+ HValue* elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
- bool is_store) {
+ bool is_store,
+ LoadKeyedHoleMode load_mode) {
if (is_store) {
ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = Add<HClampToUint8>(val);
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
+ if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
+ val = Add<HClampToUint8>(val);
}
- return Add<HStoreKeyed>(external_elements, checked_key, val, elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyed* load = Add<HLoadKeyed>(external_elements,
- checked_key,
- dependency,
- elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
+ return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
}
-}
-
-HInstruction* HGraphBuilder::AddFastElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return Add<HStoreKeyed>(elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
+ ASSERT(!is_store);
+ ASSERT(val == NULL);
+ HLoadKeyed* load = Add<HLoadKeyed>(
+ elements, checked_key, dependency, elements_kind, load_mode);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
}
- // It's an element load (!is_store).
- return Add<HLoadKeyed>(
- elements, checked_key, load_dependency, elements_kind, load_mode);
+ return load;
}
@@ -1614,19 +2355,15 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
: Add<HConstant>(nan_double);
// Special loop unfolding case
- static const int kLoopUnfoldLimit = 4;
- bool unfold_loop = false;
- int initial_capacity = JSArray::kPreallocatedArrayElements;
- if (from->IsConstant() && to->IsConstant() &&
- initial_capacity <= kLoopUnfoldLimit) {
- HConstant* constant_from = HConstant::cast(from);
- HConstant* constant_to = HConstant::cast(to);
+ static const int kLoopUnfoldLimit = 8;
+ STATIC_ASSERT(JSArray::kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ int initial_capacity = -1;
+ if (from->IsInteger32Constant() && to->IsInteger32Constant()) {
+ int constant_from = from->GetInteger32Constant();
+ int constant_to = to->GetInteger32Constant();
- if (constant_from->HasInteger32Value() &&
- constant_from->Integer32Value() == 0 &&
- constant_to->HasInteger32Value() &&
- constant_to->Integer32Value() == initial_capacity) {
- unfold_loop = true;
+ if (constant_from == 0 && constant_to <= kLoopUnfoldLimit) {
+ initial_capacity = constant_to;
}
}
@@ -1636,7 +2373,7 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
elements_kind = FAST_HOLEY_ELEMENTS;
}
- if (unfold_loop) {
+ if (initial_capacity >= 0) {
for (int i = 0; i < initial_capacity; i++) {
HInstruction* key = Add<HConstant>(i);
Add<HStoreKeyed>(elements, key, hole, elements_kind);
@@ -1742,7 +2479,8 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
// Create an allocation site info if requested.
if (mode == TRACK_ALLOCATION_SITE) {
- BuildCreateAllocationMemento(object, JSArray::kSize, allocation_site);
+ BuildCreateAllocationMemento(
+ object, Add<HConstant>(JSArray::kSize), allocation_site);
}
if (length > 0) {
@@ -1786,9 +2524,8 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
void HGraphBuilder::BuildCompareNil(
HValue* value,
Handle<Type> type,
- int position,
HIfContinuation* continuation) {
- IfBuilder if_nil(this, position);
+ IfBuilder if_nil(this);
bool some_case_handled = false;
bool some_case_missing = false;
@@ -1836,19 +2573,31 @@ void HGraphBuilder::BuildCompareNil(
}
-HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
- int previous_object_size,
- HValue* alloc_site) {
- // TODO(mvstanton): ASSERT altered to CHECK to diagnose chromium bug 284577
- CHECK(alloc_site != NULL);
- HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
+void HGraphBuilder::BuildCreateAllocationMemento(
+ HValue* previous_object,
+ HValue* previous_object_size,
+ HValue* allocation_site) {
+ ASSERT(allocation_site != NULL);
+ HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size);
- Handle<Map> alloc_memento_map(
- isolate()->heap()->allocation_memento_map());
- AddStoreMapConstant(alloc_memento, alloc_memento_map);
- HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
- Add<HStoreNamedField>(alloc_memento, access, alloc_site);
- return alloc_memento;
+ AddStoreMapConstant(
+ allocation_memento, isolate()->factory()->allocation_memento_map());
+ Add<HStoreNamedField>(
+ allocation_memento,
+ HObjectAccess::ForAllocationMementoSite(),
+ allocation_site);
+ if (FLAG_allocation_site_pretenuring) {
+ HValue* memento_create_count = Add<HLoadNamedField>(
+ allocation_site, HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kMementoCreateCountOffset));
+ memento_create_count = AddUncasted<HAdd>(
+ memento_create_count, graph()->GetConstant1());
+ HStoreNamedField* store = Add<HStoreNamedField>(
+ allocation_site, HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kMementoCreateCountOffset), memento_create_count);
+ // No write barrier needed to store a smi.
+ store->SkipWriteBarrier();
+ }
}
@@ -1897,12 +2646,18 @@ HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
- if (kind_ == GetInitialFastElementsKind()) {
+ if (!builder()->top_info()->IsStub()) {
+ // A constant map is fine.
+ Handle<Map> map(builder()->isolate()->get_initial_js_array_map(kind_),
+ builder()->isolate());
+ return builder()->Add<HConstant>(map);
+ }
+
+ if (constructor_function_ != NULL && kind_ == GetInitialFastElementsKind()) {
// No need for a context lookup if the kind_ matches the initial
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access));
+ return builder()->AddLoadNamedField(constructor_function_, access);
}
HInstruction* native_context = builder()->BuildGetNativeContext();
@@ -1922,8 +2677,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access));
+ return builder()->AddLoadNamedField(constructor_function_, access);
}
@@ -1941,12 +2695,14 @@ HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize(
HInstruction* elements_size_value =
builder()->Add<HConstant>(elements_size());
- HInstruction* mul = builder()->Add<HMul>(length_node, elements_size_value);
- mul->ClearFlag(HValue::kCanOverflow);
-
+ HInstruction* mul = HMul::NewImul(builder()->zone(), builder()->context(),
+ length_node, elements_size_value);
+ builder()->AddInstruction(mul);
HInstruction* base = builder()->Add<HConstant>(base_size);
- HInstruction* total_size = builder()->Add<HAdd>(base, mul);
+ HInstruction* total_size = HAdd::New(builder()->zone(), builder()->context(),
+ base, mul);
total_size->ClearFlag(HValue::kCanOverflow);
+ builder()->AddInstruction(total_size);
return total_size;
}
@@ -1970,34 +2726,40 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
return AllocateArray(size_in_bytes,
capacity,
- builder()->graph()->GetConstant0(),
- true);
+ builder()->graph()->GetConstant0());
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* capacity,
HValue* length_field,
- bool fill_with_hole) {
+ FillMode fill_mode) {
HValue* size_in_bytes = EstablishAllocationSize(capacity);
- return AllocateArray(size_in_bytes, capacity, length_field, fill_with_hole);
+ return AllocateArray(size_in_bytes, capacity, length_field, fill_mode);
}
HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
HValue* capacity,
HValue* length_field,
- bool fill_with_hole) {
+ FillMode fill_mode) {
// These HForceRepresentations are because we store these as fields in the
// objects we construct, and an int32-to-smi HChange could deopt. Accept
// the deopt possibility now, before allocation occurs.
- capacity = builder()->Add<HForceRepresentation>(capacity,
- Representation::Smi());
- length_field = builder()->Add<HForceRepresentation>(length_field,
- Representation::Smi());
+ capacity =
+ builder()->AddUncasted<HForceRepresentation>(capacity,
+ Representation::Smi());
+ length_field =
+ builder()->AddUncasted<HForceRepresentation>(length_field,
+ Representation::Smi());
// Allocate (dealing with failure appropriately)
HAllocate* new_object = builder()->Add<HAllocate>(size_in_bytes,
HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE);
+ // Folded array allocation should be aligned if it has fast double elements.
+ if (IsFastDoubleElementsKind(kind_)) {
+ new_object->MakeDoubleAligned();
+ }
+
// Fill in the fields: map, properties, length
HValue* map;
if (allocation_site_payload_ == NULL) {
@@ -2015,7 +2777,7 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes,
// Initialize the elements
builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
- if (fill_with_hole) {
+ if (fill_mode == FILL_WITH_HOLE) {
builder()->BuildFillElementsWithHole(elements_location_, kind_,
graph()->GetConstant0(), capacity);
}
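
Replacing the bool fill_with_hole parameter with a FillMode enum is a pure readability change: AllocateArray(..., FILL_WITH_HOLE) says at the call site what AllocateArray(..., true) did not. A minimal standalone rendering of the same refactoring, using NaN as a stand-in for V8's hole sentinel in a double-element backing store:

#include <cstddef>
#include <limits>
#include <vector>

enum FillMode { DONT_FILL_WITH_HOLE, FILL_WITH_HOLE };

// NaN stands in for the hole sentinel here; the real store uses a
// dedicated bit pattern.
std::vector<double> AllocateBacking(std::size_t capacity, FillMode fill_mode) {
  double init = (fill_mode == FILL_WITH_HOLE)
                    ? std::numeric_limits<double>::quiet_NaN()
                    : 0.0;
  return std::vector<double>(capacity, init);
}

A call such as AllocateBacking(n, FILL_WITH_HOLE) now documents itself, which is the point of the enum.
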
@@ -2057,6 +2819,9 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// to know it's the initial state.
function_state_= &initial_function_state_;
InitializeAstVisitor(info->isolate());
+ if (FLAG_emit_opt_code_positions) {
+ SetSourcePosition(info->shared_info()->start_position());
+ }
}
@@ -2069,8 +2834,8 @@ HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
return first;
} else {
HBasicBlock* join_block = graph()->CreateBasicBlock();
- first->Goto(join_block);
- second->Goto(join_block);
+ Goto(first, join_block);
+ Goto(second, join_block);
join_block->SetJoinId(join_id);
return join_block;
}
@@ -2081,7 +2846,7 @@ HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block) {
if (continue_block != NULL) {
- if (exit_block != NULL) exit_block->Goto(continue_block);
+ if (exit_block != NULL) Goto(exit_block, continue_block);
continue_block->SetJoinId(statement->ContinueId());
return continue_block;
}
@@ -2094,10 +2859,10 @@ HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
HBasicBlock* body_exit,
HBasicBlock* loop_successor,
HBasicBlock* break_block) {
- if (body_exit != NULL) body_exit->Goto(loop_entry);
+ if (body_exit != NULL) Goto(body_exit, loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
- if (loop_successor != NULL) loop_successor->Goto(break_block);
+ if (loop_successor != NULL) Goto(loop_successor, break_block);
break_block->SetJoinId(statement->ExitId());
return break_block;
}
@@ -2105,8 +2870,26 @@ HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
}
-void HBasicBlock::FinishExit(HControlInstruction* instruction) {
- Finish(instruction);
+// Build a new loop header block and set it as the current block.
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
+ HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+ Goto(loop_entry);
+ set_current_block(loop_entry);
+ return loop_entry;
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
+ IterationStatement* statement) {
+ HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
+ ? osr()->BuildOsrLoopEntry(statement)
+ : BuildLoopEntry();
+ return loop_entry;
+}
+
+
+void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) {
+ Finish(instruction, position);
ClearEnvironment();
}
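
BuildLoopEntry centralizes the pattern every loop visitor below now uses: create a loop-header block, branch into it, and make it the current block, with the statement-taking overload choosing an OSR entry when the loop is an on-stack-replacement target. A toy builder showing the shape of the non-OSR helper (all types are simplified stand-ins, not V8's):

#include <memory>
#include <vector>

struct Block {
  bool is_loop_header = false;
};

struct Builder {
  std::vector<std::unique_ptr<Block>> blocks;
  Block* current = nullptr;

  Block* CreateLoopHeader() {
    blocks.push_back(std::make_unique<Block>(Block{true}));
    return blocks.back().get();
  }

  void Goto(Block* /*from*/, Block* /*to*/) { /* record a CFG edge */ }

  // Make a header block, branch the current block into it, and keep
  // emitting into the header, the same three steps as above.
  Block* BuildLoopEntry() {
    Block* entry = CreateLoopHeader();
    Goto(current, entry);
    current = entry;
    return entry;
  }
};
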
@@ -2124,7 +2907,6 @@ HGraph::HGraph(CompilationInfo* info)
zone_(info->zone()),
is_recursive_(false),
use_optimistic_licm_(false),
- has_soft_deoptimize_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
maximum_environment_size_(0),
@@ -2153,12 +2935,12 @@ HBasicBlock* HGraph::CreateBasicBlock() {
}
-void HGraph::FinalizeUniqueValueIds() {
+void HGraph::FinalizeUniqueness() {
DisallowHeapAllocation no_gc;
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
- it.Current()->FinalizeUniqueValueId();
+ it.Current()->FinalizeUniqueness();
}
}
}
@@ -2656,7 +3438,7 @@ void EffectContext::ReturnControl(HControlInstruction* instr,
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
+ owner()->FinishCurrentBlock(instr);
HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
owner()->set_current_block(join);
}
@@ -2666,7 +3448,7 @@ void EffectContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* true_branch = NULL;
HBasicBlock* false_branch = NULL;
- continuation->Continue(&true_branch, &false_branch, NULL);
+ continuation->Continue(&true_branch, &false_branch);
if (!continuation->IsTrueReachable()) {
owner()->set_current_block(false_branch);
} else if (!continuation->IsFalseReachable()) {
@@ -2700,7 +3482,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, materialize_true);
instr->SetSuccessorAt(1, materialize_false);
- owner()->current_block()->Finish(instr);
+ owner()->FinishCurrentBlock(instr);
owner()->set_current_block(materialize_true);
owner()->Push(owner()->graph()->GetConstantTrue());
owner()->set_current_block(materialize_false);
@@ -2715,7 +3497,7 @@ void ValueContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* materialize_true = NULL;
HBasicBlock* materialize_false = NULL;
- continuation->Continue(&materialize_true, &materialize_false, NULL);
+ continuation->Continue(&materialize_true, &materialize_false);
if (continuation->IsTrueReachable()) {
owner()->set_current_block(materialize_true);
owner()->Push(owner()->graph()->GetConstantTrue());
@@ -2755,9 +3537,9 @@ void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
instr->SetSuccessorAt(0, empty_true);
instr->SetSuccessorAt(1, empty_false);
- owner()->current_block()->Finish(instr);
- empty_true->Goto(if_true(), owner()->function_state());
- empty_false->Goto(if_false(), owner()->function_state());
+ owner()->FinishCurrentBlock(instr);
+ owner()->Goto(empty_true, if_true(), owner()->function_state());
+ owner()->Goto(empty_false, if_false(), owner()->function_state());
owner()->set_current_block(NULL);
}
@@ -2766,12 +3548,12 @@ void TestContext::ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) {
HBasicBlock* true_branch = NULL;
HBasicBlock* false_branch = NULL;
- continuation->Continue(&true_branch, &false_branch, NULL);
+ continuation->Continue(&true_branch, &false_branch);
if (continuation->IsTrueReachable()) {
- true_branch->Goto(if_true(), owner()->function_state());
+ owner()->Goto(true_branch, if_true(), owner()->function_state());
}
if (continuation->IsFalseReachable()) {
- false_branch->Goto(if_false(), owner()->function_state());
+ owner()->Goto(false_branch, if_false(), owner()->function_state());
}
owner()->set_current_block(NULL);
}
@@ -2786,15 +3568,8 @@ void TestContext::BuildBranch(HValue* value) {
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout(kArgumentsObjectValueInATestContext);
}
- HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
- HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
ToBooleanStub::Types expected(condition()->to_boolean_types());
- HBranch* test = new(zone()) HBranch(value, expected, empty_true, empty_false);
- builder->current_block()->Finish(test);
-
- empty_true->Goto(if_true(), builder->function_state());
- empty_false->Goto(if_false(), builder->function_state());
- builder->set_current_block(NULL);
+ ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
}
@@ -2910,7 +3685,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
// not replayed by the Lithium translation.
HEnvironment* initial_env = environment()->CopyWithoutHistory();
HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
+ Goto(body_entry);
body_entry->SetJoinId(BailoutId::FunctionEntry());
set_current_block(body_entry);
@@ -2922,8 +3697,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
VisitDeclarations(scope->declarations());
Add<HSimulate>(BailoutId::Declarations());
- HValue* context = environment()->context();
- Add<HStackCheck>(context, HStackCheck::kFunctionEntry);
+ Add<HStackCheck>(HStackCheck::kFunctionEntry);
VisitStatements(current_info()->function()->body());
if (HasStackOverflow()) return false;
@@ -2948,7 +3722,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
type_info->set_inlined_type_change_checksum(composite_checksum);
// Perform any necessary OSR-specific cleanups or changes to the graph.
- osr_->FinishGraph();
+ osr()->FinishGraph();
return true;
}
@@ -2973,7 +3747,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
Run<HEnvironmentLivenessAnalysisPhase>();
}
- Run<HPropagateDeoptimizingMarkPhase>();
if (!CheckConstPhiUses()) {
*bailout_reason = kUnsupportedPhiUseOfConstVariable;
return false;
@@ -2984,11 +3757,15 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
return false;
}
- // Remove dead code and phis
- if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
+ // Find and mark unreachable code to simplify optimizations, especially gvn,
+ // where unreachable code could unnecessarily defeat LICM.
+ Run<HMarkUnreachableBlocksPhase>();
+ if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
+ if (FLAG_load_elimination) Run<HLoadEliminationPhase>();
+
CollectPhis();
if (has_osr()) osr()->FinishOsrValues();
@@ -3014,6 +3791,8 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
+ if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
+
if (FLAG_use_range) Run<HRangeAnalysisPhase>();
Run<HComputeChangeUndefinedToNaN>();
@@ -3022,17 +3801,17 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
- if (FLAG_array_bounds_checks_elimination) {
- Run<HBoundsCheckEliminationPhase>();
- }
- if (FLAG_array_bounds_checks_hoisting) {
- Run<HBoundsCheckHoistingPhase>();
- }
+ if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
+ if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
RestoreActualValues();
+ // Find unreachable code a second time: GVN and other optimizations may
+ // have made blocks unreachable that were previously reachable.
+ Run<HMarkUnreachableBlocksPhase>();
+
return true;
}
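
The ordering in HGraph::Optimize is deliberate: HMarkUnreachableBlocksPhase runs before GVN so dead blocks cannot defeat LICM, and runs again at the end because GVN itself can disconnect blocks. A compact way to picture such a pass pipeline (the runner below is illustrative, not V8's API):

#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Graph { /* CFG, instructions, ... */ };

using Pass = std::function<void(Graph&)>;

void RunPipeline(Graph& graph,
                 const std::vector<std::pair<std::string, Pass>>& passes) {
  for (const auto& [name, pass] : passes) {
    std::cout << "running " << name << "\n";
    pass(graph);
  }
}

int main() {
  Graph g;
  RunPipeline(g, {
      {"mark-unreachable", [](Graph&) {}},  // before GVN: keep LICM honest
      {"dce",              [](Graph&) {}},
      {"gvn",              [](Graph&) {}},
      {"mark-unreachable", [](Graph&) {}},  // after: GVN may kill blocks
  });
}
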
@@ -3065,12 +3844,6 @@ void HGraph::RestoreActualValues() {
}
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
- Push(instr);
- AddInstruction(instr);
-}
-
-
template <class Instruction>
HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
@@ -3091,10 +3864,6 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
HInstruction* context = Add<HContext>();
environment()->BindContext(context);
- HConstant* undefined_constant = HConstant::cast(Add<HConstant>(
- isolate()->factory()->undefined_value()));
- graph()->set_undefined_constant(undefined_constant);
-
// Create an arguments object containing the initial parameters. Set the
// initial values of parameters including "this" having parameter index 0.
ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
@@ -3108,6 +3877,7 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
AddInstruction(arguments_object);
graph()->SetArgumentsObject(arguments_object);
+ HConstant* undefined_constant = graph()->GetConstantUndefined();
// Initialize specials and locals to undefined.
for (int i = environment()->parameter_count() + 1;
i < environment()->length();
@@ -3150,7 +3920,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
}
HBasicBlock* break_block = break_info.break_block();
if (break_block != NULL) {
- if (current_block() != NULL) current_block()->Goto(break_block);
+ if (current_block() != NULL) Goto(break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
@@ -3260,7 +4030,7 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
HBasicBlock* continue_block = break_scope()->Get(
stmt->target(), BreakAndContinueScope::CONTINUE, &drop_extra);
Drop(drop_extra);
- current_block()->Goto(continue_block);
+ Goto(continue_block);
set_current_block(NULL);
}
@@ -3273,7 +4043,7 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
HBasicBlock* break_block = break_scope()->Get(
stmt->target(), BreakAndContinueScope::BREAK, &drop_extra);
Drop(drop_extra);
- current_block()->Goto(break_block);
+ Goto(break_block);
set_current_block(NULL);
}
@@ -3296,26 +4066,26 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(test->if_true(), state);
+ Goto(test->if_true(), state);
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(return_value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ New<HHasInstanceTypeAndBranch>(return_value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_spec_object);
typecheck->SetSuccessorAt(1, not_spec_object);
- current_block()->Finish(typecheck);
- if_spec_object->AddLeaveInlined(return_value, state);
- not_spec_object->AddLeaveInlined(receiver, state);
+ FinishCurrentBlock(typecheck);
+ AddLeaveInlined(if_spec_object, return_value, state);
+ AddLeaveInlined(not_spec_object, receiver, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Return from an inlined setter call. The returned value is never used, the
@@ -3325,11 +4095,11 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
HValue* rhs = environment()->arguments_environment()->Lookup(1);
context->ReturnValue(rhs);
} else if (context->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
HValue* rhs = environment()->arguments_environment()->Lookup(1);
- current_block()->AddLeaveInlined(rhs, state);
+ AddLeaveInlined(rhs, state);
}
} else {
// Return from a normal inlined function. Visit the subexpression in the
@@ -3339,11 +4109,11 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
VisitForControl(stmt->expression(), test->if_true(), test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
- current_block()->AddLeaveInlined(Pop(), state);
+ AddLeaveInlined(Pop(), state);
}
}
set_current_block(NULL);
@@ -3377,8 +4147,6 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels);
}
- HValue* context = environment()->context();
-
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
HValue* tag_value = Pop();
@@ -3389,13 +4157,11 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// Test switch's tag value if all clauses are string literals
if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) {
- string_check = new(zone()) HIsStringAndBranch(tag_value);
first_test_block = graph()->CreateBasicBlock();
not_string_block = graph()->CreateBasicBlock();
-
- string_check->SetSuccessorAt(0, first_test_block);
- string_check->SetSuccessorAt(1, not_string_block);
- current_block()->Finish(string_check);
+ string_check = New<HIsStringAndBranch>(
+ tag_value, first_test_block, not_string_block);
+ FinishCurrentBlock(string_check);
set_current_block(first_test_block);
}
@@ -3424,21 +4190,21 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
HCompareNumericAndBranch* compare_ =
- new(zone()) HCompareNumericAndBranch(tag_value,
- label_value,
- Token::EQ_STRICT);
+ New<HCompareNumericAndBranch>(tag_value,
+ label_value,
+ Token::EQ_STRICT);
compare_->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
compare = compare_;
} else {
- compare = new(zone()) HStringCompareAndBranch(context, tag_value,
- label_value,
- Token::EQ_STRICT);
+ compare = New<HStringCompareAndBranch>(tag_value,
+ label_value,
+ Token::EQ_STRICT);
}
compare->SetSuccessorAt(0, body_block);
compare->SetSuccessorAt(1, next_test_block);
- current_block()->Finish(compare);
+ FinishCurrentBlock(compare);
set_current_block(next_test_block);
}
@@ -3471,6 +4237,13 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
last_block = NULL; // Cleared to indicate we've handled it.
}
} else {
+ // If the current test block is deoptimizing due to an unhandled clause
+ // of the switch, the test instruction is in the next block since the
+ // deopt must end the current block.
+ if (curr_test_block->IsDeoptimizing()) {
+ ASSERT(curr_test_block->end()->SecondSuccessor() == NULL);
+ curr_test_block = curr_test_block->end()->FirstSuccessor();
+ }
normal_block = curr_test_block->end()->FirstSuccessor();
curr_test_block = curr_test_block->end()->SecondSuccessor();
}
@@ -3512,8 +4285,8 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
last_block,
stmt->ExitId()));
} else {
- if (fall_through_block != NULL) fall_through_block->Goto(break_block);
- if (last_block != NULL) last_block->Goto(break_block);
+ if (fall_through_block != NULL) Goto(fall_through_block, break_block);
+ if (last_block != NULL) Goto(last_block, break_block);
break_block->SetJoinId(stmt->ExitId());
set_current_block(break_block);
}
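
The switch visitor above lowers a string or smi switch into a chain of test blocks, each ending in a strict-equality compare whose first successor is the clause body and whose second successor is the next test. Written out directly in C++, the emitted control flow is essentially this (labels and bodies invented for the example):

#include <iostream>
#include <string>

// Each `if` corresponds to one test block ending in a strict-equality
// compare: first successor is the clause body, second successor is the
// next test block.
int SwitchAsCompareChain(const std::string& tag) {
  if (tag == "small") return 1;   // clause 0 body
  if (tag == "medium") return 2;  // clause 1 body
  if (tag == "large") return 3;   // clause 2 body
  return 0;                       // default / fall-through successor
}

int main() { std::cout << SwitchAsCompareChain("medium") << "\n"; }  // 2
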
@@ -3525,9 +4298,8 @@ void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
Add<HSimulate>(stmt->StackCheckId());
- HValue* context = environment()->context();
- HStackCheck* stack_check = HStackCheck::cast(Add<HStackCheck>(
- context, HStackCheck::kBackwardsBranch));
+ HStackCheck* stack_check =
+ HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
ASSERT(loop_entry->IsLoopHeader());
loop_entry->loop_information()->set_stack_check(stack_check);
CHECK_BAILOUT(Visit(stmt->body()));
@@ -3539,7 +4311,7 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
BreakAndContinueInfo break_info(stmt);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -3578,7 +4350,7 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
@@ -3620,7 +4392,7 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
@@ -3703,14 +4475,14 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
- HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt);
+ HBasicBlock* loop_entry = BuildLoopEntry(stmt);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
// Check that we still have more keys.
HCompareNumericAndBranch* compare_index =
- new(zone()) HCompareNumericAndBranch(index, limit, Token::LT);
+ New<HCompareNumericAndBranch>(index, limit, Token::LT);
compare_index->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
@@ -3719,7 +4491,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
compare_index->SetSuccessorAt(0, loop_body);
compare_index->SetSuccessorAt(1, loop_successor);
- current_block()->Finish(compare_index);
+ FinishCurrentBlock(compare_index);
set_current_block(loop_successor);
Drop(5);
@@ -3749,9 +4521,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(body_exit);
HValue* current_index = Pop();
- HInstruction* new_index = New<HAdd>(current_index,
- graph()->GetConstant1());
- PushAndAdd(new_index);
+ Push(AddUncasted<HAdd>(current_index, graph()->GetConstant1()));
body_exit = current_block();
}
@@ -3798,6 +4568,11 @@ void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
+void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
Code* unoptimized_code, FunctionLiteral* expr) {
int start_position = expr->start_position();
@@ -3828,19 +4603,18 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
}
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
- HValue* context = environment()->context();
HFunctionLiteral* instr =
- new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure());
+ New<HFunctionLiteral>(shared_info, expr->pretenure());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
+void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout(kSharedFunctionInfoLiteral);
+ return Bailout(kNativeFunctionLiteral);
}
@@ -3954,19 +4728,15 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(constant, expr->id());
} else {
HLoadGlobalCell* instr =
- new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
+ New<HLoadGlobalCell>(cell, lookup.GetPropertyDetails());
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
- HValue* context = environment()->context();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- AddInstruction(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
HLoadGlobalGeneric* instr =
- new(zone()) HLoadGlobalGeneric(context,
- global_object,
- variable->name(),
- ast_context()->is_for_typeof());
- instr->set_position(expr->position());
+ New<HLoadGlobalGeneric>(global_object,
+ variable->name(),
+ ast_context()->is_for_typeof());
return ast_context()->ReturnInstruction(instr, expr->id());
}
}
@@ -4009,13 +4779,10 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
Handle<FixedArray> literals(closure->literals());
- HValue* context = environment()->context();
-
- HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
- literals,
- expr->pattern(),
- expr->flags(),
- expr->literal_index());
+ HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
+ expr->pattern(),
+ expr->flags(),
+ expr->literal_index());
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4080,20 +4847,6 @@ static bool LookupAccessorPair(Handle<Map> map,
}
-static bool LookupGetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* getter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->getter()->IsJSFunction()) {
- *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter()));
- return true;
- }
- return false;
-}
-
-
static bool LookupSetter(Handle<Map> map,
Handle<String> name,
Handle<JSFunction>* setter,
@@ -4101,7 +4854,11 @@ static bool LookupSetter(Handle<Map> map,
Handle<AccessorPair> accessors;
if (LookupAccessorPair(map, name, &accessors, holder) &&
accessors->setter()->IsJSFunction()) {
- *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter()));
+ Handle<JSFunction> func(JSFunction::cast(accessors->setter()));
+ CallOptimization call_optimization(func);
+ // TODO(dcarney): temporary hack until crankshaft can handle api calls.
+ if (call_optimization.is_simple_api_call()) return false;
+ *setter = func;
return true;
}
return false;
@@ -4116,7 +4873,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
int* max_properties) {
if (boilerplate->map()->is_deprecated()) {
Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
- if (result->IsSmi()) return false;
+ if (result.is_null()) return false;
}
ASSERT(max_depth >= 0 && *max_properties >= 0);
@@ -4177,23 +4934,29 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ expr->BuildConstantProperties(isolate());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
- Handle<Object> boilerplate(closure->literals()->get(
- expr->literal_index()), isolate());
- if (boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(boilerplate),
- kMaxFastLiteralDepth,
- &max_properties)) {
- Handle<JSObject> boilerplate_object =
- Handle<JSObject>::cast(boilerplate);
+ Handle<Object> literals_cell(closure->literals()->get(expr->literal_index()),
+ isolate());
+ Handle<AllocationSite> site;
+ Handle<JSObject> boilerplate;
+ if (!literals_cell->IsUndefined()) {
+ // Retrieve the boilerplate
+ site = Handle<AllocationSite>::cast(literals_cell);
+ boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
+ isolate());
+ }
- literal = BuildFastLiteral(boilerplate_object,
- Handle<Object>::null(),
- DONT_TRACK_ALLOCATION_SITE);
+ if (!boilerplate.is_null() &&
+ IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+ AllocationSiteUsageContext usage_context(isolate(), site, false);
+ usage_context.EnterNewScope();
+ literal = BuildFastLiteral(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
Handle<FixedArray> closure_literals(closure->literals(), isolate());
@@ -4209,9 +4972,10 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Add<HPushArgument>(Add<HConstant>(constant_properties));
Add<HPushArgument>(Add<HConstant>(flags));
- Runtime::FunctionId function_id =
- (expr->depth() > 1 || expr->may_store_doubles())
- ? Runtime::kCreateObjectLiteral : Runtime::kCreateObjectLiteralShallow;
+ // TODO(mvstanton): Add a flag to turn off creation of any
+ // AllocationMementos for this call: we are in crankshaft and should have
+ // learned enough about transition behavior to stop emitting mementos.
+ Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
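
The object-literal path now either deep-copies a fast boilerplate inline (BuildFastLiteral under an AllocationSiteUsageContext) or falls back to a Runtime::kCreateObjectLiteral call. Fast-path eligibility is a bounded walk of the boilerplate: a maximum nesting depth plus a total property budget. A simplified model of that bookkeeping (structure and limits invented for the sketch):

#include <vector>

// Nested object-valued properties only; scalar properties would simply
// consume budget.
struct Boilerplate {
  std::vector<Boilerplate> properties;
};

bool IsFastLiteralModel(const Boilerplate& b, int max_depth, int* budget) {
  if (max_depth < 0) return false;  // too deeply nested
  for (const Boilerplate& prop : b.properties) {
    if (--(*budget) < 0) return false;  // too many properties in total
    if (!IsFastLiteralModel(prop, max_depth - 1, budget)) return false;
  }
  return true;
}
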
@@ -4292,6 +5056,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
HInstruction* literal;
@@ -4301,67 +5066,73 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
bool uninitialized = false;
Handle<Object> literals_cell(literals->get(expr->literal_index()),
isolate());
- Handle<Object> raw_boilerplate;
+ Handle<JSObject> boilerplate_object;
if (literals_cell->IsUndefined()) {
uninitialized = true;
- raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
+ Handle<Object> raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
isolate(), literals, expr->constant_elements());
if (raw_boilerplate.is_null()) {
return Bailout(kArrayBoilerplateCreationFailed);
}
- site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(*raw_boilerplate);
+ boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
+ AllocationSiteCreationContext creation_context(isolate());
+ site = creation_context.EnterNewScope();
+ if (JSObject::DeepWalk(boilerplate_object, &creation_context).is_null()) {
+ return Bailout(kArrayBoilerplateCreationFailed);
+ }
+ creation_context.ExitScope(site, boilerplate_object);
literals->set(expr->literal_index(), *site);
- if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
+ if (boilerplate_object->elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
isolate()->counters()->cow_arrays_created_runtime()->Increment();
}
} else {
ASSERT(literals_cell->IsAllocationSite());
site = Handle<AllocationSite>::cast(literals_cell);
- raw_boilerplate = Handle<Object>(site->transition_info(), isolate());
+ boilerplate_object = Handle<JSObject>(
+ JSObject::cast(site->transition_info()), isolate());
}
- ASSERT(!raw_boilerplate.is_null());
- ASSERT(site->IsLiteralSite());
+ ASSERT(!boilerplate_object.is_null());
+ ASSERT(site->SitePointsToLiteral());
- Handle<JSObject> boilerplate_object =
- Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(boilerplate_object)->GetElementsKind();
-
- // TODO(mvstanton): This heuristic is only a temporary solution. In the
- // end, we want to quit creating allocation site info after a certain number
- // of GCs for a call site.
- AllocationSiteMode mode = AllocationSite::GetMode(
- boilerplate_elements_kind);
+ boilerplate_object->GetElementsKind();
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
if (IsFastLiteral(boilerplate_object,
kMaxFastLiteralDepth,
&max_properties)) {
- literal = BuildFastLiteral(boilerplate_object,
- site,
- mode);
+ AllocationSiteUsageContext usage_context(isolate(), site, false);
+ usage_context.EnterNewScope();
+ literal = BuildFastLiteral(boilerplate_object, &usage_context);
+ usage_context.ExitScope(site, boilerplate_object);
} else {
NoObservableSideEffectsScope no_effects(this);
// Boilerplate already exists and constant elements are never accessed,
// pass an empty fixed array to the runtime function instead.
Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
int literal_index = expr->literal_index();
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+ flags |= ArrayLiteral::kDisableMementos;
Add<HPushArgument>(Add<HConstant>(literals));
Add<HPushArgument>(Add<HConstant>(literal_index));
Add<HPushArgument>(Add<HConstant>(constants));
+ Add<HPushArgument>(Add<HConstant>(flags));
- Runtime::FunctionId function_id = (expr->depth() > 1)
- ? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow;
+ // TODO(mvstanton): Consider a flag to turn off creation of any
+ // AllocationMementos for this call: we are in crankshaft and should have
+ // learned enough about transition behavior to stop emitting mementos.
+ Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
- 3);
+ 4);
// De-opt if elements kind changed from boilerplate_elements_kind.
Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
@@ -4415,31 +5186,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
}
-// Sets the lookup result and returns true if the load/store can be inlined.
-static bool ComputeLoadStoreField(Handle<Map> type,
- Handle<String> name,
- LookupResult* lookup,
- bool is_store) {
- ASSERT(!is_store || !type->is_observed());
- if (!CanInlinePropertyAccess(*type)) {
- lookup->NotFound();
- return false;
- }
- // If we directly find a field, the access can be inlined.
- type->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsField()) return true;
-
- // For a load, we are out of luck if there is no such field.
- if (!is_store) return false;
-
- // 2nd chance: A store into a non-existent field can still be inlined if we
- // have a matching transition and some room left in the object.
- type->LookupTransition(NULL, *name, lookup);
- return lookup->IsTransitionToField(*type) &&
- (type->unused_property_fields() > 0);
-}
-
-
HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
Handle<Map> map) {
BuildCheckHeapObject(object);
@@ -4484,7 +5230,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
}
HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
- bool transition_to_field = lookup->IsTransitionToField(*map);
+ bool transition_to_field = lookup->IsTransitionToField();
HStoreNamedField *instr;
if (FLAG_track_double_fields && field_access.representation().IsDouble()) {
@@ -4520,7 +5266,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
}
if (transition_to_field) {
- Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
+ Handle<Map> transition(lookup->GetTransitionTarget());
HConstant* transition_constant = Add<HConstant>(transition);
instr->SetTransition(transition_constant, top_info());
// TODO(fschneider): Record the new map type of the object in the IR to
@@ -4535,9 +5281,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
HValue* object,
Handle<String> name,
HValue* value) {
- HValue* context = environment()->context();
- return new(zone()) HStoreNamedGeneric(
- context,
+ return New<HStoreNamedGeneric>(
object,
name,
value,
@@ -4545,6 +5289,28 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
}
+// Sets the lookup result and returns true if the store can be inlined.
+static bool ComputeStoreField(Handle<Map> type,
+ Handle<String> name,
+ LookupResult* lookup,
+ bool lookup_transition = true) {
+ ASSERT(!type->is_observed());
+ if (!CanInlinePropertyAccess(*type)) {
+ lookup->NotFound();
+ return false;
+ }
+ // If we directly find a field, the access can be inlined.
+ type->LookupDescriptor(NULL, *name, lookup);
+ if (lookup->IsField()) return true;
+
+ if (!lookup_transition) return false;
+
+ type->LookupTransition(NULL, *name, lookup);
+ return lookup->IsTransitionToField() &&
+ (type->unused_property_fields() > 0);
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
HValue* object,
Handle<String> name,
@@ -4552,7 +5318,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
Handle<Map> map) {
// Handle a store to a known field.
LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
+ if (ComputeStoreField(map, name, &lookup)) {
HCheckMaps* checked_object = AddCheckMap(object, map);
return BuildStoreNamedField(checked_object, name, value, map, &lookup);
}
@@ -4562,140 +5328,192 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
}
-static bool CanLoadPropertyFromPrototype(Handle<Map> map,
- Handle<Name> name,
- LookupResult* lookup) {
- if (!CanInlinePropertyAccess(*map)) return false;
- map->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsFound()) return false;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
+ PropertyAccessInfo* info) {
+ if (!CanInlinePropertyAccess(*map_)) return false;
+
+ if (!LookupDescriptor()) return false;
+
+ if (!lookup_.IsFound()) {
+ return (!info->lookup_.IsFound() || info->has_holder()) &&
+ map_->prototype() == info->map_->prototype();
+ }
+
+ // Mismatch if the other access info found the property in the prototype
+ // chain.
+ if (info->has_holder()) return false;
+
+ if (lookup_.IsPropertyCallbacks()) {
+ return accessor_.is_identical_to(info->accessor_);
+ }
+
+ if (lookup_.IsConstant()) {
+ return constant_.is_identical_to(info->constant_);
+ }
+
+ ASSERT(lookup_.IsField());
+ if (!info->lookup_.IsField()) return false;
+
+ Representation r = access_.representation();
+ if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ if (info->access_.offset() != access_.offset()) return false;
+ if (info->access_.IsInobject() != access_.IsInobject()) return false;
+ info->GeneralizeRepresentation(r);
return true;
}
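
IsCompatibleForLoad decides whether two receiver maps can share one inlined load: field accesses must agree on offset and in-object-ness, and their representations are generalized to a common one; accessor and constant results must be identical objects. A simplified model of the field case (the representation handling below is a deliberate reduction of V8's lattice):

struct FieldAccess {
  int offset;
  bool inobject;
  enum Repr { kSmi, kDouble, kTagged } repr;
};

// Merge b into a if both maps can use the same inlined load. Double
// fields are stored unboxed, so they cannot share a load with
// tagged/smi fields; smi and tagged generalize to tagged.
bool MergeForLoad(FieldAccess* a, const FieldAccess& b) {
  if (a->offset != b.offset || a->inobject != b.inobject) return false;
  bool a_double = (a->repr == FieldAccess::kDouble);
  bool b_double = (b.repr == FieldAccess::kDouble);
  if (a_double != b_double) return false;
  if (a->repr != b.repr) a->repr = FieldAccess::kTagged;  // generalize
  return true;
}
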
-HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
- // Use monomorphic load if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- if (types->length() > kMaxLoadPolymorphism) return NULL;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
+ map_->LookupDescriptor(NULL, *name_, &lookup_);
+ return LoadResult(map_);
+}
- LookupResult lookup(isolate());
- int count;
- HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
- for (count = 0; count < types->length(); ++count) {
- Handle<Map> map = types->at(count);
- if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
- HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
+ if (lookup_.IsField()) {
+ access_ = HObjectAccess::ForField(map, &lookup_, name_);
+ } else if (lookup_.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
+ if (!callback->IsAccessorPair()) return false;
+ Object* getter = Handle<AccessorPair>::cast(callback)->getter();
+ if (!getter->IsJSFunction()) return false;
+ Handle<JSFunction> accessor = handle(JSFunction::cast(getter));
+ CallOptimization call_optimization(accessor);
+ // TODO(dcarney): temporary hack until crankshaft can handle api calls.
+ if (call_optimization.is_simple_api_call()) return false;
+ accessor_ = accessor;
+ } else if (lookup_.IsConstant()) {
+ constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
+ }
- if (count == 0) {
- // First time through the loop; set access and representation.
- access = new_access;
- } else if (!access.representation().IsCompatibleForLoad(
- new_access.representation())) {
- // Representations did not match.
- break;
- } else if (access.offset() != new_access.offset()) {
- // Offsets did not match.
- break;
- } else if (access.IsInobject() != new_access.IsInobject()) {
- // In-objectness did not match.
- break;
+ return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
+ Handle<Map> map = map_;
+ while (map->prototype()->IsJSObject()) {
+ holder_ = handle(JSObject::cast(map->prototype()));
+ if (holder_->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(holder_);
+ }
+ map = Handle<Map>(holder_->map());
+ if (!CanInlinePropertyAccess(*map)) {
+ lookup_.NotFound();
+ return false;
}
- access = access.WithRepresentation(
- access.representation().generalize(new_access.representation()));
+ map->LookupDescriptor(*holder_, *name_, &lookup_);
+ if (lookup_.IsFound()) return LoadResult(map);
}
+ lookup_.NotFound();
+ return true;
+}
- if (count == types->length()) {
- // Everything matched; can use monomorphic load.
- BuildCheckHeapObject(object);
- HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
- return BuildLoadNamedField(checked_object, access);
- }
- if (count != 0) return NULL;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() {
+ if (!CanInlinePropertyAccess(*map_)) return IsStringLength();
+ if (IsJSObjectFieldAccessor()) return true;
+ if (!LookupDescriptor()) return false;
+ if (lookup_.IsFound()) return true;
+ return LookupInPrototypes();
+}
+
- // Second chance: the property is on the prototype and all maps have the
- // same prototype.
- Handle<Map> map(types->at(0));
- if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return NULL;
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadAsMonomorphic(
+ SmallMapList* types) {
+ ASSERT(map_.is_identical_to(types->first()));
+ if (!CanLoadMonomorphic()) return false;
+ if (types->length() > kMaxLoadPolymorphism) return false;
- Handle<Object> prototype(map->prototype(), isolate());
- for (count = 1; count < types->length(); ++count) {
- Handle<Map> test_map(types->at(count));
- if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return NULL;
- if (test_map->prototype() != *prototype) return NULL;
+ if (IsStringLength()) {
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
}
- LookupInPrototypes(map, name, &lookup);
- if (!lookup.IsField()) return NULL;
+ if (IsArrayLength()) {
+ bool is_fast = IsFastElementsKind(map_->elements_kind());
+ for (int i = 1; i < types->length(); ++i) {
+ Handle<Map> test_map = types->at(i);
+ if (test_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (IsFastElementsKind(test_map->elements_kind()) != is_fast) {
+ return false;
+ }
+ }
+ return true;
+ }
- BuildCheckHeapObject(object);
- Add<HCheckMaps>(object, types);
+ if (IsJSObjectFieldAccessor()) {
+ InstanceType instance_type = map_->instance_type();
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() != instance_type) return false;
+ }
+ return true;
+ }
+
+ for (int i = 1; i < types->length(); ++i) {
+ PropertyAccessInfo test_info(isolate(), types->at(i), name_);
+ if (!test_info.IsCompatibleForLoad(this)) return false;
+ }
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- HValue* checked_holder = BuildCheckPrototypeMaps(
- Handle<JSObject>::cast(prototype), holder);
- return BuildLoadNamedField(checked_holder,
- HObjectAccess::ForField(holder_map, &lookup, name));
+ return true;
}
-// Returns true if an instance of this map can never find a property with this
-// name in its prototype chain. This means all prototypes up to the top are
-// fast and don't have the name in them. It would be good if we could optimize
-// polymorphic loads where the property is sometimes found in the prototype
-// chain.
-static bool PrototypeChainCanNeverResolve(
- Handle<Map> map, Handle<String> name) {
- Isolate* isolate = map->GetIsolate();
- Object* current = map->prototype();
- while (current != isolate->heap()->null_value()) {
- if (current->IsJSGlobalProxy() ||
- current->IsGlobalObject() ||
- !current->IsJSObject() ||
- !CanInlinePropertyAccess(JSObject::cast(current)->map()) ||
- JSObject::cast(current)->IsAccessCheckNeeded()) {
- return false;
- }
+HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
+ PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor) {
- LookupResult lookup(isolate);
- Map* map = JSObject::cast(current)->map();
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) return false;
- if (!lookup.IsCacheable()) return false;
- current = JSObject::cast(current)->GetPrototype();
+ HObjectAccess access = HObjectAccess::ForMap(); // bogus default
+ if (info->GetJSObjectFieldAccess(&access)) {
+ return New<HLoadNamedField>(checked_object, access);
}
- return true;
+
+ HValue* checked_holder = checked_object;
+ if (info->has_holder()) {
+ Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
+ checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
+ }
+
+ if (!info->lookup()->IsFound()) return graph()->GetConstantUndefined();
+
+ if (info->lookup()->IsField()) {
+ return BuildLoadNamedField(checked_holder, info->access());
+ }
+
+ if (info->lookup()->IsPropertyCallbacks()) {
+ Push(checked_object);
+ if (FLAG_inline_accessors &&
+ can_inline_accessor &&
+ TryInlineGetter(info->accessor(), ast_id, return_id)) {
+ return NULL;
+ }
+ Add<HPushArgument>(Pop());
+ return New<HCallConstantFunction>(info->accessor(), 1);
+ }
+
+ ASSERT(info->lookup()->IsConstant());
+ return New<HConstant>(info->constant());
}
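
BuildLoadMonomorphic dispatches on what the PropertyAccessInfo lookup found: a direct JSObject field access, a field or constant (possibly behind prototype-chain checks), an accessor to inline or call, or a proven absence that folds to undefined. Schematically (a toy enum, not V8's LookupResult):

enum class LookupKind { kNotFound, kField, kAccessor, kConstant };

struct Lookup {
  LookupKind kind;
  // field offset / accessor / constant payloads elided
};

const char* DescribeLoad(const Lookup& lookup) {
  switch (lookup.kind) {
    case LookupKind::kNotFound: return "fold to constant undefined";
    case LookupKind::kField:    return "inlined field load";
    case LookupKind::kAccessor: return "inline the getter or call it";
    case LookupKind::kConstant: return "embed the constant";
  }
  return "unreachable";
}
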
void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
- int position,
BailoutId ast_id,
+ BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name) {
- HInstruction* instr = TryLoadPolymorphicAsMonomorphic(object, types, name);
- if (instr != NULL) {
- instr->set_position(position);
- return ast_context()->ReturnInstruction(instr, ast_id);
- }
-
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, false) ||
- (lookup.IsCacheable() &&
- CanInlinePropertyAccess(*map) &&
- (lookup.IsConstant() ||
- (!lookup.IsFound() &&
- PrototypeChainCanNeverResolve(map, name))))) {
+ PropertyAccessInfo info(isolate(), types->at(i), name);
+ if (info.CanLoadMonomorphic()) {
if (count == 0) {
BuildCheckHeapObject(object);
join = graph()->CreateBasicBlock();
@@ -4703,37 +5521,24 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
- current_block()->Finish(compare);
+ HCompareMap* compare = New<HCompareMap>(
+ object, info.map(), if_true, if_false);
+ FinishCurrentBlock(compare);
set_current_block(if_true);
- // TODO(verwaest): Merge logic with BuildLoadNamedMonomorphic.
- if (lookup.IsField()) {
- HObjectAccess access = HObjectAccess::ForField(map, &lookup, name);
- HLoadNamedField* load = BuildLoadNamedField(compare, access);
- load->set_position(position);
- AddInstruction(load);
- if (!ast_context()->IsEffect()) Push(load);
- } else if (lookup.IsConstant()) {
- Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
- HConstant* hconstant = Add<HConstant>(constant);
- if (!ast_context()->IsEffect()) Push(hconstant);
+ HInstruction* load = BuildLoadMonomorphic(
+ &info, object, compare, ast_id, return_id, FLAG_polymorphic_inlining);
+ if (load == NULL) {
+ if (HasStackOverflow()) return;
} else {
- ASSERT(!lookup.IsFound());
- if (map->prototype()->IsJSObject()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder = prototype;
- while (holder->map()->prototype()->IsJSObject()) {
- holder = handle(JSObject::cast(holder->map()->prototype()));
- }
- BuildCheckPrototypeMaps(prototype, holder);
+ if (!load->IsLinked()) {
+ AddInstruction(load);
}
- if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+ if (!ast_context()->IsEffect()) Push(load);
}
- current_block()->Goto(join);
+ if (current_block() != NULL) Goto(join);
set_current_block(if_false);
}
}
@@ -4742,16 +5547,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+ // Because the deopt may be the only path in the polymorphic load, make
+ // sure the environment stack at the deopt matches the depth it would
+ // have had after a successful load.
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
} else {
- HValue* context = environment()->context();
- HInstruction* load = new(zone()) HLoadNamedGeneric(context, object, name);
- load->set_position(position);
- AddInstruction(load);
+ HInstruction* load = Add<HLoadNamedGeneric>(object, name);
if (!ast_context()->IsEffect()) Push(load);
if (join != NULL) {
- current_block()->Goto(join);
+ Goto(join);
} else {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
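
HandlePolymorphicLoadNamedField thus emits one HCompareMap-guarded block per receiver map, joined at the end; the tail either hard-deoptimizes when every map seen by type feedback was handled, or performs a generic IC load. Flattened into plain C++, the generated control flow looks roughly like this (types and offsets invented):

#include <cstdint>
#include <cstdlib>

struct Map {};
struct Object {
  const Map* map;
  int64_t fields[4];
};

const Map map_A{}, map_B{};

[[noreturn]] void Deoptimize(const char* /*reason*/) { std::abort(); }

// One guarded block per receiver map seen by type feedback. With full
// feedback the tail deoptimizes instead of doing a generic load.
int64_t PolymorphicLoad(const Object& receiver) {
  if (receiver.map == &map_A) return receiver.fields[0];
  if (receiver.map == &map_B) return receiver.fields[1];
  Deoptimize("Unknown map in polymorphic load");
}
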
@@ -4767,7 +5573,6 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
- int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
@@ -4777,8 +5582,6 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
// for all maps. Requires special map check on the set of all handled maps.
if (types->length() > kMaxStorePolymorphism) return false;
- // TODO(verwaest): Merge the checking logic with the code in
- // TryLoadPolymorphicAsMonomorphic.
LookupResult lookup(isolate());
int count;
Representation representation = Representation::None();
@@ -4786,7 +5589,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
for (count = 0; count < types->length(); ++count) {
Handle<Map> map = types->at(count);
// Pass false to ignore transitions.
- if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
+ if (!ComputeStoreField(map, name, &lookup, false)) break;
ASSERT(!map->is_observed());
HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
@@ -4819,7 +5622,6 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
checked_object, name, value, types->at(count - 1), &lookup),
true);
if (!ast_context()->IsEffect()) Push(value);
- store->set_position(position);
AddInstruction(store);
Add<HSimulate>(assignment_id);
if (!ast_context()->IsEffect()) Drop(1);
@@ -4829,14 +5631,13 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
- int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
Handle<String> name) {
if (TryStorePolymorphicAsMonomorphic(
- position, assignment_id, object, value, types, name)) {
+ assignment_id, object, value, types, name)) {
return;
}
@@ -4848,7 +5649,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, true)) {
+ if (ComputeStoreField(map, name, &lookup)) {
if (count == 0) {
BuildCheckHeapObject(object);
join = graph()->CreateBasicBlock();
@@ -4856,19 +5657,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
- current_block()->Finish(compare);
+ HCompareMap* compare = New<HCompareMap>(object, map, if_true, if_false);
+ FinishCurrentBlock(compare);
set_current_block(if_true);
HInstruction* instr;
CHECK_ALIVE(instr = BuildStoreNamedField(
compare, name, value, map, &lookup));
- instr->set_position(position);
// Goto will add the HSimulate for the store.
AddInstruction(instr);
if (!ast_context()->IsEffect()) Push(value);
- current_block()->Goto(join);
+ Goto(join);
set_current_block(if_false);
}
@@ -4881,14 +5680,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
- instr->set_position(position);
AddInstruction(instr);
if (join != NULL) {
if (!ast_context()->IsEffect()) {
Push(value);
}
- current_block()->Goto(join);
+ Goto(join);
} else {
// The HSimulate for the store should not see the stored value in
// effect contexts (it is not materialized at expr->id() in the
@@ -4942,8 +5740,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
- HandleKeyedElementAccess(object, key, value, expr, return_id,
- expr->position(),
+ HandleKeyedElementAccess(object, key, value, expr,
true, // is_store
&has_side_effects);
Drop(3);
@@ -4982,7 +5779,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
Drop(2);
Add<HPushArgument>(object);
Add<HPushArgument>(value);
- instr = new(zone()) HCallConstantFunction(setter, 2);
+ instr = New<HCallConstantFunction>(setter, 2);
} else {
Drop(2);
CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
@@ -4992,15 +5789,13 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
}
} else if (types != NULL && types->length() > 1) {
Drop(2);
- return HandlePolymorphicStoreNamedField(
- expr->position(), ast_id, object, value, types, name);
+ return HandlePolymorphicStoreNamedField(ast_id, object, value, types, name);
} else {
Drop(2);
instr = BuildStoreNamedGeneric(object, name, value);
}
if (!ast_context()->IsEffect()) Push(value);
- instr->set_position(expr->position());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -5029,7 +5824,6 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
Variable* var,
HValue* value,
- int position,
BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
@@ -5052,7 +5846,6 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
HInstruction* instr =
Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
- instr->set_position(position);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
@@ -5061,7 +5854,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HStoreGlobalGeneric* instr =
Add<HStoreGlobalGeneric>(global_object, var->name(),
value, function_strict_mode_flag());
- instr->set_position(position);
+ USE(instr);
ASSERT(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
@@ -5090,7 +5883,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
Top(),
- expr->position(),
expr->AssignmentId());
break;
@@ -5152,25 +5944,19 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
HValue* key = NULL;
- if ((!prop->IsStringLength() &&
- !prop->IsFunctionPrototype() &&
- !prop->key()->IsPropertyName()) ||
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
}
- CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
+ CHECK_ALIVE(PushLoad(prop, object, key));
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
- }
+ Push(BuildBinaryOperation(operation, left, right));
BuildStore(expr, prop, expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
} else {
@@ -5223,7 +6009,6 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(expr->value()));
HandleGlobalVariableAssignment(var,
Top(),
- expr->position(),
expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
@@ -5322,9 +6107,16 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- HThrow* instr = Add<HThrow>(value);
- instr->set_position(expr->position());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+ Add<HThrow>(value);
Add<HSimulate>(expr->id());
+
+ // If the throw definitely exits the function, we can finish with a dummy
+ // control flow at this point. This is not the case if the throw is inside
+ // an inlined function which may be replaced.
+ if (call_context() == NULL) {
+ FinishExitCurrentBlock(New<HAbnormalExit>());
+ }
}
@@ -5343,6 +6135,12 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
}
+HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object,
+ HObjectAccess access) {
+ return AddInstruction(BuildLoadNamedField(object, access));
+}
+
+
HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object,
HValue* checked_string) {
if (FLAG_fold_constants && object->IsConstant()) {
@@ -5363,93 +6161,14 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
Add<HDeoptimize>("Insufficient type feedback for generic named load",
Deoptimizer::SOFT);
}
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
-}
-
-
-HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
- HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map);
- Add<HPushArgument>(object);
- return new(zone()) HCallConstantFunction(getter, 1);
+ return New<HLoadNamedGeneric>(object, name);
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
- HValue* object,
- Handle<String> name,
- Handle<Map> map) {
- // Handle a load from a known field.
- ASSERT(!map->is_dictionary_map());
-
- // Handle access to various length properties
- if (name->Equals(isolate()->heap()->length_string())) {
- if (map->instance_type() == JS_ARRAY_TYPE) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- return New<HLoadNamedField>(
- checked_object, HObjectAccess::ForArrayLength(map->elements_kind()));
- }
- }
-
- LookupResult lookup(isolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsField()) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- ASSERT(map->IsJSObjectMap());
- return BuildLoadNamedField(
- checked_object, HObjectAccess::ForField(map, &lookup, name));
- }
-
- // Handle a load of a constant known function.
- if (lookup.IsConstant()) {
- AddCheckMap(object, map);
- Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
- return New<HConstant>(constant);
- }
-
- if (lookup.IsFound()) {
- // Cannot handle the property, do a generic load instead.
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
- }
-
- // Handle a load from a known field somewhere in the prototype chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsField()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- AddCheckMap(object, map);
- HValue* checked_holder = BuildCheckPrototypeMaps(prototype, holder);
- Handle<Map> holder_map(holder->map());
- return BuildLoadNamedField(
- checked_holder, HObjectAccess::ForField(holder_map, &lookup, name));
- }
-
- // Handle a load of a constant function somewhere in the prototype chain.
- if (lookup.IsConstant()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMap(object, map);
- BuildCheckPrototypeMaps(prototype, holder);
- Handle<Object> constant(lookup.GetConstantFromMap(*holder_map), isolate());
- return New<HConstant>(constant);
- }
-
- // No luck, do a generic load.
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
-}
-
HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
HValue* key) {
- HValue* context = environment()->context();
- return new(zone()) HLoadKeyedGeneric(context, object, key);
+ return New<HLoadKeyedGeneric>(object, key);
}
@@ -5483,6 +6202,21 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
checked_object->ClearGVNFlag(kDependsOnElementsKind);
}
+ if (is_store && map->prototype()->IsJSObject()) {
+ // Monomorphic stores need a prototype chain check because shape
+ // changes could allow callbacks on elements in the chain that
+ // aren't compatible with monomorphic keyed stores.
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()));
+ Object* holder = map->prototype();
+ while (holder->GetPrototype(isolate())->IsJSObject()) {
+ holder = holder->GetPrototype(isolate());
+ }
+ ASSERT(holder->GetPrototype(isolate())->IsNull());
+
+ BuildCheckPrototypeMaps(prototype,
+ Handle<JSObject>(JSObject::cast(holder)));
+ }
+
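Aside: the holder walk added above follows the receiver's prototype chain until the next link is null, so that BuildCheckPrototypeMaps can pin the map of every object that could intercept the store. A minimal sketch of that walk, using a hypothetical null-terminated Proto type rather than the real V8 Object API:

    #include <cassert>

    // Hypothetical stand-in for a JSObject with a prototype link; the real
    // walk calls Object::GetPrototype(isolate) and stops when it hits null.
    struct Proto { Proto* prototype; };

    Proto* ChainTerminus(Proto* first) {
      Proto* holder = first;
      while (holder->prototype != nullptr) {
        holder = holder->prototype;
      }
      return holder;  // Last object before null, as the ASSERT above checks.
    }

    int main() {
      Proto last = { nullptr };
      Proto middle = { &last };
      Proto first = { &middle };
      assert(ChainTerminus(&first) == &last);
    }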
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
return BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
@@ -5563,8 +6297,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* key,
HValue* val,
SmallMapList* maps,
- BailoutId ast_id,
- int position,
bool is_store,
KeyedAccessStoreMode store_mode,
bool* has_side_effects) {
@@ -5576,9 +6308,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
TryBuildConsolidatedElementLoad(object, key, val, maps);
if (consolidated_load != NULL) {
*has_side_effects |= consolidated_load->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) {
- consolidated_load->set_position(position);
- }
return consolidated_load;
}
}
@@ -5635,7 +6364,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
store_mode);
}
*has_side_effects |= instr->HasObservableSideEffects();
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
return is_store ? NULL : instr;
}
@@ -5648,8 +6376,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
HCompareMap* mapcompare =
- new(zone()) HCompareMap(object, map, this_map, other_map);
- current_block()->Finish(mapcompare);
+ New<HCompareMap>(object, map, this_map, other_map);
+ FinishCurrentBlock(mapcompare);
set_current_block(this_map);
HInstruction* access = NULL;
@@ -5672,12 +6400,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
access->SetFlag(HValue::kHasNoObservableSideEffects);
- if (position != RelocInfo::kNoPosition) access->set_position(position);
if (!is_store) {
Push(access);
}
NoObservableSideEffectsScope scope(this);
- current_block()->GotoNoSimulate(join);
+ GotoNoSimulate(join);
set_current_block(other_map);
}
@@ -5695,8 +6422,6 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* key,
HValue* val,
Expression* expr,
- BailoutId ast_id,
- int position,
bool is_store,
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
@@ -5705,6 +6430,22 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
SmallMapList* types;
bool monomorphic = ComputeReceiverTypes(expr, obj, &types);
+ bool force_generic = false;
+ if (is_store && (monomorphic || (types != NULL && !types->is_empty()))) {
+ // Stores can't be mono/polymorphic if their prototype chain has dictionary
+ // elements. However, a receiver map that has dictionary elements itself
+ // should be left to normal mono/poly behavior (the other maps may benefit
+ // from highly optimized stores).
+ for (int i = 0; i < types->length(); i++) {
+ Handle<Map> current_map = types->at(i);
+ if (current_map->DictionaryElementsInPrototypeChainOnly()) {
+ force_generic = true;
+ monomorphic = false;
+ break;
+ }
+ }
+ }
+
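Aside: stripped of the Hydrogen plumbing, the force_generic policy introduced above is a simple predicate over the feedback maps; the real test is Map::DictionaryElementsInPrototypeChainOnly. A sketch with a hypothetical MapInfo type, illustration only:

    // Any receiver map whose prototype chain (but not the map itself) holds
    // dictionary-mode elements forces the keyed store to stay generic, since
    // such chains can install accessors a fast store would bypass.
    struct MapInfo { bool dictionary_elements_in_prototype_chain_only; };

    bool ForceGenericKeyedStore(const MapInfo* maps, int length) {
      for (int i = 0; i < length; ++i) {
        if (maps[i].dictionary_elements_in_prototype_chain_only) return true;
      }
      return false;
    }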
if (monomorphic) {
Handle<Map> map = types->first();
if (map->has_slow_elements_kind()) {
@@ -5716,19 +6457,20 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, is_store, expr->GetStoreMode());
}
- } else if (types != NULL && !types->is_empty()) {
+ } else if (!force_generic && (types != NULL && !types->is_empty())) {
return HandlePolymorphicElementAccess(
- obj, key, val, types, ast_id, position, is_store,
+ obj, key, val, types, is_store,
expr->GetStoreMode(), has_side_effects);
} else {
if (is_store) {
- if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
+ if (expr->IsAssignment() &&
+ expr->AsAssignment()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed store",
Deoptimizer::SOFT);
}
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
- if (expr->AsProperty()->IsUninitialized()) {
+ if (expr->AsProperty()->HasNoTypeInformation()) {
Add<HDeoptimize>("Insufficient type feedback for keyed load",
Deoptimizer::SOFT);
}
@@ -5736,7 +6478,6 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
}
AddInstruction(instr);
}
- if (position != RelocInfo::kNoPosition) instr->set_position(position);
*has_side_effects = instr->HasObservableSideEffects();
return instr;
}
@@ -5746,9 +6487,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
HValue* object,
HValue* key,
HValue* value) {
- HValue* context = environment()->context();
- return new(zone()) HStoreKeyedGeneric(
- context,
+ return New<HStoreKeyedGeneric>(
object,
key,
value,
@@ -5815,7 +6554,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HInstruction* checked_key = Add<HBoundsCheck>(key, length);
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ result = New<HAccessArgumentsAt>(elements, length, checked_key);
} else {
EnsureArgumentsArePushedForAccess();
@@ -5825,7 +6564,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
arguments_environment()->parameter_count() - 1;
HInstruction* length = Add<HConstant>(argument_count);
HInstruction* checked_key = Add<HBoundsCheck>(key, length);
- result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+ result = New<HAccessArgumentsAt>(elements, length, checked_key);
}
}
ast_context()->ReturnInstruction(result, expr->id());
@@ -5835,66 +6574,66 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
void HOptimizedGraphBuilder::PushLoad(Property* expr,
HValue* object,
- HValue* key,
- int position) {
+ HValue* key) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
Push(object);
if (key != NULL) Push(key);
- BuildLoad(expr, position, expr->LoadId());
+ BuildLoad(expr, expr->LoadId());
+}
+
+
+static bool AreStringTypes(SmallMapList* types) {
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
}
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
- int position,
BailoutId ast_id) {
HInstruction* instr = NULL;
- if (expr->IsStringLength()) {
- HValue* string = Pop();
- BuildCheckHeapObject(string);
- HInstruction* checkstring =
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- instr = BuildLoadStringLength(string, checkstring);
- } else if (expr->IsStringAccess()) {
+ if (expr->IsStringAccess()) {
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->context();
- HInstruction* char_code =
- BuildStringCharCodeAt(string, index);
+ HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
- instr = HStringCharFromCode::New(zone(), context, char_code);
+ instr = NewUncasted<HStringCharFromCode>(char_code);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
BuildCheckHeapObject(function);
- instr = new(zone()) HLoadFunctionPrototype(function);
+ instr = New<HLoadFunctionPrototype>(function);
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- HValue* object = Top();
+ HValue* object = Pop();
SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+ ComputeReceiverTypes(expr, object, &types);
+ ASSERT(types != NULL);
+
+ if (types->length() > 0) {
+ PropertyAccessInfo info(isolate(), types->first(), name);
+ if (!info.CanLoadAsMonomorphic(types)) {
+ return HandlePolymorphicLoadNamedField(
+ ast_id, expr->LoadId(), object, types, name);
+ }
- if (monomorphic) {
- Handle<Map> map = types->first();
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- AddCheckConstantFunction(holder, Top(), map);
- if (FLAG_inline_accessors &&
- TryInlineGetter(getter, ast_id, expr->LoadId())) {
- return;
- }
- Add<HPushArgument>(Pop());
- instr = new(zone()) HCallConstantFunction(getter, 1);
+ BuildCheckHeapObject(object);
+ HInstruction* checked_object;
+ if (AreStringTypes(types)) {
+ checked_object =
+ Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
} else {
- instr = BuildLoadNamedMonomorphic(Pop(), name, map);
+ checked_object = Add<HCheckMaps>(object, types);
}
- } else if (types != NULL && types->length() > 1) {
- return HandlePolymorphicLoadNamedField(
- position, ast_id, Pop(), types, name);
+ instr = BuildLoadMonomorphic(
+ &info, object, checked_object, ast_id, expr->LoadId());
+ if (instr == NULL) return;
+ if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
} else {
- instr = BuildLoadNamedGeneric(Pop(), name, expr);
+ instr = BuildLoadNamedGeneric(object, name, expr);
}
} else {
@@ -5903,7 +6642,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, ast_id, position,
+ obj, key, NULL, expr,
false, // is_store
&has_side_effects);
if (has_side_effects) {
@@ -5917,7 +6656,6 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
}
return ast_context()->ReturnValue(load);
}
- instr->set_position(position);
return ast_context()->ReturnInstruction(instr, ast_id);
}
@@ -5930,14 +6668,12 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
if (TryArgumentsAccess(expr)) return;
CHECK_ALIVE(VisitForValue(expr->obj()));
- if ((!expr->IsStringLength() &&
- !expr->IsFunctionPrototype() &&
- !expr->key()->IsPropertyName()) ||
+ if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
expr->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(expr->key()));
}
- BuildLoad(expr, expr->position(), expr->id());
+ BuildLoad(expr, expr->id());
}
@@ -6031,22 +6767,13 @@ bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
Handle<String> name) {
if (types->length() > kMaxCallPolymorphism) return false;
- Handle<Map> map(types->at(0));
- LookupResult lookup(isolate());
- if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return false;
-
- Handle<Object> prototype(map->prototype(), isolate());
- for (int count = 1; count < types->length(); ++count) {
- Handle<Map> test_map(types->at(count));
- if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return false;
- if (test_map->prototype() != *prototype) return false;
- }
-
- if (!expr->ComputeTarget(map, name)) return false;
+ PropertyAccessInfo info(isolate(), types->at(0), name);
+ if (!info.CanLoadAsMonomorphic(types)) return false;
+ if (!expr->ComputeTarget(info.map(), name)) return false;
BuildCheckHeapObject(receiver);
Add<HCheckMaps>(receiver, types);
- AddCheckPrototypeMaps(expr->holder(), map);
+ AddCheckPrototypeMaps(expr->holder(), info.map());
if (FLAG_trace_inlining) {
Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
@@ -6058,8 +6785,7 @@ bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic(
if (!TryInlineCall(expr)) {
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
HCallConstantFunction* call =
- new(zone()) HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
+ New<HCallConstantFunction>(expr->target(), argument_count);
PreProcessCall(call);
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
@@ -6123,11 +6849,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
number_block = graph()->CreateBasicBlock();
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(receiver);
- smicheck->SetSuccessorAt(0, empty_smi_block);
- smicheck->SetSuccessorAt(1, not_smi_block);
- current_block()->Finish(smicheck);
- empty_smi_block->Goto(number_block);
+ FinishCurrentBlock(New<HIsSmiAndBranch>(
+ receiver, empty_smi_block, not_smi_block));
+ Goto(empty_smi_block, number_block);
set_current_block(not_smi_block);
} else {
BuildCheckHeapObject(receiver);
@@ -6138,27 +6862,24 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
HUnaryControlInstruction* compare;
if (handle_smi && map.is_identical_to(number_marker_map)) {
- compare = new(zone()) HCompareMap(
- receiver, heap_number_map, if_true, if_false);
+ compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
map = initial_number_map;
expr->set_number_check(
Handle<JSObject>(JSObject::cast(map->prototype())));
} else if (map.is_identical_to(string_marker_map)) {
- compare = new(zone()) HIsStringAndBranch(receiver);
- compare->SetSuccessorAt(0, if_true);
- compare->SetSuccessorAt(1, if_false);
+ compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
map = initial_string_map;
expr->set_string_check(
Handle<JSObject>(JSObject::cast(map->prototype())));
} else {
- compare = new(zone()) HCompareMap(receiver, map, if_true, if_false);
+ compare = New<HCompareMap>(receiver, map, if_true, if_false);
expr->set_map_check();
}
- current_block()->Finish(compare);
+ FinishCurrentBlock(compare);
if (expr->check_type() == NUMBER_CHECK) {
- if_true->Goto(number_block);
+ Goto(if_true, number_block);
if_true = number_block;
number_block->SetJoinId(expr->id());
}
@@ -6180,14 +6901,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
if (HasStackOverflow()) return;
} else {
HCallConstantFunction* call =
- new(zone()) HCallConstantFunction(expr->target(), argument_count);
- call->set_position(expr->position());
+ New<HCallConstantFunction>(expr->target(), argument_count);
PreProcessCall(call);
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
}
- if (current_block() != NULL) current_block()->Goto(join);
+ if (current_block() != NULL) Goto(join);
set_current_block(if_false);
}
@@ -6198,18 +6918,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// Because the deopt may be the only path in the polymorphic call, make sure
// that the environment stack matches the depth on deopt that it otherwise
// would have had after a successful call.
- Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1));
+ Drop(argument_count);
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
} else {
- HValue* context = environment()->context();
- HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
- call->set_position(expr->position());
+ HCallNamed* call = New<HCallNamed>(name, argument_count);
PreProcessCall(call);
if (join != NULL) {
AddInstruction(call);
if (!ast_context()->IsEffect()) Push(call);
- current_block()->Goto(join);
+ Goto(join);
} else {
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -6258,6 +6977,11 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
Handle<JSFunction> caller = current_info()->closure();
Handle<SharedFunctionInfo> target_shared(target->shared());
+ // Always inline builtins marked for inlining.
+ if (target->IsBuiltin()) {
+ return target_shared->inline_builtin() ? 0 : kNotInlinable;
+ }
+
// Do a quick check on source code length to avoid parsing large
// inlining candidates.
if (target_shared->SourceSize() >
@@ -6267,7 +6991,7 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
// Target must be inlineable.
- if (!target->IsInlineable()) {
+ if (!target_shared->IsInlineable()) {
TraceInline(target, caller, "target not inlineable");
return kNotInlinable;
}
@@ -6298,18 +7022,6 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
return false;
}
-#if !V8_TARGET_ARCH_IA32
- // Target must be able to use caller's context.
- CompilationInfo* outer_info = current_info();
- if (target->context() != outer_info->closure()->context() ||
- outer_info->scope()->contains_with() ||
- outer_info->scope()->num_heap_slots() > 0) {
- TraceInline(target, caller, "target requires context change");
- return false;
- }
-#endif
-
-
// Don't inline deeper than the maximum number of inlining levels.
HEnvironment* env = environment();
int current_level = 1;
@@ -6447,15 +7159,9 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
undefined,
function_state()->inlining_kind(),
undefined_receiver);
-#if V8_TARGET_ARCH_IA32
- // IA32 only, overwrite the caller's context in the deoptimization
- // environment with the correct one.
- //
- // TODO(kmillikin): implement the same inlining on other platforms so we
- // can remove the unsightly ifdefs in this function.
+
HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
-#endif
Add<HSimulate>(return_id);
current_block()->UpdateEnvironment(inner_env);
@@ -6511,12 +7217,12 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// return value will always evaluate to true, in a value context the
// return value is the newly allocated receiver.
if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_true(), state);
+ Goto(inlined_test_context()->if_true(), state);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
+ AddLeaveInlined(implicit_return_value, state);
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Falling off the end of an inlined setter call. The returned value is
@@ -6525,21 +7231,21 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
if (call_context()->IsTest()) {
inlined_test_context()->ReturnValue(implicit_return_value);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(implicit_return_value, state);
+ AddLeaveInlined(implicit_return_value, state);
}
} else {
// Falling off the end of a normal inlined function. This basically means
// returning undefined.
if (call_context()->IsTest()) {
- current_block()->Goto(inlined_test_context()->if_false(), state);
+ Goto(inlined_test_context()->if_false(), state);
} else if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), state);
+ Goto(function_return(), state);
} else {
ASSERT(call_context()->IsValue());
- current_block()->AddLeaveInlined(undefined, state);
+ AddLeaveInlined(undefined, state);
}
}
}
@@ -6561,13 +7267,13 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
entry->RegisterReturnTarget(if_true, zone());
if_true->SetJoinId(ast_id);
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if_true->Goto(true_target, function_state());
+ Goto(if_true, true_target, function_state());
}
if (if_false->HasPredecessor()) {
entry->RegisterReturnTarget(if_false, zone());
if_false->SetJoinId(ast_id);
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_false->Goto(false_target, function_state());
+ Goto(if_false, false_target, function_state());
}
set_current_block(NULL);
return true;
@@ -6665,16 +7371,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
case kMathAbs:
case kMathSqrt:
case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
- HValue* context = environment()->context();
Drop(1); // Receiver.
- HInstruction* op =
- HUnaryMathOperation::New(zone(), context, argument, id);
- op->set_position(expr->position());
+ HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -6685,8 +7385,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
- HValue* context = environment()->context();
- HInstruction* op = HMul::NewImul(zone(), context, left, right);
+ HInstruction* op = HMul::NewImul(zone(), context(), left, right);
if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -6716,7 +7415,6 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (argument_count == 2 && check_type == STRING_CHECK) {
HValue* index = Pop();
HValue* string = Pop();
- HValue* context = environment()->context();
ASSERT(!expr->holder().is_null());
BuildCheckPrototypeMaps(Call::GetPrototypeForPrimitiveCheck(
STRING_CHECK, expr->holder()->GetIsolate()),
@@ -6728,8 +7426,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
return true;
}
AddInstruction(char_code);
- HInstruction* result =
- HStringCharFromCode::New(zone(), context, char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6738,10 +7435,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* argument = Pop();
- HValue* context = environment()->context();
Drop(1); // Receiver.
- HInstruction* result =
- HStringCharFromCode::New(zone(), context, argument);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6754,17 +7449,11 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
case kMathAbs:
case kMathSqrt:
case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
HValue* argument = Pop();
- HValue* context = environment()->context();
Drop(1); // Receiver.
- HInstruction* op =
- HUnaryMathOperation::New(zone(), context, argument, id);
- op->set_position(expr->position());
+ HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
ast_context()->ReturnInstruction(op, expr->id());
return true;
}
@@ -6775,45 +7464,32 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
- HValue* context = environment()->context();
HInstruction* result = NULL;
// Use sqrt() if exponent is 0.5 or -0.5.
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
- result =
- HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
+ result = NewUncasted<HUnaryMathOperation>(left, kMathPowHalf);
} else if (exponent == -0.5) {
HValue* one = graph()->GetConstant1();
- HInstruction* sqrt =
- HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
- AddInstruction(sqrt);
+ HInstruction* sqrt = AddUncasted<HUnaryMathOperation>(
+ left, kMathPowHalf);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
ASSERT(!sqrt->HasObservableSideEffects());
- result = HDiv::New(zone(), context, one, sqrt);
+ result = NewUncasted<HDiv>(one, sqrt);
} else if (exponent == 2.0) {
- result = HMul::New(zone(), context, left, left);
+ result = NewUncasted<HMul>(left, left);
}
}
if (result == NULL) {
- result = HPower::New(zone(), context, left, right);
+ result = NewUncasted<HPower>(left, right);
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
- case kMathRandom:
- if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
- Drop(1); // Receiver.
- HGlobalObject* global_object = Add<HGlobalObject>();
- HRandom* result = new(zone()) HRandom(global_object);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
- }
- break;
case kMathMax:
case kMathMin:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
@@ -6821,11 +7497,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
- HValue* context = environment()->context();
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
- HInstruction* result =
- HMathMinMax::New(zone(), context, left, right, op);
+ HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6836,8 +7510,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
- HValue* context = environment()->context();
- HInstruction* result = HMul::NewImul(zone(), context, left, right);
+ HInstruction* result = HMul::NewImul(zone(), context(), left, right);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -6878,6 +7551,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
// Found pattern f.apply(receiver, arguments).
CHECK_ALIVE_OR_RETURN(VisitForValue(prop->obj()), true);
HValue* function = Top();
+
AddCheckConstantFunction(expr->holder(), function, function_map);
Drop(1);
@@ -6888,12 +7562,10 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
- HInstruction* result =
- new(zone()) HApplyArguments(function,
- wrapped_receiver,
- length,
- elements);
- result->set_position(expr->position());
+ HInstruction* result = New<HApplyArguments>(function,
+ wrapped_receiver,
+ length,
+ elements);
ast_context()->ReturnInstruction(result, expr->id());
return true;
} else {
@@ -6910,28 +7582,24 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
}
Handle<JSFunction> known_function;
- if (function->IsConstant()) {
- HConstant* constant_function = HConstant::cast(function);
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
known_function = Handle<JSFunction>::cast(
- constant_function->handle(isolate()));
+ HConstant::cast(function)->handle(isolate()));
int args_count = arguments_count - 1; // Excluding receiver.
if (TryInlineApply(known_function, expr, args_count)) return true;
}
Drop(arguments_count - 1);
- PushAndAdd(New<HPushArgument>(Pop()));
+ Push(Add<HPushArgument>(Pop()));
for (int i = 1; i < arguments_count; i++) {
- PushAndAdd(New<HPushArgument>(arguments_values->at(i)));
+ Push(Add<HPushArgument>(arguments_values->at(i)));
}
- HValue* context = environment()->context();
- HInvokeFunction* call = new(zone()) HInvokeFunction(
- context,
- function,
- known_function,
- arguments_count);
+ HInvokeFunction* call = New<HInvokeFunction>(function,
+ known_function,
+ arguments_count);
Drop(arguments_count);
- call->set_position(expr->position());
ast_context()->ReturnInstruction(call, expr->id());
return true;
}
@@ -6950,20 +7618,30 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (prop != NULL) {
if (!prop->key()->IsPropertyName()) {
// Keyed function call.
- CHECK_ALIVE(VisitArgument(prop->obj()));
-
+ CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
+
// Push receiver and key like the non-optimized code generator expects it.
HValue* key = Pop();
HValue* receiver = Pop();
Push(key);
- Push(receiver);
-
+ Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HValue* context = environment()->context();
- call = new(zone()) HCallKeyed(context, key, argument_count);
- call->set_position(expr->position());
+ if (expr->IsMonomorphic()) {
+ BuildCheckHeapObject(receiver);
+ ElementsKind kind = expr->KeyedArrayCallIsHoley()
+ ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
+
+ Handle<Map> map(isolate()->get_initial_js_array_map(kind));
+
+ HValue* function = BuildMonomorphicElementAccess(
+ receiver, key, NULL, NULL, map, false, STANDARD_STORE);
+
+ call = New<HCallFunction>(function, argument_count);
+ } else {
+ call = New<HCallKeyed>(key, argument_count);
+ }
Drop(argument_count + 1); // 1 is the key.
return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -7001,16 +7679,13 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code. Also use the IC
// when a primitive receiver check is required.
- HValue* context = environment()->context();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
+ call = PreProcessCall(New<HCallNamed>(name, argument_count));
} else {
AddCheckConstantFunction(expr->holder(), receiver, map);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
- new(zone()) HCallConstantFunction(expr->target(),
- argument_count));
+ New<HCallConstantFunction>(expr->target(), argument_count));
}
} else if (types != NULL && types->length() > 1) {
ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -7018,11 +7693,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
return;
} else {
- HValue* context = environment()->context();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, name, argument_count));
+ call = PreProcessCall(New<HCallNamed>(name, argument_count));
}
-
} else {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
@@ -7046,9 +7718,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (known_global_function) {
// Push the global object instead of the global receiver because
// code generated by the full code generator expects it.
- HValue* context = environment()->context();
- HGlobalObject* global_object = new(zone()) HGlobalObject(context);
- PushAndAdd(global_object);
+ HGlobalObject* global_object = Add<HGlobalObject>();
+ Push(global_object);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
CHECK_ALIVE(VisitForValue(expr->expression()));
@@ -7080,16 +7751,14 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
if (CallStubCompiler::HasCustomCallGenerator(expr->target())) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code.
- HValue* context = environment()->context();
- call = PreProcessCall(
- new(zone()) HCallNamed(context, var->name(), argument_count));
+ call = PreProcessCall(New<HCallNamed>(var->name(), argument_count));
} else {
- call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
- argument_count));
+ call = PreProcessCall(New<HCallKnownGlobal>(
+ expr->target(), argument_count));
}
} else {
HGlobalObject* receiver = Add<HGlobalObject>();
- PushAndAdd(New<HPushArgument>(receiver));
+ Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
call = New<HCallGlobal>(var->name(), argument_count);
@@ -7102,8 +7771,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
HGlobalObject* global = Add<HGlobalObject>();
- HGlobalReceiver* receiver = New<HGlobalReceiver>(global);
- PushAndAdd(receiver);
+ HGlobalReceiver* receiver = Add<HGlobalReceiver>(global);
+ Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Add<HCheckValue>(function, expr->target());
@@ -7129,7 +7798,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
HValue* function = Top();
HGlobalObject* global_object = Add<HGlobalObject>();
HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object);
- PushAndAdd(New<HPushArgument>(receiver));
+ Push(Add<HPushArgument>(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
call = New<HCallFunction>(function, argument_count);
@@ -7137,11 +7806,75 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
}
}
- call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
}
+void HOptimizedGraphBuilder::BuildInlinedCallNewArray(CallNew* expr) {
+ NoObservableSideEffectsScope no_effects(this);
+
+ int argument_count = expr->arguments()->length();
+ // We should at least have the constructor on the expression stack.
+ HValue* constructor = environment()->ExpressionStackAt(argument_count);
+
+ ElementsKind kind = expr->elements_kind();
+ Handle<Cell> cell = expr->allocation_info_cell();
+ AllocationSite* site = AllocationSite::cast(cell->value());
+
+ // Register on the site for deoptimization if the cell value changes.
+ site->AddDependentCompilationInfo(AllocationSite::TRANSITIONS, top_info());
+ HInstruction* cell_instruction = Add<HConstant>(cell);
+
+ // In the single constant argument case, we may have to adjust elements kind
+ // to avoid creating a packed non-empty array.
+ if (argument_count == 1 && !IsHoleyElementsKind(kind)) {
+ HValue* argument = environment()->Top();
+ if (argument->IsConstant()) {
+ HConstant* constant_argument = HConstant::cast(argument);
+ ASSERT(constant_argument->HasSmiValue());
+ int constant_array_size = constant_argument->Integer32Value();
+ if (constant_array_size != 0) {
+ kind = GetHoleyElementsKind(kind);
+ }
+ }
+ }
+
+ // Build the array.
+ JSArrayBuilder array_builder(this,
+ kind,
+ cell_instruction,
+ constructor,
+ DISABLE_ALLOCATION_SITES);
+ HValue* new_object;
+ if (argument_count == 0) {
+ new_object = array_builder.AllocateEmptyArray();
+ } else if (argument_count == 1) {
+ HValue* argument = environment()->Top();
+ new_object = BuildAllocateArrayFromLength(&array_builder, argument);
+ } else {
+ HValue* length = Add<HConstant>(argument_count);
+ // Smi arrays need to initialize array elements with the hole because
+ // bailout could occur if the arguments don't fit in a smi.
+ //
+ // TODO(mvstanton): If all the arguments are constants in smi range, then
+ // we could set fill_with_hole to false and save a few instructions.
+ JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind)
+ ? JSArrayBuilder::FILL_WITH_HOLE
+ : JSArrayBuilder::DONT_FILL_WITH_HOLE;
+ new_object = array_builder.AllocateArray(length, length, fill_mode);
+ HValue* elements = array_builder.GetElementsLocation();
+ for (int i = 0; i < argument_count; i++) {
+ HValue* value = environment()->ExpressionStackAt(argument_count - i - 1);
+ HValue* constant_i = Add<HConstant>(i);
+ Add<HStoreKeyed>(elements, constant_i, value, kind);
+ }
+ }
+
+ Drop(argument_count + 1); // drop constructor and args.
+ ast_context()->ReturnValue(new_object);
+}
+
+
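Aside: one subtlety in BuildInlinedCallNewArray above is the single-argument case. new Array(n) with a nonzero constant n allocates n uninitialized slots, so a packed elements kind would misdescribe the result. A sketch of the adjustment with hypothetical enum names (the real code calls GetHoleyElementsKind):

    enum Kind { PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS };

    // new Array(0) may stay packed; any other constant length must go holey
    // because none of the n elements has been written yet.
    Kind AdjustKindForConstantLength(Kind kind, int constant_length) {
      if (kind == PACKED_SMI_ELEMENTS && constant_length != 0) {
        return HOLEY_SMI_ELEMENTS;
      }
      return kind;
    }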
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
@@ -7151,22 +7884,67 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
}
+bool HOptimizedGraphBuilder::IsCallNewArrayInlineable(CallNew* expr) {
+ bool inline_ok = false;
+ Handle<JSFunction> caller = current_info()->closure();
+ Handle<JSFunction> target(isolate()->global_context()->array_function(),
+ isolate());
+ int argument_count = expr->arguments()->length();
+ // We should have the function plus array arguments on the environment stack.
+ ASSERT(environment()->length() >= (argument_count + 1));
+ Handle<Cell> cell = expr->allocation_info_cell();
+ AllocationSite* site = AllocationSite::cast(cell->value());
+ if (site->CanInlineCall()) {
+ // We also want to avoid inlining in certain 1-argument scenarios.
+ if (argument_count == 1) {
+ HValue* argument = Top();
+ if (argument->IsConstant()) {
+ // Do not inline if the constant length argument is not a smi or
+ // outside the valid range for a fast array.
+ HConstant* constant_argument = HConstant::cast(argument);
+ if (constant_argument->HasSmiValue()) {
+ int value = constant_argument->Integer32Value();
+ inline_ok = value >= 0 &&
+ value < JSObject::kInitialMaxFastElementArray;
+ if (!inline_ok) {
+ TraceInline(target, caller,
+ "Length outside of valid array range");
+ }
+ }
+ } else {
+ inline_ok = true;
+ }
+ } else {
+ inline_ok = true;
+ }
+ } else {
+ TraceInline(target, caller, "AllocationSite requested no inlining.");
+ }
+
+ if (inline_ok) {
+ TraceInline(target, caller, NULL);
+ }
+ return inline_ok;
+}
+
+
void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
- HValue* context = environment()->context();
Factory* factory = isolate()->factory();
+ // The constructor function is on the stack in the unoptimized code
+ // during evaluation of the arguments.
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
- // The constructor function is on the stack in the unoptimized code
- // during evaluation of the arguments.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- HValue* function = Top();
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
HValue* check = Add<HCheckValue>(function, constructor);
@@ -7245,29 +8023,32 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
receiver->DeleteAndReplaceWith(NULL);
check->DeleteAndReplaceWith(NULL);
environment()->SetExpressionStackAt(receiver_index, function);
- HInstruction* call = PreProcessCall(
- new(zone()) HCallNew(context, function, argument_count));
- call->set_position(expr->position());
+ HInstruction* call =
+ PreProcessCall(New<HCallNew>(function, argument_count));
return ast_context()->ReturnInstruction(call, expr->id());
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
Handle<JSFunction> array_function(
isolate()->global_context()->array_function(), isolate());
- CHECK_ALIVE(VisitArgument(expr->expression()));
- HValue* constructor = HPushArgument::cast(Top())->argument();
- CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ bool use_call_new_array = expr->target().is_identical_to(array_function);
+ Handle<Cell> cell = expr->allocation_info_cell();
+ if (use_call_new_array && IsCallNewArrayInlineable(expr)) {
+ // Verify we are still calling the array function for our native context.
+ Add<HCheckValue>(function, array_function);
+ BuildInlinedCallNewArray(expr);
+ return;
+ }
+
HBinaryCall* call;
- if (expr->target().is_identical_to(array_function)) {
- Handle<Cell> cell = expr->allocation_info_cell();
- Add<HCheckValue>(constructor, array_function);
- call = new(zone()) HCallNewArray(context, constructor, argument_count,
- cell, expr->elements_kind());
+ if (use_call_new_array) {
+ Add<HCheckValue>(function, array_function);
+ call = New<HCallNewArray>(function, argument_count, cell,
+ expr->elements_kind());
} else {
- call = new(zone()) HCallNew(context, constructor, argument_count);
+ call = New<HCallNew>(function, argument_count);
}
- Drop(argument_count);
- call->set_position(expr->position());
+ PreProcessCall(call);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -7289,6 +8070,184 @@ const HOptimizedGraphBuilder::InlineFunctionGenerator
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
+template <class ViewClass>
+void HGraphBuilder::BuildArrayBufferViewInitialization(
+ HValue* obj,
+ HValue* buffer,
+ HValue* byte_offset,
+ HValue* byte_length) {
+
+ for (int offset = ViewClass::kSize;
+ offset < ViewClass::kSizeWithInternalFields;
+ offset += kPointerSize) {
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSObjectOffset(offset),
+ Add<HConstant>(static_cast<int32_t>(0)));
+ }
+
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewByteOffset(),
+ byte_offset);
+ Add<HStoreNamedField>(
+ obj,
+ HObjectAccess::ForJSArrayBufferViewByteLength(),
+ byte_length);
+
+ HObjectAccess weak_first_view_access =
+ HObjectAccess::ForJSArrayBufferWeakFirstView();
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSArrayBufferViewWeakNext(),
+ Add<HLoadNamedField>(buffer, weak_first_view_access));
+ Add<HStoreNamedField>(buffer, weak_first_view_access, obj);
+}
+
+
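Aside: the last two stores in BuildArrayBufferViewInitialization splice the new view into the buffer's weak view list. Stripped of the Hydrogen load/store instructions, it is an ordinary singly-linked-list prepend; a sketch with hypothetical plain-C++ node types:

    struct ViewNode { ViewNode* weak_next; };
    struct BufferNode { ViewNode* weak_first_view; };

    // Prepend: the view inherits the old list head as its weak_next, then
    // the buffer's weak_first_view is redirected to the new view.
    void LinkViewIntoBuffer(BufferNode* buffer, ViewNode* view) {
      view->weak_next = buffer->weak_first_view;
      buffer->weak_first_view = view;
    }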
+void HOptimizedGraphBuilder::VisitDataViewInitialize(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* arguments = expr->arguments();
+
+ NoObservableSideEffectsScope scope(this);
+ ASSERT(arguments->length() == 4);
+ CHECK_ALIVE(VisitForValue(arguments->at(0)));
+ HValue* obj = Pop();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(1)));
+ HValue* buffer = Pop();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(2)));
+ HValue* byte_offset = Pop();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(3)));
+ HValue* byte_length = Pop();
+
+ BuildArrayBufferViewInitialization<JSDataView>(
+ obj, buffer, byte_offset, byte_length);
+}
+
+
+void HOptimizedGraphBuilder::VisitTypedArrayInitialize(
+ CallRuntime* expr) {
+ ZoneList<Expression*>* arguments = expr->arguments();
+
+ NoObservableSideEffectsScope scope(this);
+ static const int kObjectArg = 0;
+ static const int kArrayIdArg = 1;
+ static const int kBufferArg = 2;
+ static const int kByteOffsetArg = 3;
+ static const int kByteLengthArg = 4;
+ static const int kArgsLength = 5;
+ ASSERT(arguments->length() == kArgsLength);
+
+ CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg)));
+ HValue* obj = Pop();
+
+ ASSERT(arguments->at(kArrayIdArg)->node_type() == AstNode::kLiteral);
+ Handle<Object> value =
+ static_cast<Literal*>(arguments->at(kArrayIdArg))->value();
+ ASSERT(value->IsSmi());
+ int array_id = Smi::cast(*value)->value();
+
+ CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
+ HValue* buffer = Pop();
+
+ HValue* byte_offset;
+ bool is_zero_byte_offset;
+
+ if (arguments->at(kByteOffsetArg)->node_type() == AstNode::kLiteral
+ && Smi::FromInt(0) ==
+ *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
+ byte_offset = Add<HConstant>(static_cast<int32_t>(0));
+ is_zero_byte_offset = true;
+ } else {
+ CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
+ byte_offset = Pop();
+ is_zero_byte_offset = false;
+ }
+
+ CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
+ HValue* byte_length = Pop();
+
+ IfBuilder byte_offset_smi(this);
+
+ if (!is_zero_byte_offset) {
+ byte_offset_smi.If<HIsSmiAndBranch>(byte_offset);
+ byte_offset_smi.Then();
+ }
+
+ { // byte_offset is Smi.
+ BuildArrayBufferViewInitialization<JSTypedArray>(
+ obj, buffer, byte_offset, byte_length);
+
+ ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
+ size_t element_size = 1; // Bogus initialization.
+ Runtime::ArrayIdToTypeAndSize(array_id, &array_type, &element_size);
+
+ HInstruction* length = AddUncasted<HDiv>(byte_length,
+ Add<HConstant>(static_cast<int32_t>(element_size)));
+
+ Add<HStoreNamedField>(obj,
+ HObjectAccess::ForJSTypedArrayLength(),
+ length);
+
+ HValue* elements =
+ Add<HAllocate>(
+ Add<HConstant>(ExternalArray::kAlignedSize),
+ HType::JSArray(),
+ NOT_TENURED,
+ static_cast<InstanceType>(FIRST_EXTERNAL_ARRAY_TYPE + array_type));
+
+ Handle<Map> external_array_map(
+ isolate()->heap()->MapForExternalArrayType(array_type));
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForMap(),
+ Add<HConstant>(external_array_map));
+
+ HValue* backing_store = Add<HLoadNamedField>(
+ buffer, HObjectAccess::ForJSArrayBufferBackingStore());
+
+ HValue* typed_array_start;
+ if (is_zero_byte_offset) {
+ typed_array_start = backing_store;
+ } else {
+ HInstruction* external_pointer =
+ AddUncasted<HAdd>(backing_store, byte_offset);
+ // Arguments are checked prior to call to TypedArrayInitialize,
+ // including byte_offset.
+ external_pointer->ClearFlag(HValue::kCanOverflow);
+ typed_array_start = external_pointer;
+ }
+
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForExternalArrayExternalPointer(),
+ typed_array_start);
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedArrayLength(),
+ length);
+ Add<HStoreNamedField>(
+ obj, HObjectAccess::ForElementsPointer(), elements);
+ }
+
+ if (!is_zero_byte_offset) {
+ byte_offset_smi.Else();
+ { // byte_offset is not Smi.
+ Push(Add<HPushArgument>(obj));
+ VisitArgument(arguments->at(kArrayIdArg));
+ Push(Add<HPushArgument>(buffer));
+ Push(Add<HPushArgument>(byte_offset));
+ Push(Add<HPushArgument>(byte_length));
+ Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
+ Drop(kArgsLength);
+ }
+ }
+ byte_offset_smi.End();
+}
+
+
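Aside: two computations in the Smi branch above are worth restating plainly. The typed array's element count is byte_length divided by the element size, and its element base is the backing store advanced by byte_offset; both inputs were validated by the runtime before this point. A minimal sketch, not V8 code:

    #include <cassert>
    #include <cstddef>

    size_t ElementCount(size_t byte_length, size_t element_size) {
      // Exact by construction: the runtime rejects mismatched lengths
      // before Runtime::kTypedArrayInitialize is ever reached.
      assert(element_size != 0 && byte_length % element_size == 0);
      return byte_length / element_size;
    }

    char* TypedArrayStart(char* backing_store, size_t byte_offset) {
      return backing_store + byte_offset;  // Cannot overflow: pre-checked,
                                           // hence ClearFlag(kCanOverflow).
    }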
void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7299,6 +8258,21 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
const Runtime::Function* function = expr->function();
ASSERT(function != NULL);
+
+ if (function->function_id == Runtime::kDataViewInitialize) {
+ return VisitDataViewInitialize(expr);
+ }
+
+ if (function->function_id == Runtime::kTypedArrayInitialize) {
+ return VisitTypedArrayInitialize(expr);
+ }
+
+ if (function->function_id == Runtime::kMaxSmi) {
+ ASSERT(expr->arguments()->length() == 0);
+ HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
+ return ast_context()->ReturnInstruction(max_smi, expr->id());
+ }
+
if (function->intrinsic_type == Runtime::INLINE) {
ASSERT(expr->name()->length() > 0);
ASSERT(expr->name()->Get(0) == '_');
@@ -7389,8 +8363,7 @@ void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
- HValue* context = environment()->context();
- HInstruction* instr = new(zone()) HTypeof(context, value);
+ HInstruction* instr = New<HTypeof>(value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -7443,7 +8416,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- TypeInfo info = expr->type();
+ Handle<Type> info = expr->type();
Representation rep = Representation::FromType(info);
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
@@ -7454,7 +8427,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
// actual HChange instruction we need is (sometimes) added in a later
// phase, so it is not available now to be used as an input to HAdd and
// as the return value.
- HInstruction* number_input = Add<HForceRepresentation>(Pop(), rep);
+ HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
if (!rep.IsDouble()) {
number_input->SetFlag(HInstruction::kFlexibleRepresentation);
number_input->SetFlag(HInstruction::kCannotBeTagged);
@@ -7499,6 +8472,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -7531,7 +8505,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
case Variable::UNALLOCATED:
HandleGlobalVariableAssignment(var,
after,
- expr->position(),
expr->AssignmentId());
break;
@@ -7583,15 +8556,13 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
HValue* object = Top();
HValue* key = NULL;
- if ((!prop->IsStringLength() &&
- !prop->IsFunctionPrototype() &&
- !prop->key()->IsPropertyName()) ||
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
}
- CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
+ CHECK_ALIVE(PushLoad(prop, object, key));
after = BuildIncrement(returns_original_input, expr);
@@ -7627,7 +8598,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
}
BuildCheckHeapObject(string);
HValue* checkstring =
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
+ Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
HInstruction* length = BuildLoadStringLength(string, checkstring);
AddInstruction(length);
HInstruction* checked_index = Add<HBoundsCheck>(index, length);
@@ -7635,9 +8606,16 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
}
-// Checks if the given shift amounts have form: (sa) and (32 - sa).
+// Checks if the given shift amounts have the following forms:
+// (N1) and (N2) with N1 + N2 = 32; (sa) and (32 - sa).
static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
HValue* const32_minus_sa) {
+ if (sa->IsConstant() && const32_minus_sa->IsConstant()) {
+ const HConstant* c1 = HConstant::cast(sa);
+ const HConstant* c2 = HConstant::cast(const32_minus_sa);
+ return c1->HasInteger32Value() && c2->HasInteger32Value() &&
+ (c1->Integer32Value() + c2->Integer32Value() == 32);
+ }
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
if (sa != sub->right()) return false;
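Aside: the constant case added above rests on the standard rotate identity. For any sa in [1, 31], (x << sa) | (x >>> (32 - sa)) equals a rotate right by 32 - sa, and more generally any constant pair with N1 + N2 == 32 works. A self-contained check (plain C++, unsigned 32-bit arithmetic):

    #include <cassert>
    #include <cstdint>

    uint32_t RotateRight(uint32_t x, int amount) {
      amount &= 31;
      if (amount == 0) return x;
      return (x >> amount) | (x << (32 - amount));
    }

    int main() {
      uint32_t x = 0xDEADBEEF;
      for (int sa = 1; sa < 32; ++sa) {
        // Matches the (N1) + (N2) == 32 constant pattern recognized above.
        assert(((x << sa) | (x >> (32 - sa))) == RotateRight(x, 32 - sa));
      }
    }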
@@ -7654,10 +8632,10 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
// directions that can be replaced by one rotate right instruction or not.
// Returns the operand and the shift amount for the rotate instruction in the
// former case.
-bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
+bool HGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
HShl* shl;
HShr* shr;
if (left->IsShl() && right->IsShr()) {
@@ -7693,6 +8671,19 @@ bool CanBeZero(HValue* right) {
}
+HValue* HGraphBuilder::EnforceNumberType(HValue* number,
+ Handle<Type> expected) {
+ if (expected->Is(Type::Smi())) {
+ return AddUncasted<HForceRepresentation>(number, Representation::Smi());
+ }
+ if (expected->Is(Type::Signed32())) {
+ return AddUncasted<HForceRepresentation>(number,
+ Representation::Integer32());
+ }
+ return number;
+}
+
+
HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
@@ -7703,97 +8694,221 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
}
}
+ // We put temporary values on the stack, which don't correspond to anything
+ // in baseline code. Since nothing is observable, we avoid recording those
+ // pushes with a NoObservableSideEffectsScope.
+ NoObservableSideEffectsScope no_effects(this);
+
+ Handle<Type> expected_type = *expected;
+
+ // Separate the number type from the rest.
+ Handle<Type> expected_obj = handle(Type::Intersect(
+ expected_type, handle(Type::NonNumber(), isolate())), isolate());
+ Handle<Type> expected_number = handle(Type::Intersect(
+ expected_type, handle(Type::Number(), isolate())), isolate());
+
+ // We expect to get a number.
+ // (We need to check first, since Type::None->Is(Type::Any()) == true.)
+ if (expected_obj->Is(Type::None())) {
+ ASSERT(!expected_number->Is(Type::None()));
+ return value;
+ }
+
+ if (expected_obj->Is(Type::Undefined())) {
+ // This is already done by HChange.
+ *expected = handle(Type::Union(
+ expected_number, handle(Type::Double(), isolate())), isolate());
+ return value;
+ }
+
return value;
}
-HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
BinaryOperation* expr,
HValue* left,
HValue* right) {
- HValue* context = environment()->context();
Handle<Type> left_type = expr->left()->bounds().lower;
Handle<Type> right_type = expr->right()->bounds().lower;
Handle<Type> result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
+
+ HValue* result = HGraphBuilder::BuildBinaryOperation(
+ expr->op(), left, right, left_type, right_type,
+ result_type, fixed_right_arg);
+ // Add a simulate after instructions with observable side effects, and
+ // after phis, which are the result of BuildBinaryOperation when we
+ // inlined some complex subgraph.
+ if (result->HasObservableSideEffects() || result->IsPhi()) {
+ Push(result);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Drop(1);
+ }
+ return result;
+}
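// Illustrative note (not in the original patch): HSimulate records a
// deoptimization point. The result is pushed first so the recorded
// environment stays consistent if the operation deoptimizes, and Drop(1)
// then restores the expression stack.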
+
+
+HValue* HGraphBuilder::BuildBinaryOperation(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg) {
+
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
- Representation result_rep = Representation::FromType(result_type);
- if (expr->op() != Token::ADD ||
- (left->type().IsNonString() && right->type().IsNonString())) {
- // For addition we can only truncate the arguments to number if we can
- // prove that we will not end up in string concatenation mode.
- left = TruncateToNumber(left, &left_type);
- right = TruncateToNumber(right, &right_type);
- }
+ bool maybe_string_add = op == Token::ADD &&
+ (left_type->Maybe(Type::String()) ||
+ right_type->Maybe(Type::String()));
if (left_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
Deoptimizer::SOFT);
- // TODO(rossberg): we should be able to get rid of non-continuous defaults.
+ // TODO(rossberg): we should be able to get rid of non-continuous
+ // defaults.
left_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) left = TruncateToNumber(left, &left_type);
+ left_rep = Representation::FromType(left_type);
}
+
if (right_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
right_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
+ right_rep = Representation::FromType(right_type);
+ }
+
+ // Special case for string addition here.
+ if (op == Token::ADD &&
+ (left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
+ // Validate type feedback for left argument.
+ if (left_type->Is(Type::String())) {
+ left = BuildCheckString(left);
+ }
+
+ // Validate type feedback for right argument.
+ if (right_type->Is(Type::String())) {
+ right = BuildCheckString(right);
+ }
+
+ // Convert left argument as necessary.
+ if (left_type->Is(Type::Number())) {
+ ASSERT(right_type->Is(Type::String()));
+ left = BuildNumberToString(left, left_type);
+ } else if (!left_type->Is(Type::String())) {
+ ASSERT(right_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ return AddUncasted<HInvokeFunction>(function, 2);
+ }
+
+ // Convert right argument as necessary.
+ if (right_type->Is(Type::Number())) {
+ ASSERT(left_type->Is(Type::String()));
+ right = BuildNumberToString(right, right_type);
+ } else if (!right_type->Is(Type::String())) {
+ ASSERT(left_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ return AddUncasted<HInvokeFunction>(function, 2);
+ }
+
+ return AddUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE);
}
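// Illustrative dispatch table for the string case above (not in the
// original patch), assuming ordinary JS '+' semantics:
//
//   "a" + "b"  ->  HStringAdd(left, right)
//   "a" + 1    ->  right = BuildNumberToString(right), then HStringAdd
//   1   + "b"  ->  left = BuildNumberToString(left), then HStringAdd
//   "a" + {}   ->  invoke Builtins::STRING_ADD_LEFT
//   {}  + "b"  ->  invoke Builtins::STRING_ADD_RIGHT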
+
+ if (graph()->info()->IsStub()) {
+ left = EnforceNumberType(left, left_type);
+ right = EnforceNumberType(right, right_type);
+ }
+
+ Representation result_rep = Representation::FromType(result_type);
+
+ bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
+ (right_rep.IsTagged() && !right_rep.IsSmi());
+
HInstruction* instr = NULL;
- switch (expr->op()) {
- case Token::ADD:
- if (left_type->Is(Type::String()) && right_type->Is(Type::String())) {
- BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- instr = HStringAdd::New(zone(), context, left, right);
- } else {
- instr = HAdd::New(zone(), context, left, right);
+ // Only the stub is allowed to call into the runtime, since otherwise we would
+ // inline several instructions (including the two pushes) for every tagged
+ // operation in optimized code, which is more expensive than a stub call.
+ if (graph()->info()->IsStub() && is_non_primitive) {
+ HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ instr = AddUncasted<HInvokeFunction>(function, 2);
+ } else {
+ switch (op) {
+ case Token::ADD:
+ instr = AddUncasted<HAdd>(left, right);
+ break;
+ case Token::SUB:
+ instr = AddUncasted<HSub>(left, right);
+ break;
+ case Token::MUL:
+ instr = AddUncasted<HMul>(left, right);
+ break;
+ case Token::MOD: {
+ if (fixed_right_arg.has_value) {
+ if (right->IsConstant()) {
+ HConstant* c_right = HConstant::cast(right);
+ if (c_right->HasInteger32Value()) {
+ ASSERT_EQ(fixed_right_arg.value, c_right->Integer32Value());
+ }
+ } else {
+ HConstant* fixed_right = Add<HConstant>(
+ static_cast<int>(fixed_right_arg.value));
+ IfBuilder if_same(this);
+ if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
+ if_same.Then();
+ if_same.ElseDeopt("Unexpected RHS of binary operation");
+ right = fixed_right;
+ }
+ }
+ instr = AddUncasted<HMod>(left, right);
+ break;
}
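// Illustrative sketch (not in the original patch): with type feedback
// fixed_right_arg == 4, an expression "x % 4" compiles to a guard plus a
// modulus with a known constant divisor:
//
//   if (right != 4) Deopt("Unexpected RHS of binary operation");
//   instr = x % 4;  // divisor is now a compile-time constant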
- break;
- case Token::SUB:
- instr = HSub::New(zone(), context, left, right);
- break;
- case Token::MUL:
- instr = HMul::New(zone(), context, left, right);
- break;
- case Token::MOD:
- instr = HMod::New(zone(), context, left, right, fixed_right_arg);
- break;
- case Token::DIV:
- instr = HDiv::New(zone(), context, left, right);
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- instr = NewUncasted<HBitwise>(expr->op(), left, right);
- break;
- case Token::BIT_OR: {
- HValue* operand, *shift_amount;
- if (left_type->Is(Type::Signed32()) &&
- right_type->Is(Type::Signed32()) &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = new(zone()) HRor(context, operand, shift_amount);
- } else {
- instr = NewUncasted<HBitwise>(expr->op(), left, right);
+ case Token::DIV:
+ instr = AddUncasted<HDiv>(left, right);
+ break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ instr = AddUncasted<HBitwise>(op, left, right);
+ break;
+ case Token::BIT_OR: {
+ HValue* operand, *shift_amount;
+ if (left_type->Is(Type::Signed32()) &&
+ right_type->Is(Type::Signed32()) &&
+ MatchRotateRight(left, right, &operand, &shift_amount)) {
+ instr = AddUncasted<HRor>(operand, shift_amount);
+ } else {
+ instr = AddUncasted<HBitwise>(op, left, right);
+ }
+ break;
}
- break;
+ case Token::SAR:
+ instr = AddUncasted<HSar>(left, right);
+ break;
+ case Token::SHR:
+ instr = AddUncasted<HShr>(left, right);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
+ CanBeZero(right)) {
+ graph()->RecordUint32Instruction(instr);
+ }
+ break;
+ case Token::SHL:
+ instr = AddUncasted<HShl>(left, right);
+ break;
+ default:
+ UNREACHABLE();
}
- case Token::SAR:
- instr = HSar::New(zone(), context, left, right);
- break;
- case Token::SHR:
- instr = HShr::New(zone(), context, left, right);
- if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
- CanBeZero(right)) {
- graph()->RecordUint32Instruction(instr);
- }
- break;
- case Token::SHL:
- instr = HShl::New(zone(), context, left, right);
- break;
- default:
- UNREACHABLE();
}
if (instr->IsBinaryOperation()) {
@@ -7801,6 +8916,19 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
binop->set_observed_input_representation(1, left_rep);
binop->set_observed_input_representation(2, right_rep);
binop->initialize_output_representation(result_rep);
+ if (graph()->info()->IsStub()) {
+ // Stub should not call into stub.
+ instr->SetFlag(HValue::kCannotBeTagged);
+ // And should truncate on HForceRepresentation already.
+ if (left->IsForceRepresentation()) {
+ left->CopyFlag(HValue::kTruncatingToSmi, instr);
+ left->CopyFlag(HValue::kTruncatingToInt32, instr);
+ }
+ if (right->IsForceRepresentation()) {
+ right->CopyFlag(HValue::kTruncatingToSmi, instr);
+ right->CopyFlag(HValue::kTruncatingToInt32, instr);
+ }
+ }
}
return instr;
}
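// Illustrative note (not in the original patch): in stub code the inputs
// were already forced to an untagged representation by EnforceNumberType,
// so the binop copies the truncation flags from those
// HForceRepresentation instructions and is marked kCannotBeTagged so that
// the stub does not call back into a stub.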
@@ -7875,11 +9003,14 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
ASSERT(current_block() != NULL);
HValue* left_value = Top();
- if (left_value->IsConstant()) {
- HConstant* left_constant = HConstant::cast(left_value);
- if ((is_logical_and && left_constant->BooleanValue()) ||
- (!is_logical_and && !left_constant->BooleanValue())) {
- Drop(1); // left_value.
+ // Short-circuit left values that always evaluate to the same boolean value.
+ if (expr->left()->ToBooleanIsTrue() || expr->left()->ToBooleanIsFalse()) {
+ // l (evals true) && r -> r
+ // l (evals true) || r -> l
+ // l (evals false) && r -> l
+ // l (evals false) || r -> r
+ if (is_logical_and == expr->left()->ToBooleanIsTrue()) {
+ Drop(1);
CHECK_ALIVE(VisitForValue(expr->right()));
}
return ast_context()->ReturnValue(Pop());
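// Illustrative example (not in the original patch): for "true && r" the
// condition is_logical_and == ToBooleanIsTrue holds, so the left value is
// dropped and r is evaluated; for "true || r" it does not hold, so the
// constant left value itself is returned without visiting r.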
@@ -7890,9 +9021,9 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
HBasicBlock* eval_right = graph()->CreateBasicBlock();
ToBooleanStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
- ? new(zone()) HBranch(left_value, expected, eval_right, empty_block)
- : new(zone()) HBranch(left_value, expected, empty_block, eval_right);
- current_block()->Finish(test);
+ ? New<HBranch>(left_value, expected, eval_right, empty_block)
+ : New<HBranch>(left_value, expected, empty_block, eval_right);
+ FinishCurrentBlock(test);
set_current_block(eval_right);
Drop(1); // Value of the left subexpression.
@@ -7949,11 +9080,15 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
+ SetSourcePosition(expr->position());
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* instr = BuildBinaryOperation(expr, left, right);
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ HValue* result = BuildBinaryOperation(expr, left, right);
+ if (FLAG_emit_opt_code_positions && result->IsBinaryOperation()) {
+ HBinaryOperation::cast(result)->SetOperandPositions(
+ zone(), expr->left()->position(), expr->right()->position());
+ }
+ return ast_context()->ReturnValue(result);
}
@@ -7961,9 +9096,9 @@ void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(sub_expr));
+ SetSourcePosition(expr->position());
HValue* value = Pop();
- HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
- instr->set_position(expr->position());
+ HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
return ast_context()->ReturnControl(instr, expr->id());
}
@@ -7985,6 +9120,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
// the expression stack when one side is a special-case literal.
@@ -8007,9 +9144,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->value());
- HClassOfTestAndBranch* instr =
- new(zone()) HClassOfTestAndBranch(value, rhs);
- instr->set_position(expr->position());
+ HClassOfTestAndBranch* instr = New<HClassOfTestAndBranch>(value, rhs);
return ast_context()->ReturnControl(instr, expr->id());
}
@@ -8023,7 +9158,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- HValue* context = environment()->context();
+ if (FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
@@ -8031,7 +9167,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
if (IsLiteralCompareBool(isolate(), left, op, right)) {
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
@@ -8062,14 +9197,12 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
- HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
- result->set_position(expr->position());
+ HInstanceOf* result = New<HInstanceOf>(left, right);
return ast_context()->ReturnInstruction(result, expr->id());
} else {
Add<HCheckValue>(right, target);
HInstanceOfKnownGlobal* result =
- new(zone()) HInstanceOfKnownGlobal(context, left, target);
- result->set_position(expr->position());
+ New<HInstanceOfKnownGlobal>(left, target);
return ast_context()->ReturnInstruction(result, expr->id());
}
@@ -8081,8 +9214,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Add<HPushArgument>(right);
// TODO(olivf) InvokeFunction produces a check for the parameter count,
// even though we are certain to pass the correct number of arguments here.
- HInstruction* result = new(zone()) HInvokeFunction(context, function, 2);
- result->set_position(expr->position());
+ HInstruction* result = New<HInvokeFunction>(function, 2);
return ast_context()->ReturnInstruction(result, expr->id());
}
@@ -8106,16 +9238,18 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
AddCheckMap(right, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- result->set_position(expr->position());
+ if (FLAG_emit_opt_code_positions) {
+ result->set_operand_position(zone(), 0, expr->left()->position());
+ result->set_operand_position(zone(), 1, expr->right()->position());
+ }
return ast_context()->ReturnControl(result, expr->id());
} else {
BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT);
BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT);
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, expr->id());
}
}
@@ -8125,26 +9259,35 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
} else if (combined_type->Is(Type::InternalizedString()) &&
Token::IsEqualityOp(op)) {
BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
+ New<HCompareObjectEqAndBranch>(left, right);
+ return ast_context()->ReturnControl(result, expr->id());
+ } else if (combined_type->Is(Type::String())) {
+ BuildCheckHeapObject(left);
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
+ BuildCheckHeapObject(right);
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
+ HStringCompareAndBranch* result =
+ New<HStringCompareAndBranch>(left, right, op);
return ast_context()->ReturnControl(result, expr->id());
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result =
- new(zone()) HCompareGeneric(context, left, right, op);
+ HCompareGeneric* result = New<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
- result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareNumericAndBranch* result =
- new(zone()) HCompareNumericAndBranch(left, right, op);
+ New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- result->set_position(expr->position());
+ if (FLAG_emit_opt_code_positions) {
+ result->SetOperandPositions(zone(),
+ expr->left()->position(),
+ expr->right()->position());
+ }
return ast_context()->ReturnControl(result, expr->id());
}
}
@@ -8158,6 +9301,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
+ if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -8166,7 +9310,6 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
: graph()->GetConstantUndefined();
HCompareObjectEqAndBranch* instr =
New<HCompareObjectEqAndBranch>(value, nil_constant);
- instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
} else {
ASSERT_EQ(Token::EQ, expr->op());
@@ -8174,7 +9317,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
? handle(Type::Any(), isolate_)
: expr->combined_type();
HIfContinuation continuation;
- BuildCompareNil(value, type, expr->position(), &continuation);
+ BuildCompareNil(value, type, &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
}
@@ -8187,49 +9330,40 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
return New<HConstant>(
function_state()->compilation_info()->closure());
} else {
- return new(zone()) HThisFunction;
+ return New<HThisFunction>();
}
}
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
Handle<JSObject> boilerplate_object,
- Handle<Object> allocation_site_object,
- AllocationSiteMode mode) {
+ AllocationSiteUsageContext* site_context) {
NoObservableSideEffectsScope no_effects(this);
-
- Handle<FixedArrayBase> elements(boilerplate_object->elements());
- int object_size = boilerplate_object->map()->instance_size();
- int object_offset = object_size;
-
InstanceType instance_type = boilerplate_object->map()->instance_type();
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- AllocationSite::CanTrack(instance_type);
+ ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
- // If using allocation sites, then the payload on the site should already
- // be filled in as a valid (boilerplate) array.
- ASSERT(!create_allocation_site_info ||
- AllocationSite::cast(*allocation_site_object)->IsLiteralSite());
+ HType type = instance_type == JS_ARRAY_TYPE
+ ? HType::JSArray() : HType::JSObject();
+ HValue* object_size_constant = Add<HConstant>(
+ boilerplate_object->map()->instance_size());
- if (create_allocation_site_info) {
- object_size += AllocationMemento::kSize;
+ // We should pull the pretenure mode from the allocation site.
+ // For now, just see what it says, and remark on it if it says we should
+ // pretenure. That means the rudimentary counting in the garbage
+ // collector is having an effect.
+ PretenureFlag pretenure_flag = isolate()->heap()->GetPretenureMode();
+ if (FLAG_allocation_site_pretenuring) {
+ pretenure_flag = site_context->current()->GetPretenureMode()
+ ? TENURED
+ : NOT_TENURED;
}
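// Illustrative sketch (not in the original patch): with
// FLAG_allocation_site_pretenuring enabled, a site whose counters indicate
// long-lived allocations reports a true pretenure mode, so the literal is
// allocated TENURED (directly in old space); otherwise NOT_TENURED keeps
// it in the young generation.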
- ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
- HType type = instance_type == JS_ARRAY_TYPE
- ? HType::JSArray() : HType::JSObject();
- HValue* object_size_constant = Add<HConstant>(object_size);
HInstruction* object = Add<HAllocate>(object_size_constant, type,
- isolate()->heap()->GetPretenureMode(), instance_type);
-
+ pretenure_flag, instance_type, site_context->current());
BuildEmitObjectHeader(boilerplate_object, object);
- if (create_allocation_site_info) {
- HInstruction* allocation_site = Add<HConstant>(allocation_site_object);
- BuildCreateAllocationMemento(object, object_offset, allocation_site);
- }
-
+ Handle<FixedArrayBase> elements(boilerplate_object->elements());
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
@@ -8239,23 +9373,24 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
HValue* object_elements_size = Add<HConstant>(elements_size);
if (boilerplate_object->HasFastDoubleElements()) {
object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE, site_context->current());
} else {
object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ pretenure_flag, FIXED_ARRAY_TYPE, site_context->current());
}
}
BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
-
// Copy object elements if non-COW.
if (object_elements != NULL) {
- BuildEmitElements(boilerplate_object, elements, object_elements);
+ BuildEmitElements(boilerplate_object, elements, object_elements,
+ site_context);
}
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- BuildEmitInObjectProperties(boilerplate_object, object);
+ BuildEmitInObjectProperties(boilerplate_object, object, site_context,
+ pretenure_flag);
}
return object;
}
@@ -8307,7 +9442,9 @@ void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
- HInstruction* object) {
+ HInstruction* object,
+ AllocationSiteUsageContext* site_context,
+ PretenureFlag pretenure_flag) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -8331,25 +9468,38 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
HInstruction* result =
- BuildFastLiteral(value_object,
- Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ BuildFastLiteral(value_object, site_context);
+ site_context->ExitScope(current_site, value_object);
Add<HStoreNamedField>(object, access, result);
} else {
Representation representation = details.representation();
- HInstruction* value_instruction = Add<HConstant>(value);
+ HInstruction* value_instruction;
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
+ // This heap number allocation does not have a corresponding
+ // AllocationSite. That is okay because
+ // 1) it's a child object of another object with a valid allocation
+ //    site, and
+ // 2) we can just use the pretenuring mode of the parent object.
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
- isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
+ pretenure_flag, HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
- value_instruction);
+ Add<HConstant>(value));
value_instruction = double_box;
+ } else if (representation.IsSmi()) {
+ value_instruction = value->IsUninitialized()
+ ? graph()->GetConstant0()
+ : Add<HConstant>(value);
+ // Ensure that value is stored as smi.
+ access = access.WithRepresentation(representation);
+ } else {
+ value_instruction = Add<HConstant>(value);
}
Add<HStoreNamedField>(object, access, value_instruction);
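// Illustrative layout (not in the original patch): a double field is
// stored boxed as a HeapNumber allocated with the parent's pretenure flag,
//
//   double_box: [ map = heap_number_map | value = <double> ]
//
// and the pointer to this box is what gets written into the object field.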
@@ -8371,7 +9521,8 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
void HOptimizedGraphBuilder::BuildEmitElements(
Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
- HValue* object_elements) {
+ HValue* object_elements,
+ AllocationSiteUsageContext* site_context) {
ElementsKind kind = boilerplate_object->map()->elements_kind();
int elements_length = elements->length();
HValue* object_elements_length = Add<HConstant>(elements_length);
@@ -8381,7 +9532,8 @@ void HOptimizedGraphBuilder::BuildEmitElements(
if (elements->IsFixedDoubleArray()) {
BuildEmitFixedDoubleArray(elements, kind, object_elements);
} else if (elements->IsFixedArray()) {
- BuildEmitFixedArray(elements, kind, object_elements);
+ BuildEmitFixedArray(elements, kind, object_elements,
+ site_context);
} else {
UNREACHABLE();
}
@@ -8410,7 +9562,8 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
void HOptimizedGraphBuilder::BuildEmitFixedArray(
Handle<FixedArrayBase> elements,
ElementsKind kind,
- HValue* object_elements) {
+ HValue* object_elements,
+ AllocationSiteUsageContext* site_context) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
@@ -8419,9 +9572,10 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
HValue* key_constant = Add<HConstant>(i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
HInstruction* result =
- BuildFastLiteral(value_object,
- Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ BuildFastLiteral(value_object, site_context);
+ site_context->ExitScope(current_site, value_object);
Add<HStoreKeyed>(object_elements, key_constant, result, kind);
} else {
HInstruction* value_instruction =
@@ -8584,7 +9738,7 @@ void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
+ HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8594,9 +9748,9 @@ void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ New<HHasInstanceTypeAndBranch>(value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8606,7 +9760,16 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_FUNCTION_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8616,7 +9779,7 @@ void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasCachedArrayIndexAndBranch* result =
- new(zone()) HHasCachedArrayIndexAndBranch(value);
+ New<HHasCachedArrayIndexAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8626,7 +9789,7 @@ void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_ARRAY_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8636,7 +9799,7 @@ void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
- new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE);
+ New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8645,7 +9808,7 @@ void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
+ HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8659,8 +9822,7 @@ void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsUndetectableAndBranch* result =
- new(zone()) HIsUndetectableAndBranch(value);
+ HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8681,7 +9843,7 @@ void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
- return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
+ return ast_context()->ReturnControl(New<HIsConstructCallAndBranch>(),
call->id());
}
}
@@ -8711,8 +9873,8 @@ void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
HInstruction* checked_index = Add<HBoundsCheck>(index, length);
- HAccessArgumentsAt* result =
- new(zone()) HAccessArgumentsAt(elements, length, checked_index);
+ HAccessArgumentsAt* result = New<HAccessArgumentsAt>(
+ elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8729,7 +9891,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HValueOf* result = new(zone()) HValueOf(value);
+ HValueOf* result = New<HValueOf>(value);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8740,7 +9902,7 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* date = Pop();
- HDateField* result = new(zone()) HDateField(date, index);
+ HDateField* result = New<HDateField>(date, index);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8754,9 +9916,10 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
HValue* value = Pop();
HValue* index = Pop();
HValue* string = Pop();
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
- String::ONE_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
+ Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
+ index, value);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
@@ -8769,9 +9932,10 @@ void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
HValue* value = Pop();
HValue* index = Pop();
HValue* string = Pop();
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
- String::TWO_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
+ Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
+ index, value);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
@@ -8782,31 +9946,28 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
HValue* value = Pop();
HValue* object = Pop();
// Check if the object is not a smi.
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
HBasicBlock* if_smi = graph()->CreateBasicBlock();
HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
- smicheck->SetSuccessorAt(0, if_smi);
- smicheck->SetSuccessorAt(1, if_heap_object);
- current_block()->Finish(smicheck);
- if_smi->Goto(join);
+ FinishCurrentBlock(New<HIsSmiAndBranch>(object, if_smi, if_heap_object));
+ Goto(if_smi, join);
// Check if object is a JSValue.
set_current_block(if_heap_object);
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
+ New<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
HBasicBlock* if_js_value = graph()->CreateBasicBlock();
HBasicBlock* not_js_value = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_js_value);
typecheck->SetSuccessorAt(1, not_js_value);
- current_block()->Finish(typecheck);
- not_js_value->Goto(join);
+ FinishCurrentBlock(typecheck);
+ Goto(not_js_value, join);
// Create in-object property store to kValueOffset.
set_current_block(if_js_value);
Add<HStoreNamedField>(object,
HObjectAccess::ForJSObjectOffset(JSValue::kValueOffset), value);
- if_js_value->Goto(join);
+ Goto(if_js_value, join);
join->SetJoinId(call->id());
set_current_block(join);
return ast_context()->ReturnValue(value);
@@ -8830,7 +9991,7 @@ void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
- HInstruction* result = New<HStringCharFromCode>(char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8844,7 +10005,7 @@ void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
HValue* string = Pop();
HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
- HInstruction* result = New<HStringCharFromCode>(char_code);
+ HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8868,14 +10029,6 @@ void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
}
-// Fast support for Math.random().
-void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
- HGlobalObject* global_object = Add<HGlobalObject>();
- HRandom* result = new(zone()) HRandom(global_object);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Fast support for StringAdd.
void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
@@ -8883,9 +10036,8 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HValue* context = environment()->context();
- HInstruction* result = HStringAdd::New(
- zone(), context, left, right, STRING_ADD_CHECK_BOTH);
+ HInstruction* result =
+ NewUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_BOTH);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8894,8 +10046,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
+ HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8905,9 +10056,7 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
+ HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
Drop(2);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8917,8 +10066,7 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
+ HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
Drop(4);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8928,9 +10076,7 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
+ HCallStub* result = New<HCallStub>(CodeStub::RegExpConstructResult, 3);
Drop(3);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -8945,12 +10091,11 @@ void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
// Fast support for number to string.
void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* number = Pop();
+ HValue* result = BuildNumberToString(
+ number, handle(Type::Number(), isolate()));
+ return ast_context()->ReturnValue(result);
}
@@ -8969,25 +10114,25 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// Branch for function proxies or other non-functions.
HHasInstanceTypeAndBranch* typecheck =
- new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
+ New<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_jsfunction);
typecheck->SetSuccessorAt(1, if_nonfunction);
- current_block()->Finish(typecheck);
+ FinishCurrentBlock(typecheck);
set_current_block(if_jsfunction);
HInstruction* invoke_result = Add<HInvokeFunction>(function, arg_count);
Drop(arg_count);
Push(invoke_result);
- if_jsfunction->Goto(join);
+ Goto(if_jsfunction, join);
set_current_block(if_nonfunction);
HInstruction* call_result = Add<HCallFunction>(function, arg_count);
Drop(arg_count);
Push(call_result);
- if_nonfunction->Goto(join);
+ Goto(if_nonfunction, join);
set_current_block(join);
join->SetJoinId(call->id());
@@ -9002,43 +10147,7 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* result = HPower::New(zone(), context(), left, right);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::SIN);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::COS);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
- ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
- result->set_transcendental_type(TranscendentalCache::TAN);
- Drop(1);
+ HInstruction* result = NewUncasted<HPower>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -9046,9 +10155,7 @@ void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
+ HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
return ast_context()->ReturnInstruction(result, call->id());
@@ -9059,9 +10166,7 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HValue* context = environment()->context();
- HInstruction* result =
- HUnaryMathOperation::New(zone(), context, value, kMathSqrt);
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -9076,7 +10181,7 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
+ HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -9099,7 +10204,7 @@ void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
CallRuntime* call) {
- AddInstruction(new(zone()) HDebugBreak());
+ Add<HDebugBreak>();
return ast_context()->ReturnValue(graph()->GetConstant0());
}
@@ -9422,7 +10527,7 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!chunk->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
@@ -9430,7 +10535,7 @@ void HTracer::TraceLithium(const char* name, LChunk* chunk) {
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!graph->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
@@ -9514,7 +10619,8 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
Tag HIR_tag(this, "HIR");
for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
- int bci = 0;
+ int bci = FLAG_emit_opt_code_positions && instruction->has_position() ?
+ instruction->position() : 0;
int uses = instruction->UseCount();
PrintIndent();
trace_.Add("%d %d ", bci, uses);
@@ -9539,6 +10645,9 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
trace_.Add("%d ",
LifetimePosition::FromInstructionIndex(i).Value());
linstr->PrintTo(&trace_);
+ trace_.Add(" [hir:");
+ linstr->hydrogen_value()->PrintNameTo(&trace_);
+ trace_.Add("]");
trace_.Add(" <|@\n");
}
}
diff --git a/chromium/v8/src/hydrogen.h b/chromium/v8/src/hydrogen.h
index 0ecbbca1bf6..61e98b2b0ce 100644
--- a/chromium/v8/src/hydrogen.h
+++ b/chromium/v8/src/hydrogen.h
@@ -30,6 +30,7 @@
#include "v8.h"
+#include "accessors.h"
#include "allocation.h"
#include "ast.h"
#include "compiler.h"
@@ -109,7 +110,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
bool IsFinished() const { return end_ != NULL; }
void AddPhi(HPhi* phi);
void RemovePhi(HPhi* phi);
- void AddInstruction(HInstruction* instr);
+ void AddInstruction(HInstruction* instr, int position);
bool Dominates(HBasicBlock* other) const;
int LoopNestingDepth() const;
@@ -132,30 +133,18 @@ class HBasicBlock V8_FINAL : public ZoneObject {
void SetJoinId(BailoutId ast_id);
- void Finish(HControlInstruction* last);
- void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block,
- FunctionState* state = NULL,
- bool add_simulate = true);
- void GotoNoSimulate(HBasicBlock* block) {
- Goto(block, NULL, false);
- }
-
int PredecessorIndexOf(HBasicBlock* predecessor) const;
HPhi* AddNewPhi(int merged_index);
HSimulate* AddNewSimulate(BailoutId ast_id,
+ int position,
RemovableSimulate removable = FIXED_SIMULATE) {
HSimulate* instr = CreateSimulate(ast_id, removable);
- AddInstruction(instr);
+ AddInstruction(instr, position);
return instr;
}
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
- // Add the inlined function exit sequence, adding an HLeaveInlined
- // instruction and updating the bailout environment.
- void AddLeaveInlined(HValue* return_value, FunctionState* state);
-
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
//
@@ -169,8 +158,13 @@ class HBasicBlock V8_FINAL : public ZoneObject {
}
HBasicBlock* inlined_entry_block() { return inlined_entry_block_; }
- bool IsDeoptimizing() const { return is_deoptimizing_; }
- void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
+ bool IsDeoptimizing() const {
+ return end() != NULL && end()->IsDeoptimize();
+ }
+
+ void MarkUnreachable();
+ bool IsUnreachable() const { return !is_reachable_; }
+ bool IsReachable() const { return is_reachable_; }
bool IsLoopSuccessorDominator() const {
return dominates_loop_successors_;
@@ -185,14 +179,30 @@ class HBasicBlock V8_FINAL : public ZoneObject {
void Verify();
#endif
- private:
+ protected:
friend class HGraphBuilder;
+ HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
+ void Finish(HControlInstruction* last, int position);
+ void FinishExit(HControlInstruction* instruction, int position);
+ void Goto(HBasicBlock* block,
+ int position,
+ FunctionState* state = NULL,
+ bool add_simulate = true);
+ void GotoNoSimulate(HBasicBlock* block, int position) {
+ Goto(block, position, NULL, false);
+ }
+
+ // Add the inlined function exit sequence, adding an HLeaveInlined
+ // instruction and updating the bailout environment.
+ void AddLeaveInlined(HValue* return_value,
+ FunctionState* state,
+ int position);
+
+ private:
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
-
int block_id_;
HGraph* graph_;
ZoneList<HPhi*> phis_;
@@ -214,7 +224,7 @@ class HBasicBlock V8_FINAL : public ZoneObject {
// For blocks marked as inline return target: the block with HEnterInlined.
HBasicBlock* inlined_entry_block_;
bool is_inline_return_target_ : 1;
- bool is_deoptimizing_ : 1;
+ bool is_reachable_ : 1;
bool dominates_loop_successors_ : 1;
bool is_osr_entry_ : 1;
};
@@ -316,7 +326,7 @@ class HGraph V8_FINAL : public ZoneObject {
HBasicBlock* entry_block() const { return entry_block_; }
HEnvironment* start_environment() const { return start_environment_; }
- void FinalizeUniqueValueIds();
+ void FinalizeUniqueness();
bool ProcessArgumentsObject();
void OrderBlocks();
void AssignDominators();
@@ -332,10 +342,7 @@ class HGraph V8_FINAL : public ZoneObject {
void CollectPhis();
- void set_undefined_constant(HConstant* constant) {
- constant_undefined_.set(constant);
- }
- HConstant* GetConstantUndefined() const { return constant_undefined_.get(); }
+ HConstant* GetConstantUndefined();
HConstant* GetConstant0();
HConstant* GetConstant1();
HConstant* GetConstantMinus1();
@@ -417,14 +424,6 @@ class HGraph V8_FINAL : public ZoneObject {
use_optimistic_licm_ = value;
}
- bool has_soft_deoptimize() {
- return has_soft_deoptimize_;
- }
-
- void set_has_soft_deoptimize(bool value) {
- has_soft_deoptimize_ = value;
- }
-
void MarkRecursive() {
is_recursive_ = true;
}
@@ -470,6 +469,7 @@ class HGraph V8_FINAL : public ZoneObject {
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
private:
+ HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
@@ -507,7 +507,6 @@ class HGraph V8_FINAL : public ZoneObject {
bool is_recursive_;
bool use_optimistic_licm_;
- bool has_soft_deoptimize_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
int maximum_environment_size_;
@@ -954,26 +953,29 @@ class FunctionState V8_FINAL {
class HIfContinuation V8_FINAL {
public:
- HIfContinuation() { continuation_captured_ = false; }
+ HIfContinuation()
+ : continuation_captured_(false),
+ true_branch_(NULL),
+ false_branch_(NULL) {}
+ HIfContinuation(HBasicBlock* true_branch,
+ HBasicBlock* false_branch)
+ : continuation_captured_(true), true_branch_(true_branch),
+ false_branch_(false_branch) {}
~HIfContinuation() { ASSERT(!continuation_captured_); }
void Capture(HBasicBlock* true_branch,
- HBasicBlock* false_branch,
- int position) {
+ HBasicBlock* false_branch) {
ASSERT(!continuation_captured_);
true_branch_ = true_branch;
false_branch_ = false_branch;
- position_ = position;
continuation_captured_ = true;
}
void Continue(HBasicBlock** true_branch,
- HBasicBlock** false_branch,
- int* position) {
+ HBasicBlock** false_branch) {
ASSERT(continuation_captured_);
*true_branch = true_branch_;
*false_branch = false_branch_;
- if (position != NULL) *position = position_;
continuation_captured_ = false;
}
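// Illustrative usage (not in the original patch), mirroring
// HandleLiteralCompareNil in hydrogen.cc above:
//
//   HIfContinuation continuation;
//   BuildCompareNil(value, type, &continuation);  // calls Capture(...)
//   return ast_context()->ReturnContinuation(&continuation, expr->id());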
@@ -983,10 +985,13 @@ class HIfContinuation V8_FINAL {
return IsTrueReachable() || IsFalseReachable();
}
+ HBasicBlock* true_branch() const { return true_branch_; }
+ HBasicBlock* false_branch() const { return false_branch_; }
+
+ private:
bool continuation_captured_;
HBasicBlock* true_branch_;
HBasicBlock* false_branch_;
- int position_;
};
@@ -995,7 +1000,8 @@ class HGraphBuilder {
explicit HGraphBuilder(CompilationInfo* info)
: info_(info),
graph_(NULL),
- current_block_(NULL) {}
+ current_block_(NULL),
+ position_(RelocInfo::kNoPosition) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -1018,18 +1024,46 @@ class HGraphBuilder {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
+ void FinishCurrentBlock(HControlInstruction* last);
+ void FinishExitCurrentBlock(HControlInstruction* instruction);
+
+ void Goto(HBasicBlock* from,
+ HBasicBlock* target,
+ FunctionState* state = NULL,
+ bool add_simulate = true) {
+ from->Goto(target, position_, state, add_simulate);
+ }
+ void Goto(HBasicBlock* target,
+ FunctionState* state = NULL,
+ bool add_simulate = true) {
+ Goto(current_block(), target, state, add_simulate);
+ }
+ void GotoNoSimulate(HBasicBlock* from, HBasicBlock* target) {
+ Goto(from, target, NULL, false);
+ }
+ void GotoNoSimulate(HBasicBlock* target) {
+ Goto(target, NULL, false);
+ }
+ void AddLeaveInlined(HBasicBlock* block,
+ HValue* return_value,
+ FunctionState* state) {
+ block->AddLeaveInlined(return_value, state, position_);
+ }
+ void AddLeaveInlined(HValue* return_value, FunctionState* state) {
+ return AddLeaveInlined(current_block(), return_value, state);
+ }
template<class I>
HInstruction* NewUncasted() { return I::New(zone(), context()); }
template<class I>
- I* New() { return I::cast(NewUncasted<I>()); }
+ I* New() { return I::New(zone(), context()); }
template<class I>
HInstruction* AddUncasted() { return AddInstruction(NewUncasted<I>());}
template<class I>
- I* Add() { return I::cast(AddUncasted<I>());}
+ I* Add() { return AddInstructionTyped(New<I>());}
template<class I, class P1>
HInstruction* NewUncasted(P1 p1) {
@@ -1037,7 +1071,7 @@ class HGraphBuilder {
}
template<class I, class P1>
- I* New(P1 p1) { return I::cast(NewUncasted<I>(p1)); }
+ I* New(P1 p1) { return I::New(zone(), context(), p1); }
template<class I, class P1>
HInstruction* AddUncasted(P1 p1) {
@@ -1051,7 +1085,12 @@ class HGraphBuilder {
template<class I, class P1>
I* Add(P1 p1) {
- return I::cast(AddUncasted<I>(p1));
+ I* result = AddInstructionTyped(New<I>(p1));
+ // Specializations must have their parameters properly cast
+ // to avoid landing here.
+ ASSERT(!result->IsReturn() && !result->IsSimulate() &&
+ !result->IsDeoptimize());
+ return result;
}
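// Illustrative usage (not in the original patch): the typed factory lets
// builder code write
//
//   HConstant* size = Add<HConstant>(HeapNumber::kSize);
//
// instead of manually casting the result of AddUncasted<HConstant>(...).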
template<class I, class P1, class P2>
@@ -1061,7 +1100,7 @@ class HGraphBuilder {
template<class I, class P1, class P2>
I* New(P1 p1, P2 p2) {
- return I::cast(NewUncasted<I>(p1, p2));
+ return I::New(zone(), context(), p1, p2);
}
template<class I, class P1, class P2>
@@ -1075,7 +1114,11 @@ class HGraphBuilder {
template<class I, class P1, class P2>
I* Add(P1 p1, P2 p2) {
- return I::cast(AddUncasted<I>(p1, p2));
+ I* result = AddInstructionTyped(New<I>(p1, p2));
+ // Specializations must have their parameters properly cast
+ // to avoid landing here.
+ ASSERT(!result->IsSimulate());
+ return result;
}
template<class I, class P1, class P2, class P3>
@@ -1085,7 +1128,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3>
I* New(P1 p1, P2 p2, P3 p3) {
- return I::cast(NewUncasted<I>(p1, p2, p3));
+ return I::New(zone(), context(), p1, p2, p3);
}
template<class I, class P1, class P2, class P3>
@@ -1095,7 +1138,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3>
I* Add(P1 p1, P2 p2, P3 p3) {
- return I::cast(AddUncasted<I>(p1, p2, p3));
+ return AddInstructionTyped(New<I>(p1, p2, p3));
}
template<class I, class P1, class P2, class P3, class P4>
@@ -1105,7 +1148,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4>
I* New(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4));
+ return I::New(zone(), context(), p1, p2, p3, p4);
}
template<class I, class P1, class P2, class P3, class P4>
@@ -1115,7 +1158,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4) {
- return I::cast(AddUncasted<I>(p1, p2, p3, p4));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4));
}
template<class I, class P1, class P2, class P3, class P4, class P5>
@@ -1125,7 +1168,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5);
}
template<class I, class P1, class P2, class P3, class P4, class P5>
@@ -1135,7 +1178,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return I::cast(AddUncasted<I>(p1, p2, p3, p4, p5));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5));
}
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
@@ -1145,7 +1188,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6);
}
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
@@ -1155,7 +1198,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
- return I::cast(AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6)));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6));
}
template<class I, class P1, class P2, class P3, class P4,
@@ -1167,7 +1210,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7);
}
template<class I, class P1, class P2, class P3,
@@ -1179,8 +1222,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3,
class P4, class P5, class P6, class P7>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
- return I::cast(AddInstruction(NewUncasted<I>(p1, p2, p3, p4,
- p5, p6, p7)));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7));
}
template<class I, class P1, class P2, class P3, class P4,
@@ -1193,7 +1235,7 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7, class P8>
I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::cast(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8));
+ return I::New(zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
}
template<class I, class P1, class P2, class P3, class P4,
@@ -1206,12 +1248,13 @@ class HGraphBuilder {
template<class I, class P1, class P2, class P3, class P4,
class P5, class P6, class P7, class P8>
I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
- return I::cast(
- AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8)));
+ return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8));
}
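The hunks above all converge on one shape: the typed New<I>() now calls the instruction's own static New(zone(), context(), ...) factory, and the typed Add<I>() funnels the result through a single cast point, AddInstructionTyped() (defined near the end of this file's diff). A minimal compilable model of that shape, with illustrative names and V8's zone/context plumbing elided:

    #include <cstdio>

    // Illustrative stand-ins, not V8 classes.
    struct HInstruction {
      virtual ~HInstruction() {}
    };

    struct HAddExample : HInstruction {
      int left, right;
      HAddExample(int l, int r) : left(l), right(r) {}
      static HAddExample* New(int l, int r) { return new HAddExample(l, r); }
      static HAddExample* cast(HInstruction* instr) {
        return static_cast<HAddExample*>(instr);
      }
    };

    struct GraphBuilder {
      HInstruction* AddInstruction(HInstruction* instr) {
        return instr;  // a real builder appends instr to the current block here
      }
      // The single cast point every typed Add<I>() now routes through.
      template <class I>
      I* AddInstructionTyped(I* instr) {
        return I::cast(AddInstruction(instr));
      }
      template <class I, class P1, class P2>
      I* Add(P1 p1, P2 p2) {
        return AddInstructionTyped(I::New(p1, p2));
      }
    };

    int main() {
      GraphBuilder builder;
      HAddExample* add = builder.Add<HAddExample>(1, 2);
      std::printf("%d\n", add->left + add->right);  // prints 3
      delete add;
      return 0;
    }

The per-arity copies in the patch exist because this code predates variadic templates in V8's toolchain; the pattern itself is identical at every arity.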
void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
+ int position() const { return position_; }
+
protected:
virtual bool BuildGraph() = 0;
@@ -1220,6 +1263,7 @@ class HGraphBuilder {
HValue* BuildCheckHeapObject(HValue* object);
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
+ HValue* BuildCheckString(HValue* string);
HValue* BuildWrapReceiver(HValue* object, HValue* function);
// Building common constructs
@@ -1241,6 +1285,31 @@ class HGraphBuilder {
ElementsKind to_kind,
bool is_jsarray);
+ HValue* BuildNumberToString(HValue* object, Handle<Type> type);
+
+ HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
+ HValue* key);
+
+ // Computes the size for a sequential string of the given length and encoding.
+ HValue* BuildSeqStringSizeFor(HValue* length,
+ String::Encoding encoding);
+ // Copies characters from one sequential string to another.
+ void BuildCopySeqStringChars(HValue* src,
+ HValue* src_offset,
+ String::Encoding src_encoding,
+ HValue* dst,
+ HValue* dst_offset,
+ String::Encoding dst_encoding,
+ HValue* length);
+ // Both operands are non-empty strings.
+ HValue* BuildUncheckedStringAdd(HValue* left,
+ HValue* right,
+ PretenureFlag pretenure_flag);
+ // Both operands are strings.
+ HValue* BuildStringAdd(HValue* left,
+ HValue* right,
+ PretenureFlag pretenure_flag);
+
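BuildSeqStringSizeFor has a simple job: a sequential string occupies a fixed header plus one or two bytes per character depending on encoding, rounded up to the heap's allocation granularity. A hedged sketch of that computation; the header size and alignment constants below are placeholders, not V8's exact values:

    #include <cstdint>
    #include <cstdio>

    enum class Encoding { kOneByte, kTwoByte };

    std::uint32_t SeqStringSizeFor(std::uint32_t length, Encoding encoding) {
      const std::uint32_t kHeaderSize = 16;  // map + hash + length (assumed)
      const std::uint32_t kAlignment = 8;    // allocation granularity (assumed)
      std::uint32_t char_size = (encoding == Encoding::kOneByte) ? 1 : 2;
      std::uint32_t unaligned = kHeaderSize + length * char_size;
      return (unaligned + kAlignment - 1) & ~(kAlignment - 1);  // round up
    }

    int main() {
      std::printf("%u\n", SeqStringSizeFor(5, Encoding::kTwoByte));  // prints 32
      return 0;
    }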
HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
@@ -1251,46 +1320,55 @@ class HGraphBuilder {
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
- HInstruction* AddExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
-
- HInstruction* AddFastElementAccess(
+ HInstruction* AddElementAccess(
HValue* elements,
HValue* checked_key,
HValue* val,
HValue* dependency,
ElementsKind elements_kind,
bool is_store,
- LoadKeyedHoleMode load_mode,
- KeyedAccessStoreMode store_mode);
+ LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
+ HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access);
HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
- HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>);
+ HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map);
+ HStoreNamedField* AddStoreMapConstantNoWriteBarrier(HValue* object,
+ Handle<Map> map) {
+ HStoreNamedField* store_map = AddStoreMapConstant(object, map);
+ store_map->SkipWriteBarrier();
+ return store_map;
+ }
HLoadNamedField* AddLoadElements(HValue* object);
+
+ bool MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount);
+
+ HValue* BuildBinaryOperation(Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg);
+
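MatchRotateRight moves here from HOptimizedGraphBuilder (see the deletion near the end of this file's diff). It recognizes the portable shift/or idiom for a rotate so the backend can lower it to a single instruction. The idiom it matches, restated as plain C++:

    #include <cstdint>
    #include <cstdio>

    // Hardware has a single instruction (e.g. ror on ia32) that computes this.
    std::uint32_t RotateRightIdiom(std::uint32_t x, unsigned n) {
      n &= 31;  // keep both shift amounts in range
      return (x >> n) | (x << ((32 - n) & 31));
    }

    int main() {
      std::printf("%08x\n", RotateRightIdiom(0x12345678u, 8));  // prints 78123456
      return 0;
    }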
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
+ HValue* EnforceNumberType(HValue* number, Handle<Type> expected);
HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
- void PushAndAdd(HInstruction* instr);
-
void FinishExitWithHardDeoptimization(const char* reason,
HBasicBlock* continuation);
- void AddIncrementCounter(StatsCounter* counter,
- HValue* context);
+ void AddIncrementCounter(StatsCounter* counter);
class IfBuilder V8_FINAL {
public:
- explicit IfBuilder(HGraphBuilder* builder,
- int position = RelocInfo::kNoPosition);
+ explicit IfBuilder(HGraphBuilder* builder);
IfBuilder(HGraphBuilder* builder,
HIfContinuation* continuation);
@@ -1299,80 +1377,79 @@ class HGraphBuilder {
}
template<class Condition>
- HInstruction* If(HValue *p) {
- HControlInstruction* compare = new(zone()) Condition(p);
+ Condition* If(HValue *p) {
+ Condition* compare = builder()->New<Condition>(p);
AddCompare(compare);
return compare;
}
template<class Condition, class P2>
- HInstruction* If(HValue* p1, P2 p2) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2);
+ Condition* If(HValue* p1, P2 p2) {
+ Condition* compare = builder()->New<Condition>(p1, p2);
AddCompare(compare);
return compare;
}
template<class Condition, class P2, class P3>
- HInstruction* If(HValue* p1, P2 p2, P3 p3) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
+ Condition* If(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = builder()->New<Condition>(p1, p2, p3);
AddCompare(compare);
return compare;
}
+ template<class Condition>
+ Condition* IfNot(HValue* p) {
+ Condition* compare = If<Condition>(p);
+ compare->Not();
+ return compare;
+ }
+
template<class Condition, class P2>
- HInstruction* IfNot(HValue* p1, P2 p2) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2);
- AddCompare(compare);
- HBasicBlock* block0 = compare->SuccessorAt(0);
- HBasicBlock* block1 = compare->SuccessorAt(1);
- compare->SetSuccessorAt(0, block1);
- compare->SetSuccessorAt(1, block0);
+ Condition* IfNot(HValue* p1, P2 p2) {
+ Condition* compare = If<Condition>(p1, p2);
+ compare->Not();
return compare;
}
template<class Condition, class P2, class P3>
- HInstruction* IfNot(HValue* p1, P2 p2, P3 p3) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
- AddCompare(compare);
- HBasicBlock* block0 = compare->SuccessorAt(0);
- HBasicBlock* block1 = compare->SuccessorAt(1);
- compare->SetSuccessorAt(0, block1);
- compare->SetSuccessorAt(1, block0);
+ Condition* IfNot(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = If<Condition>(p1, p2, p3);
+ compare->Not();
return compare;
}
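The rewritten IfNot() variants no longer swap the successor blocks by hand; that logic presumably lives behind the condition's Not() method now. What the removed bodies did, modeled minimally:

    #include <cstdio>
    #include <utility>

    struct Block { const char* name; };

    // Minimal stand-in for a two-successor control instruction.
    struct Compare {
      Block* succ[2];
      void Not() { std::swap(succ[0], succ[1]); }  // what the old bodies inlined
    };

    int main() {
      Block t = {"true-target"}, f = {"false-target"};
      Compare cmp = {{&t, &f}};
      cmp.Not();
      std::printf("%s %s\n", cmp.succ[0]->name, cmp.succ[1]->name);
      // prints: false-target true-target
      return 0;
    }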
template<class Condition>
- HInstruction* OrIf(HValue *p) {
+ Condition* OrIf(HValue *p) {
Or();
return If<Condition>(p);
}
template<class Condition, class P2>
- HInstruction* OrIf(HValue* p1, P2 p2) {
+ Condition* OrIf(HValue* p1, P2 p2) {
Or();
return If<Condition>(p1, p2);
}
template<class Condition, class P2, class P3>
- HInstruction* OrIf(HValue* p1, P2 p2, P3 p3) {
+ Condition* OrIf(HValue* p1, P2 p2, P3 p3) {
Or();
return If<Condition>(p1, p2, p3);
}
template<class Condition>
- HInstruction* AndIf(HValue *p) {
+ Condition* AndIf(HValue *p) {
And();
return If<Condition>(p);
}
template<class Condition, class P2>
- HInstruction* AndIf(HValue* p1, P2 p2) {
+ Condition* AndIf(HValue* p1, P2 p2) {
And();
return If<Condition>(p1, p2);
}
template<class Condition, class P2, class P3>
- HInstruction* AndIf(HValue* p1, P2 p2, P3 p3) {
+ Condition* AndIf(HValue* p1, P2 p2, P3 p3) {
And();
return If<Condition>(p1, p2, p3);
}
@@ -1380,13 +1457,59 @@ class HGraphBuilder {
void Or();
void And();
+ // Captures the current state of this IfBuilder in the specified
+ // continuation and ends this IfBuilder.
void CaptureContinuation(HIfContinuation* continuation);
+ // Joins the specified continuation from this IfBuilder and ends this
+ // IfBuilder. This appends a Goto instruction from the true branch of
+ // this IfBuilder to the true branch of the continuation unless the
+    // true branch of this IfBuilder is already finished, and vice versa
+ // for the false branch.
+ //
+    // The basic idea is as follows: you have several nested IfBuilders that
+    // you want to join based on two possible outcomes (e.g. success and
+    // failure). You can do this easily using this method, for example:
+ //
+ // HIfContinuation cont(graph()->CreateBasicBlock(),
+ // graph()->CreateBasicBlock());
+ // ...
+ // IfBuilder if_whatever(this);
+ // if_whatever.If<Condition>(arg);
+ // if_whatever.Then();
+ // ...
+ // if_whatever.Else();
+ // ...
+ // if_whatever.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_something(this);
+ // if_something.If<Condition>(arg1, arg2);
+ // if_something.Then();
+ // ...
+ // if_something.Else();
+ // ...
+ // if_something.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_finally(this, &cont);
+ // if_finally.Then();
+ // // continues after then code of if_whatever or if_something.
+ // ...
+ // if_finally.Else();
+ // // continues after else code of if_whatever or if_something.
+ // ...
+ // if_finally.End();
+ void JoinContinuation(HIfContinuation* continuation);
+
void Then();
void Else();
void End();
void Deopt(const char* reason);
+ void ThenDeopt(const char* reason) {
+ Then();
+ Deopt(reason);
+ }
void ElseDeopt(const char* reason) {
Else();
Deopt(reason);
@@ -1395,26 +1518,45 @@ class HGraphBuilder {
void Return(HValue* value);
private:
- void AddCompare(HControlInstruction* compare);
-
- Zone* zone() { return builder_->zone(); }
+ HControlInstruction* AddCompare(HControlInstruction* compare);
+
+ HGraphBuilder* builder() const { return builder_; }
+
+ void AddMergeAtJoinBlock(bool deopt);
+
+ void Finish();
+ void Finish(HBasicBlock** then_continuation,
+ HBasicBlock** else_continuation);
+
+ class MergeAtJoinBlock : public ZoneObject {
+ public:
+ MergeAtJoinBlock(HBasicBlock* block,
+ bool deopt,
+ MergeAtJoinBlock* next)
+ : block_(block),
+ deopt_(deopt),
+ next_(next) {}
+ HBasicBlock* block_;
+ bool deopt_;
+ MergeAtJoinBlock* next_;
+ };
HGraphBuilder* builder_;
- int position_;
bool finished_ : 1;
- bool deopt_then_ : 1;
- bool deopt_else_ : 1;
bool did_then_ : 1;
bool did_else_ : 1;
+ bool did_else_if_ : 1;
bool did_and_ : 1;
bool did_or_ : 1;
bool captured_ : 1;
bool needs_compare_ : 1;
+ bool pending_merge_block_ : 1;
HBasicBlock* first_true_block_;
- HBasicBlock* last_true_block_;
HBasicBlock* first_false_block_;
HBasicBlock* split_edge_merge_block_;
- HBasicBlock* merge_block_;
+ MergeAtJoinBlock* merge_at_join_blocks_;
+ int normal_merge_at_join_block_count_;
+ int deopt_merge_at_join_block_count_;
};
class LoopBuilder V8_FINAL {
@@ -1478,12 +1620,20 @@ class HGraphBuilder {
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
- HValue* constructor_function);
+ HValue* constructor_function = NULL);
+
+ enum FillMode {
+ DONT_FILL_WITH_HOLE,
+ FILL_WITH_HOLE
+ };
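Swapping the bare bool parameter for the FillMode enum changes nothing semantically; the point is call-site readability. A small illustration of the difference:

    enum FillMode { DONT_FILL_WITH_HOLE, FILL_WITH_HOLE };

    // Two declarations of the same operation; only the second reads at call sites.
    void AllocateArrayBool(int /* capacity */, bool /* fill_with_hole */) {}
    void AllocateArrayEnum(int /* capacity */, FillMode /* fill_mode */) {}

    int main() {
      AllocateArrayBool(8, true);            // opaque: what does `true` mean here?
      AllocateArrayEnum(8, FILL_WITH_HOLE);  // intent is visible in the call itself
      return 0;
    }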
+
+ ElementsKind kind() { return kind_; }
HValue* AllocateEmptyArray();
HValue* AllocateArray(HValue* capacity, HValue* length_field,
- bool fill_with_hole);
+ FillMode fill_mode = FILL_WITH_HOLE);
HValue* GetElementsLocation() { return elements_location_; }
+ HValue* EmitMapCode();
private:
Zone* zone() const { return builder_->zone(); }
@@ -1497,12 +1647,12 @@ class HGraphBuilder {
return JSArray::kPreallocatedArrayElements;
}
- HValue* EmitMapCode();
HValue* EmitInternalMapCode();
HValue* EstablishEmptyArrayAllocationSize();
HValue* EstablishAllocationSize(HValue* length_node);
HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
- HValue* length_field, bool fill_with_hole);
+ HValue* length_field,
+ FillMode fill_mode = FILL_WITH_HOLE);
HGraphBuilder* builder_;
ElementsKind kind_;
@@ -1512,6 +1662,9 @@ class HGraphBuilder {
HInnerAllocatedObject* elements_location_;
};
+ HValue* BuildAllocateArrayFromLength(JSArrayBuilder* array_builder,
+ HValue* length_argument);
+
HValue* BuildAllocateElements(ElementsKind kind,
HValue* capacity);
@@ -1558,15 +1711,16 @@ class HGraphBuilder {
ElementsKind kind,
int length);
+ HValue* BuildElementIndexHash(HValue* index);
+
void BuildCompareNil(
HValue* value,
Handle<Type> type,
- int position,
HIfContinuation* continuation);
- HValue* BuildCreateAllocationMemento(HValue* previous_object,
- int previous_object_size,
- HValue* payload);
+ void BuildCreateAllocationMemento(HValue* previous_object,
+ HValue* previous_object_size,
+ HValue* payload);
HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
CompilationInfo* info);
@@ -1576,46 +1730,72 @@ class HGraphBuilder {
HInstruction* BuildGetNativeContext();
HInstruction* BuildGetArrayFunction();
+ protected:
+ void SetSourcePosition(int position) {
+ ASSERT(position != RelocInfo::kNoPosition);
+ position_ = position;
+ }
+
+ template <typename ViewClass>
+ void BuildArrayBufferViewInitialization(HValue* obj,
+ HValue* buffer,
+ HValue* byte_offset,
+ HValue* byte_length);
+
private:
HGraphBuilder();
+ HValue* BuildUncheckedDictionaryElementLoadHelper(
+ HValue* elements,
+ HValue* key,
+ HValue* hash,
+ HValue* mask,
+ int current_probe);
+
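BuildUncheckedDictionaryElementLoadHelper unrolls a fixed number of hash-table probes inline, falling back to the generic path when none hits. A hedged sketch of the probing idea; the quadratic probe step is an assumption modeled on V8's hash tables, and empty-slot early exit is elided:

    #include <cstdint>
    #include <cstdio>

    int FindEntry(const std::uint32_t* keys, std::uint32_t mask,
                  std::uint32_t hash, std::uint32_t key, int max_probes) {
      for (int probe = 0; probe < max_probes; ++probe) {
        // Triangular-number step: hash, hash+1, hash+3, hash+6, ... masked to size.
        std::uint32_t index = (hash + probe * (probe + 1) / 2) & mask;
        if (keys[index] == key) return static_cast<int>(index);
      }
      return -1;  // give up; the generated code would take the slow path here
    }

    int main() {
      std::uint32_t keys[8] = {0, 0, 42, 0, 0, 0, 0, 0};
      std::printf("%d\n", FindEntry(keys, 7, 42, 42, 4));  // prints 2
      return 0;
    }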
void PadEnvironmentForContinuation(HBasicBlock* from,
HBasicBlock* continuation);
+ template <class I>
+ I* AddInstructionTyped(I* instr) {
+ return I::cast(AddInstruction(instr));
+ }
+
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
+ int position_;
};
template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
+inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
const char* reason, Deoptimizer::BailoutType type) {
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return NULL;
}
if (current_block()->IsDeoptimizing()) return NULL;
- HDeoptimize* instr = New<HDeoptimize>(reason, type);
- AddInstruction(instr);
+ HBasicBlock* after_deopt_block = CreateBasicBlock(
+ current_block()->last_environment());
+ HDeoptimize* instr = New<HDeoptimize>(reason, type, after_deopt_block);
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_inserted()->Increment();
- graph()->set_has_soft_deoptimize(true);
}
- current_block()->MarkAsDeoptimizing();
+ FinishCurrentBlock(instr);
+ set_current_block(after_deopt_block);
return instr;
}
template<>
-inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
+inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
const char* reason, Deoptimizer::BailoutType type) {
- return static_cast<HDeoptimize*>(AddUncasted<HDeoptimize>(reason, type));
+ return Add<HDeoptimize>(reason, type);
}
template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(
+inline HSimulate* HGraphBuilder::Add<HSimulate>(
BailoutId id,
RemovableSimulate removable) {
HSimulate* instr = current_block()->CreateSimulate(id, removable);
@@ -1625,35 +1805,83 @@ inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(
template<>
+inline HSimulate* HGraphBuilder::Add<HSimulate>(
+ BailoutId id) {
+ return Add<HSimulate>(id, FIXED_SIMULATE);
+}
+
+
+template<>
inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(BailoutId id) {
- return AddUncasted<HSimulate>(id, FIXED_SIMULATE);
+ return Add<HSimulate>(id, FIXED_SIMULATE);
}
template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
+inline HReturn* HGraphBuilder::Add<HReturn>(HValue* value) {
int num_parameters = graph()->info()->num_parameters();
HValue* params = AddUncasted<HConstant>(num_parameters);
HReturn* return_instruction = New<HReturn>(value, params);
- current_block()->FinishExit(return_instruction);
+ FinishExitCurrentBlock(return_instruction);
return return_instruction;
}
template<>
+inline HReturn* HGraphBuilder::Add<HReturn>(HConstant* value) {
+ return Add<HReturn>(static_cast<HValue*>(value));
+}
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
+ return Add<HReturn>(value);
+}
+
+
+template<>
inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) {
- return AddUncasted<HReturn>(static_cast<HValue*>(value));
+ return Add<HReturn>(value);
}
template<>
-inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
+inline HCallRuntime* HGraphBuilder::Add<HCallRuntime>(
+ Handle<String> name,
+ const Runtime::Function* c_function,
+ int argument_count) {
+ HCallRuntime* instr = New<HCallRuntime>(name, c_function, argument_count);
+ if (graph()->info()->IsStub()) {
+ // When compiling code stubs, we don't want to save all double registers
+ // upon entry to the stub, but instead have the call runtime instruction
+ // save the double registers only on-demand (in the fallback case).
+ instr->set_save_doubles(kSaveFPRegs);
+ }
+ AddInstruction(instr);
+ return instr;
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
+ Handle<String> name,
+ const Runtime::Function* c_function,
+ int argument_count) {
+ return Add<HCallRuntime>(name, c_function, argument_count);
+}
+
+
+template<>
+inline HContext* HGraphBuilder::New<HContext>() {
return HContext::New(zone());
}
-class HOptimizedGraphBuilder V8_FINAL
- : public HGraphBuilder, public AstVisitor {
+template<>
+inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
+ return New<HContext>();
+}
+
+class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
@@ -1720,6 +1948,8 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* context() { return environment()->context(); }
+ HOsrBuilder* osr() const { return osr_; }
+
void Bailout(BailoutReason reason);
HBasicBlock* CreateJoin(HBasicBlock* first,
@@ -1738,7 +1968,7 @@ class HOptimizedGraphBuilder V8_FINAL
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
- private:
+ protected:
// Type of a member function that generates inline code for a native function.
typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
(CallRuntime* call);
@@ -1825,6 +2055,12 @@ class HOptimizedGraphBuilder V8_FINAL
HBasicBlock* loop_successor,
HBasicBlock* break_block);
+  // Builds a loop entry.
+  HBasicBlock* BuildLoopEntry();
+
+  // Builds a loop entry that respects OSR requirements.
+  HBasicBlock* BuildLoopEntry(IterationStatement* statement);
+
HBasicBlock* JoinContinue(IterationStatement* statement,
HBasicBlock* exit_block,
HBasicBlock* continue_block);
@@ -1850,21 +2086,22 @@ class HOptimizedGraphBuilder V8_FINAL
env->Bind(index, value);
if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
HEnvironmentMarker* bind =
- new(zone()) HEnvironmentMarker(HEnvironmentMarker::BIND, index);
- AddInstruction(bind);
+ Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index);
+ USE(bind);
#ifdef DEBUG
bind->set_closure(env->closure());
#endif
}
}
+
HValue* LookupAndMakeLive(Variable* var) {
HEnvironment* env = environment();
int index = env->IndexFor(var);
HValue* value = env->Lookup(index);
if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
HEnvironmentMarker* lookup =
- new(zone()) HEnvironmentMarker(HEnvironmentMarker::LOOKUP, index);
- AddInstruction(lookup);
+ Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index);
+ USE(lookup);
#ifdef DEBUG
lookup->set_closure(env->closure());
#endif
@@ -1902,6 +2139,7 @@ class HOptimizedGraphBuilder V8_FINAL
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ private:
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,
@@ -1953,27 +2191,120 @@ class HOptimizedGraphBuilder V8_FINAL
void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
- int position,
BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(int position,
+ void HandlePolymorphicLoadNamedField(BailoutId ast_id,
BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name);
- HInstruction* TryLoadPolymorphicAsMonomorphic(HValue* object,
- SmallMapList* types,
- Handle<String> name);
- void HandlePolymorphicStoreNamedField(int position,
- BailoutId assignment_id,
+
+ void VisitTypedArrayInitialize(CallRuntime* expr);
+
+ bool IsCallNewArrayInlineable(CallNew* expr);
+ void BuildInlinedCallNewArray(CallNew* expr);
+
+ void VisitDataViewInitialize(CallRuntime* expr);
+
+ class PropertyAccessInfo {
+ public:
+ PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name)
+ : lookup_(isolate),
+ map_(map),
+ name_(name),
+ access_(HObjectAccess::ForMap()) { }
+
+    // Checks whether this PropertyAccessInfo can be handled as a monomorphic
+    // named load. It additionally fills in the fields necessary to generate
+    // the lookup code.
+ bool CanLoadMonomorphic();
+
+    // Checks whether all types behave uniformly when loading name. If all of
+    // the maps behave the same, a single monomorphic load instruction can be
+    // emitted, guarded by a single map-check instruction that tests whether
+    // the receiver is an instance of any of the types.
+ // This method skips the first type in types, assuming that this
+ // PropertyAccessInfo is built for types->first().
+ bool CanLoadAsMonomorphic(SmallMapList* types);
+
+ bool IsJSObjectFieldAccessor() {
+ int offset; // unused
+ return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
+ }
+
+ bool GetJSObjectFieldAccess(HObjectAccess* access) {
+ if (IsStringLength()) {
+ *access = HObjectAccess::ForStringLength();
+ return true;
+ } else if (IsArrayLength()) {
+ *access = HObjectAccess::ForArrayLength(map_->elements_kind());
+ return true;
+ } else {
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
+ *access = HObjectAccess::ForJSObjectOffset(offset);
+ return true;
+ }
+ return false;
+ }
+ }
+
+ bool has_holder() { return !holder_.is_null(); }
+
+ LookupResult* lookup() { return &lookup_; }
+ Handle<Map> map() { return map_; }
+ Handle<JSObject> holder() { return holder_; }
+ Handle<JSFunction> accessor() { return accessor_; }
+ Handle<Object> constant() { return constant_; }
+ HObjectAccess access() { return access_; }
+
+ private:
+ Isolate* isolate() { return lookup_.isolate(); }
+
+ bool IsStringLength() {
+ return map_->instance_type() < FIRST_NONSTRING_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool IsArrayLength() {
+ return map_->instance_type() == JS_ARRAY_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool LoadResult(Handle<Map> map);
+ bool LookupDescriptor();
+ bool LookupInPrototypes();
+ bool IsCompatibleForLoad(PropertyAccessInfo* other);
+
+ void GeneralizeRepresentation(Representation r) {
+ access_ = access_.WithRepresentation(
+ access_.representation().generalize(r));
+ }
+
+ LookupResult lookup_;
+ Handle<Map> map_;
+ Handle<String> name_;
+ Handle<JSObject> holder_;
+ Handle<JSFunction> accessor_;
+ Handle<Object> constant_;
+ HObjectAccess access_;
+ };
+
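PropertyAccessInfo centralizes what the deleted TryLoadPolymorphicAsMonomorphic did inline: if every receiver map resolves the named property to a compatible access, one guarded load replaces a polymorphic dispatch. A toy model of the compatibility check, with field offset standing in for the full representation/holder comparison:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy stand-in for per-map property lookup results.
    struct AccessInfo {
      std::string name;
      int field_offset;
    };

    // If every map resolves `name` to the same field layout, one guarded load
    // serves all of them; otherwise the access stays polymorphic.
    bool CanLoadAsMonomorphic(const std::vector<AccessInfo>& types) {
      for (std::size_t i = 1; i < types.size(); ++i) {
        if (types[i].field_offset != types[0].field_offset) return false;
      }
      return true;
    }

    int main() {
      std::vector<AccessInfo> uniform = {{"x", 8}, {"x", 8}};
      std::vector<AccessInfo> divergent = {{"x", 8}, {"x", 16}};
      std::printf("%d %d\n", CanLoadAsMonomorphic(uniform),
                  CanLoadAsMonomorphic(divergent));  // prints: 1 0
      return 0;
    }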
+ HInstruction* BuildLoadMonomorphic(PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor = true);
+
+ void HandlePolymorphicStoreNamedField(BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
Handle<String> name);
- bool TryStorePolymorphicAsMonomorphic(int position,
- BailoutId assignment_id,
+ bool TryStorePolymorphicAsMonomorphic(BailoutId assignment_id,
HValue* object,
HValue* value,
SmallMapList* types,
@@ -1995,9 +2326,9 @@ class HOptimizedGraphBuilder V8_FINAL
HInstruction* BuildStringCharCodeAt(HValue* string,
HValue* index);
- HInstruction* BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right);
+ HValue* BuildBinaryOperation(BinaryOperation* expr,
+ HValue* left,
+ HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
@@ -2022,8 +2353,6 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* key,
HValue* val,
SmallMapList* maps,
- BailoutId ast_id,
- int position,
bool is_store,
KeyedAccessStoreMode store_mode,
bool* has_side_effects);
@@ -2032,31 +2361,20 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* key,
HValue* val,
Expression* expr,
- BailoutId ast_id,
- int position,
bool is_store,
bool* has_side_effects);
HInstruction* BuildLoadNamedGeneric(HValue* object,
Handle<String> name,
Property* expr);
- HInstruction* BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder);
- HInstruction* BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Handle<Map> map);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
void BuildLoad(Property* property,
- int position,
BailoutId ast_id);
void PushLoad(Property* property,
HValue* object,
- HValue* key,
- int position);
+ HValue* key);
void BuildStoreForEffect(Expression* expression,
Property* prop,
@@ -2093,8 +2411,7 @@ class HOptimizedGraphBuilder V8_FINAL
HInstruction* BuildThisFunction();
HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
- Handle<Object> allocation_site,
- AllocationSiteMode mode);
+ AllocationSiteUsageContext* site_context);
void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
HInstruction* object);
@@ -2104,11 +2421,14 @@ class HOptimizedGraphBuilder V8_FINAL
HInstruction* object_elements);
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
- HInstruction* object);
+ HInstruction* object,
+ AllocationSiteUsageContext* site_context,
+ PretenureFlag pretenure_flag);
void BuildEmitElements(Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
- HValue* object_elements);
+ HValue* object_elements,
+ AllocationSiteUsageContext* site_context);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
@@ -2116,7 +2436,8 @@ class HOptimizedGraphBuilder V8_FINAL
void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
- HValue* object_elements);
+ HValue* object_elements,
+ AllocationSiteUsageContext* site_context);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@@ -2125,11 +2446,6 @@ class HOptimizedGraphBuilder V8_FINAL
HValue* receiver,
Handle<Map> receiver_map);
- bool MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount);
-
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
diff --git a/chromium/v8/src/i18n.cc b/chromium/v8/src/i18n.cc
index 0ae19c8232d..80a739c285e 100644
--- a/chromium/v8/src/i18n.cc
+++ b/chromium/v8/src/i18n.cc
@@ -464,7 +464,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
Handle<String> key = isolate->factory()->NewStringFromAscii(
CStrVector("minimumSignificantDigits"));
- if (resolved->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
@@ -477,7 +477,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
key = isolate->factory()->NewStringFromAscii(
CStrVector("maximumSignificantDigits"));
- if (resolved->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
@@ -855,7 +855,7 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::SimpleDateFormat*>(
obj->GetInternalField(0));
}
@@ -872,7 +872,7 @@ void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
- object->Dispose();
+ object->Reset();
}
@@ -920,7 +920,7 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("numberFormat"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
}
@@ -936,7 +936,7 @@ void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
- object->Dispose();
+ object->Reset();
}
@@ -981,7 +981,7 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("collator"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
}
@@ -997,7 +997,7 @@ void Collator::DeleteCollator(v8::Isolate* isolate,
v8::Utils::OpenPersistent(object))->GetInternalField(0));
// Then dispose of the persistent handle to JS object.
- object->Dispose();
+ object->Reset();
}
@@ -1045,7 +1045,7 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("breakIterator"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
}
@@ -1064,7 +1064,7 @@ void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
v8::Utils::OpenPersistent(object))->GetInternalField(1));
// Then dispose of the persistent handle to JS object.
- object->Dispose();
+ object->Reset();
}
} } // namespace v8::internal
diff --git a/chromium/v8/src/i18n.js b/chromium/v8/src/i18n.js
index a80fd4d9b4a..6b563a00f91 100644
--- a/chromium/v8/src/i18n.js
+++ b/chromium/v8/src/i18n.js
@@ -290,7 +290,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
* Parameter locales is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
- if (service.match(GetServiceRE()) === null) {
+ if (IS_NULL(service.match(GetServiceRE()))) {
throw new $Error('Internal error, wrong service type: ' + service);
}
@@ -447,7 +447,7 @@ function resolveLocale(service, requestedLocales, options) {
* lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
- if (service.match(GetServiceRE()) === null) {
+ if (IS_NULL(service.match(GetServiceRE()))) {
throw new $Error('Internal error, wrong service type: ' + service);
}
@@ -463,7 +463,7 @@ function lookupMatcher(service, requestedLocales) {
if (AVAILABLE_LOCALES[service][locale] !== undefined) {
// Return the resolved locale and extension.
var extensionMatch = requestedLocales[i].match(GetUnicodeExtensionRE());
- var extension = (extensionMatch === null) ? '' : extensionMatch[0];
+ var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
}
// Truncate locale if possible.
@@ -535,7 +535,7 @@ function parseExtension(extension) {
* Converts parameter to an Object if possible.
*/
function toObject(value) {
- if (value === undefined || value === null) {
+ if (IS_NULL_OR_UNDEFINED(value)) {
throw new $TypeError('Value cannot be converted to an Object.');
}
@@ -733,7 +733,7 @@ function toTitleCaseWord(word) {
function canonicalizeLanguageTag(localeID) {
// null is typeof 'object' so we have to do extra check.
if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
- localeID === null) {
+ IS_NULL(localeID)) {
throw new $TypeError('Language ID should be string or object.');
}
@@ -1302,10 +1302,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
*/
function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
- var number = $Number(value);
- if (number === -0) {
- number = 0;
- }
+ var number = $Number(value) + 0;
return %InternalNumberFormat(formatter.formatter, number);
}
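The replacement for the explicit -0 branch leans on IEEE-754 addition: (-0) + (+0) is +0, and x + 0 is x for every other value, so the single addition normalizes the sign of zero. Demonstrated with C++ doubles, which follow the same arithmetic rule:

    #include <cmath>
    #include <cstdio>

    int main() {
      double neg_zero = -0.0;
      std::printf("%d\n", std::signbit(neg_zero) ? 1 : 0);        // 1: it is -0
      std::printf("%d\n", std::signbit(neg_zero + 0.0) ? 1 : 0);  // 0: now +0
      std::printf("%g\n", 3.5 + 0.0);                             // 3.5: unchanged
      return 0;
    }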
@@ -1367,7 +1364,7 @@ function toLDMLString(options) {
ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
option = getOption('timeZoneName', 'string', ['short', 'long']);
- ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
+ ldmlString += appendToLDMLString(option, {short: 'z', long: 'zzzz'});
return ldmlString;
}
@@ -1440,16 +1437,16 @@ function fromLDMLString(ldmlString) {
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
- match = ldmlString.match(/v{1,2}/g);
+ match = ldmlString.match(/z|zzzz/g);
options = appendToDateTimeObject(
- options, 'timeZoneName', match, {v: 'short', vv: 'long'});
+ options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
return options;
}
function appendToDateTimeObject(options, option, match, pairs) {
- if (match === null) {
+ if (IS_NULL(match)) {
if (!options.hasOwnProperty(option)) {
defineWEProperty(options, option, undefined);
}
@@ -1751,7 +1748,7 @@ function canonicalizeTimeZoneID(tzID) {
// We expect only _ and / beside ASCII letters.
// All inputs should conform to Area/Location from now on.
var match = GetTimezoneNameCheckRE().exec(tzID);
- if (match === null) {
+ if (IS_NULL(match)) {
throw new $RangeError('Expected Area/Location for time zone, got ' + tzID);
}
@@ -1971,7 +1968,7 @@ $Object.defineProperty($String.prototype, 'localeCompare', {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- if (this === undefined || this === null) {
+ if (IS_NULL_OR_UNDEFINED(this)) {
throw new $TypeError('Method invoked on undefined or null value.');
}
diff --git a/chromium/v8/src/ia32/assembler-ia32-inl.h b/chromium/v8/src/ia32/assembler-ia32-inl.h
index 5a35b207f72..ee5d991e38a 100644
--- a/chromium/v8/src/ia32/assembler-ia32-inl.h
+++ b/chromium/v8/src/ia32/assembler-ia32-inl.h
@@ -47,6 +47,7 @@ namespace internal {
static const byte kCallOpcode = 0xE8;
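+// A code-age sequence is a single call instruction: the 0xE8 opcode byte
+// plus a 4-byte relative displacement, hence a length of 5.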
+static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
@@ -124,12 +125,6 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return &Memory::Object_at(pc_);
-}
-
-
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
@@ -144,9 +139,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ return Memory::Address_at(pc_);
}
@@ -190,6 +185,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
@@ -241,6 +243,18 @@ Object** RelocInfo::call_object_address() {
}
+void RelocInfo::WipeOut() {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ // Effectively write zero into the relocation.
+ Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
return *pc_ == kCallOpcode;
}
@@ -379,7 +393,8 @@ void Assembler::emit(Handle<Object> handle) {
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)) {
+ } else if (!RelocInfo::IsNone(rmode)
+ && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);
diff --git a/chromium/v8/src/ia32/assembler-ia32.cc b/chromium/v8/src/ia32/assembler-ia32.cc
index e5456da4746..733432028af 100644
--- a/chromium/v8/src/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/ia32/assembler-ia32.cc
@@ -53,6 +53,7 @@ bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
+uint64_t CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
@@ -88,8 +89,6 @@ const char* IntelDoubleRegister::AllocationIndexToString(int index) {
}
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@@ -552,6 +551,16 @@ void Assembler::mov_w(const Operand& dst, Register src) {
}
+void Assembler::mov_w(const Operand& dst, int16_t imm16) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0xC7);
+ emit_operand(eax, dst);
+ EMIT(static_cast<int8_t>(imm16 & 0xff));
+ EMIT(static_cast<int8_t>(imm16 >> 8));
+}
+
+
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
@@ -1131,30 +1140,21 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
+ }
+
EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (RelocInfo::IsNone(imm.rmode_) &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
} else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
}
+ emit(imm);
}
@@ -1178,6 +1178,9 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
test(op.reg(), imm);
return;
}
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
@@ -1185,9 +1188,26 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
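The new test_b(Register, uint8_t) keeps the three encodings distinct: the A8 short form for al, the F6 /0 byte form for the other byte-addressable registers (al/cl/dl/bl), and the full F7 /0 dword form with a 32-bit immediate otherwise. The byte-immediate forms, checked with a minimal encoder:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Byte-immediate forms only (Intel SDM): A8 ib for al, F6 /0 ib otherwise.
    std::vector<std::uint8_t> EncodeTestB(int reg_code, std::uint8_t imm8) {
      if (reg_code == 0) return {0xA8, imm8};  // eax: short form, no ModRM byte
      std::uint8_t modrm = static_cast<std::uint8_t>(0xC0 | reg_code);  // /0
      return {0xF6, modrm, imm8};
    }

    int main() {
      for (std::uint8_t b : EncodeTestB(1, 0x7F)) std::printf("%02X ", b);
      std::printf("\n");  // prints: F6 C1 7F  (test cl, 0x7f)
      return 0;
    }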
+
void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
return;
}
EnsureSpace ensure_space(this);
@@ -1402,7 +1422,8 @@ void Assembler::call(Handle<Code> code,
TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode)
+ || rmode == RelocInfo::CODE_AGE_SEQUENCE);
EMIT(0xE8);
emit(code, rmode, ast_id);
}
@@ -2046,7 +2067,26 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+void Assembler::andps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x57);
@@ -2054,39 +2094,68 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
}
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+void Assembler::addps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- EMIT(0xF2);
EMIT(0x0F);
- EMIT(0x51);
+ EMIT(0x58);
emit_sse_operand(dst, src);
}
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+void Assembler::subps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
- EMIT(0x66);
EMIT(0x0F);
- EMIT(0x54);
+ EMIT(0x5C);
emit_sse_operand(dst, src);
}
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+void Assembler::mulps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, const Operand& src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
- EMIT(0x56);
+ EMIT(0x54);
emit_sse_operand(dst, src);
}
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
- EMIT(0x2E);
+ EMIT(0x56);
emit_sse_operand(dst, src);
}
@@ -2163,6 +2232,17 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
}
+void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(IsEnabled(SSE2));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC6);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2235,18 +2315,6 @@ void Assembler::prefetch(const Operand& src, int level) {
}
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2267,16 +2335,6 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
}
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::movss(const Operand& dst, XMMRegister src ) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2297,16 +2355,6 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
}
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::movd(XMMRegister dst, const Operand& src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2335,7 +2383,7 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
EMIT(imm8);
}
@@ -2474,6 +2522,11 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
}
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+ EMIT(0xC0 | (dst.code() << 3) | src.code());
+}
+
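This overload exists for the extractps fix above: EXTRACTPS r/m32, xmm, imm8 encodes the XMM source in ModRM.reg and the GP destination in ModRM.rm, the opposite of the existing (Register, XMMRegister) overload, so the old emit call had the fields reversed. The ModRM byte the corrected call produces:

    #include <cstdint>
    #include <cstdio>

    // ModRM with mod=11 (register-direct): reg field in bits 5..3, rm in 2..0.
    std::uint8_t ModRM(int reg_field, int rm_field) {
      return static_cast<std::uint8_t>(0xC0 | (reg_field << 3) | rm_field);
    }

    int main() {
      int xmm1 = 1, edx = 2;
      // extractps edx, xmm1, 0 encodes as 66 0F 3A 17 /r with reg=xmm1, rm=edx.
      std::printf("%02X\n", ModRM(xmm1, edx));  // prints CA
      return 0;
    }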
+
void Assembler::Print() {
Disassembler::Decode(isolate(), stdout, buffer_, pc_);
}
diff --git a/chromium/v8/src/ia32/assembler-ia32.h b/chromium/v8/src/ia32/assembler-ia32.h
index 55eff931907..6ed0bc6d662 100644
--- a/chromium/v8/src/ia32/assembler-ia32.h
+++ b/chromium/v8/src/ia32/assembler-ia32.h
@@ -535,32 +535,54 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
+ if (Check(f, cross_compile_)) return true;
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
+
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
static uint64_t found_by_runtime_probing_only_;
+ static uint64_t cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -616,13 +638,6 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
@@ -713,6 +728,7 @@ class Assembler : public AssemblerBase {
void mov_w(Register dst, const Operand& src);
void mov_w(const Operand& dst, Register src);
+ void mov_w(const Operand& dst, int16_t imm16);
void mov(Register dst, int32_t imm32);
void mov(Register dst, const Immediate& x);
@@ -852,7 +868,7 @@ class Assembler : public AssemblerBase {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
+ void test_b(Register reg, uint8_t imm8);
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
@@ -995,8 +1011,31 @@ class Assembler : public AssemblerBase {
void cpuid();
+ // SSE instructions
+ void movaps(XMMRegister dst, XMMRegister src);
+ void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+
+ void andps(XMMRegister dst, const Operand& src);
+ void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
+ void xorps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
+ void orps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
+
+ void addps(XMMRegister dst, const Operand& src);
+ void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
+ void subps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
+ void mulps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
+ void divps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
+
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src) {
+ cvttss2si(dst, Operand(src));
+ }
void cvttsd2si(Register dst, const Operand& src);
void cvtsd2si(Register dst, XMMRegister src);
@@ -1012,13 +1051,12 @@ class Assembler : public AssemblerBase {
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, const Operand& src);
enum RoundingMode {
@@ -1036,8 +1074,6 @@ class Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
-
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
@@ -1050,19 +1086,18 @@ class Assembler : public AssemblerBase {
}
}
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
- void movss(XMMRegister dst, XMMRegister src);
+ void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
void extractps(Register dst, XMMRegister src, byte imm8);
void pand(XMMRegister dst, XMMRegister src);
@@ -1136,16 +1171,14 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- byte byte_at(int pos) { return buffer_[pos]; }
+ byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst, Register src);
byte* addr_at(int pos) { return buffer_ + pos; }
diff --git a/chromium/v8/src/ia32/builtins-ia32.cc b/chromium/v8/src/ia32/builtins-ia32.cc
index a1597481aa6..5a3fa78e339 100644
--- a/chromium/v8/src/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/ia32/builtins-ia32.cc
@@ -539,10 +539,12 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 8 * kPointerSize));
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ popad();
__ ret(0);
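The extra stack slot means the C callback now receives the isolate as a second argument. A hedged sketch of the C-side shape this implies; the function name and exact parameter types below are assumptions read off the stub's argument setup, not V8's actual declaration:

    // Opaque stand-in for v8::internal::Isolate.
    struct Isolate;

    // Assumed shape only: arg 0 is the code-age sequence address, arg 1 the
    // isolate, matching the two stack slots filled before CallCFunction.
    extern "C" void MakeCodeYoungExample(unsigned char* sequence_address,
                                         Isolate* isolate) {
      (void)sequence_address;  // start of the patched code-age call sequence
      (void)isolate;           // needed for code-patching facilities
    }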
@@ -561,7 +563,46 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the
+  // fact that make_code_young doesn't do any garbage collection, which allows
+  // us to save/restore the registers without worrying about which of them
+  // contain pointers.
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ pop(eax); // Pop return address into scratch register.
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+ __ push(eax); // Push return address after frame prologue.
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -570,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -580,6 +621,21 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ } else {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+ }
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -628,25 +684,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
- // TODO(mvstanton): We should save these regs, do this in a future
- // checkin.
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
@@ -1063,13 +1100,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- &not_cached);
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
@@ -1326,6 +1361,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be
+  // done.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
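
The new builtin treats the stack limit purely as a flag: if the limit is no longer hit, nothing is pending and it simply returns; otherwise it services the stack guard and then falls through to the regular on-stack-replacement builtin. A control-flow model of that logic (all names are stand-ins, not the real V8 runtime entries):

#include <cstdint>
#include <cstdio>

static uintptr_t stack_limit = 0x1000;  // stand-in for the isolate's limit

static void RunStackGuard()      { std::puts("stack guard"); }
static void OnStackReplacement() { std::puts("enter OSR"); }

static void OsrAfterStackCheck(uintptr_t sp) {
  if (sp >= stack_limit) return;  // limit not hit: nothing pending, return
  RunStackGuard();                // service the pending interrupt first
  OnStackReplacement();           // then continue into OSR proper
}

int main() {
  OsrAfterStackCheck(0x2000);  // above the limit: returns immediately
  OsrAfterStackCheck(0x0800);  // below the limit: guard, then OSR
  return 0;
}
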
+
#undef __
}
} // namespace v8::internal
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.cc b/chromium/v8/src/ia32/code-stubs-ia32.cc
index a83c1ae91d1..04818149202 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.cc
+++ b/chromium/v8/src/ia32/code-stubs-ia32.cc
@@ -64,6 +64,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
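
NumberToStringStub now carries a stub interface descriptor: one register parameter (the tagged number in eax) and Runtime::kNumberToString as the bailout target. A plain-C++ mirror of the descriptor-filling pattern, with hypothetical field names rather than the real CodeStubInterfaceDescriptor:

// Sketch of the descriptor shape; symbolic register names stand in for
// the real Register values.
struct DescriptorSketch {
  int register_param_count;
  const char* const* register_params;
  const char* deoptimization_handler;  // models the runtime entry pointer
};

void InitNumberToStringDescriptor(DescriptorSketch* d) {
  static const char* const registers[] = { "eax" };  // the tagged number
  d->register_param_count = 1;
  d->register_params = registers;
  d->deoptimization_handler = "Runtime::kNumberToString";  // bailout target
}
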
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -71,7 +82,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -82,7 +93,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -107,6 +118,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -127,6 +149,19 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ecx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -157,15 +192,21 @@ static void InitializeArrayConstructorDescriptor(
// eax -- number of arguments
// edi -- function
// ebx -- type info cell with elements kind
- static Register registers[] = { edi, ebx };
- descriptor->register_param_count_ = 2;
+ static Register registers_variable_args[] = { edi, ebx, eax };
+ static Register registers_no_args[] = { edi, ebx };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// The stack parameter count covers the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
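
The key change in this hunk: the argument count in eax used to be communicated through a pointer to the register (stack_parameter_count_ = &eax); it now becomes a real third register parameter, and the handler receives the arguments explicitly (PASS_ARGUMENTS). A sketch of the two descriptor shapes under that reading (symbolic register names, not the V8 API):

struct ArrayCtorDescriptorSketch {
  int register_param_count;
  const char* const* register_params;
  bool pass_arguments;  // models handler_arguments_mode_ == PASS_ARGUMENTS
};

void InitArrayCtorDescriptor(ArrayCtorDescriptorSketch* d,
                             int constant_stack_params) {
  static const char* const no_args[]       = { "edi", "ebx" };
  static const char* const variable_args[] = { "edi", "ebx", "eax" };
  if (constant_stack_params == 0) {
    d->register_param_count = 2;      // function + type-info cell
    d->register_params = no_args;
    d->pass_arguments = false;
  } else {
    d->register_param_count = 3;      // eax (argc) is now a real parameter
    d->register_params = variable_args;
    d->pass_arguments = true;
  }
}
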
@@ -179,15 +220,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// eax -- number of arguments
// edi -- constructor function
- static Register registers[] = { edi };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { edi, eax };
+ static Register registers_no_args[] = { edi };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// The stack parameter count covers the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = eax;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -283,6 +330,29 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
+void NewStringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -432,7 +502,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
+ __ movsd(Operand(esp, i * kDoubleSize), reg);
}
}
const int argument_count = 1;
@@ -448,7 +518,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
+ __ movsd(reg, Operand(esp, i * kDoubleSize));
}
__ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
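
Throughout this file movdbl, V8's private alias, is renamed to movsd, the architectural SSE2 mnemonic for moving a scalar double (the low 64 bits of an XMM register); the emitted instruction is unchanged. The same semantics in intrinsics form, as a small runnable illustration:

#include <emmintrin.h>
#include <cstdio>

// movsd semantics: only the low 64-bit lane moves between xmm and memory.
int main() {
  double spill[1];
  __m128d reg = _mm_set_sd(3.5);      // low lane = 3.5, high lane = 0.0
  _mm_store_sd(spill, reg);           // movsd [mem], xmm -- spill the double
  __m128d back = _mm_load_sd(spill);  // movsd xmm, [mem] -- reload it
  std::printf("%f\n", _mm_cvtsd_f64(back));
  return 0;
}
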
@@ -470,18 +540,6 @@ class FloatingPointHelper : public AllStatic {
// on FPU stack.
static void LoadFloatOperand(MacroAssembler* masm, Register number);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -489,32 +547,11 @@ class FloatingPointHelper : public AllStatic {
Label* non_float,
Register scratch);
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* operand_conversion_failure);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that |operand| has an int32 value. If |int32_result| is different
- // from |scratch|, it will contain that int32 value.
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch);
};
@@ -658,1259 +695,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- //   Smi tagging. These two cases can only happen with shifts
- //   by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
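
The removed smi fast path leans on the ia32 smi representation: a smi is the 31-bit integer shifted left by one with a zero tag bit, so ORing both operands lets a single test cover them, and shift results are range-checked against 0xC0000000 because a value fits in a smi only if its top two bits agree. A self-contained model of those checks (not V8 code):

#include <cassert>
#include <cstdint>

// Model of ia32 smi tagging (kSmiTag == 0, kSmiTagSize == 1); not V8 API.
using Tagged = uint32_t;

Tagged SmiTag(int32_t v)   { return static_cast<uint32_t>(v) << 1; }
int32_t SmiUntag(Tagged t) { return static_cast<int32_t>(t) >> 1; }
bool IsSmi(Tagged t)       { return (t & 1) == 0; }

int main() {
  Tagged a = SmiTag(20), b = SmiTag(22);
  // One branch covers both operands: OR cannot clear a set tag bit.
  assert(IsSmi(a | b));

  // A signed value fits in a smi iff bits 31 and 30 agree, i.e. it lies
  // in [-0x40000000, 0x40000000); this mirrors the 0xC0000000 tests.
  int32_t shifted = SmiUntag(a) << 4;
  bool fits = ((static_cast<uint32_t>(shifted) + 0x40000000u) >> 31) == 0;
  assert(fits && SmiUntag(SmiTag(shifted)) == shifted);
  return 0;
}
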
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats, not_int32, right_arg_changed;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ebx, ecx, xmm2);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm1, edi, ecx, xmm2);
- if (op_ == Token::MOD) {
- if (encoded_right_arg_.has_value) {
- __ cmp(edi, Immediate(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, ecx, xmm2);
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- return; // Handled above.
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
- } else {
- __ mov(edx, Immediate(factory->nan_value()));
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
- } else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, ecx, xmm2);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
- BinaryOpIC::GENERIC,
- BinaryOpIC::GENERIC,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- GenerateAddStrings(masm);
- // Fall through.
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten, losing one of the arguments, as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use the object in edx as the result holder.
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten, losing one of the arguments, as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
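
The OVERWRITE_LEFT/OVERWRITE_RIGHT modes let the removed stub reuse an operand's HeapNumber as the result box whenever that operand is already a heap object, skipping an allocation; only smi operands force a fresh HeapNumber. A sketch of that reuse-or-allocate policy with hypothetical types:

// Hypothetical types; the real stub reuses the operand's HeapNumber box.
enum OverwriteModeSketch { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

struct Arg { bool is_heap_number; double* box; };

double* ResultBox(Arg left, Arg right, OverwriteModeSketch mode,
                  double* (*allocate_heap_number)()) {
  if (mode == OVERWRITE_LEFT && left.is_heap_number) return left.box;
  if (mode == OVERWRITE_RIGHT && right.is_heap_number) return right.box;
  return allocate_heap_number();  // smi operands own no box; allocate fresh
}
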
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
-
-
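
Everything deleted above — the hand-written smi, int32, number, oddball, string, and generic paths — is superseded by the BinaryOpICStub registered earlier in this patch, whose descriptor routes misses to BinaryOpIC_Miss so the stub can be re-specialized as new operand types are seen. A rough, non-V8 sketch of that fast-path/miss shape:

#include <cstdio>
#include <functional>

// Placeholder names; the real miss handler re-generates the stub code.
struct BinaryOpICSketch {
  std::function<double(double, double)> fast_path;  // empty until first miss

  double Miss(double l, double r) {
    std::puts("miss: re-specialize for the observed types");
    fast_path = [](double a, double b) { return a + b; };  // pretend ADD
    return fast_path(l, r);
  }

  double Call(double l, double r) {
    return fast_path ? fast_path(l, r) : Miss(l, r);
  }
};

int main() {
  BinaryOpICSketch ic;
  std::printf("%f\n", ic.Call(1.5, 2.25));  // first call misses
  std::printf("%f\n", ic.Call(1.5, 2.25));  // second call hits the fast path
  return 0;
}
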
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@@ -2034,7 +818,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2049,7 +833,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
@@ -2062,17 +846,17 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
+ __ movsd(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
@@ -2098,13 +882,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
}
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
}
@@ -2221,79 +1005,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-// Warning: can clobber inputs even when it jumps to |conversion_failure|!
-void FloatingPointHelper::LoadUnknownsAsIntegers(
- MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, conversion_failure);
- } else {
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
- }
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- __ TruncateHeapNumberToI(edx, edx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, conversion_failure);
- } else {
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
- }
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
-
- __ TruncateHeapNumberToI(ecx, eax);
-
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
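
The deleted LoadUnknownsAsIntegers implemented the ToInt32 step feeding the bitwise operators: smis untag directly, heap numbers truncate via TruncateHeapNumberToI, undefined converts to zero per ECMA-262 section 9.5, and anything else bails to the conversion_failure label. A simplified model of that ladder (not the real object model; the HEAP_NUMBER arm assumes an in-range double):

#include <cstdint>

enum KindSketch { SMI, HEAP_NUMBER, UNDEFINED, OTHER };
struct ValueSketch { KindSketch kind; int32_t smi; double number; };

bool LoadAsInt32(const ValueSketch& v, int32_t* out) {
  switch (v.kind) {
    case SMI:
      *out = v.smi;                          // untag directly
      return true;
    case HEAP_NUMBER:
      // TruncateHeapNumberToI handles the full double range; this
      // sketch assumes the value already fits in an int32.
      *out = static_cast<int32_t>(v.number);
      return true;
    case UNDEFINED:
      *out = 0;                              // ECMA-262 ToInt32
      return true;
    default:
      return false;                          // jump to conversion_failure
  }
}
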
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -2320,7 +1031,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
__ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
@@ -2329,109 +1040,20 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ Cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ Cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(int32_result, Operand(operand));
- __ cvtsi2sd(xmm_scratch, int32_result);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // Two least significant bits should be both set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
- __ j(not_zero, non_int32);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&done);
}
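
cvtsi2sd writes only the low half of its destination register, so consecutive uses create a false dependency on the register's previous contents; the capitalized Cvtsi2sd wrapper these call sites switch to appears to zero the destination first (e.g. with xorps) before converting. The same idea in intrinsics form:

#include <emmintrin.h>
#include <cstdio>

// cvtsi2sd merges into the destination's low lane only; zeroing the
// register first breaks the false dependency, as a Cvtsi2sd-style
// wrapper would. (Sketch; not the V8 macro assembler itself.)
int main() {
  __m128d dst = _mm_setzero_pd();   // xorps dst, dst
  dst = _mm_cvtsi32_sd(dst, 42);    // cvtsi2sd dst, 42 -- low lane only
  std::printf("%f\n", _mm_cvtsd_f64(dst));
  return 0;
}
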
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -2470,7 +1092,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
+ __ Cvtsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -2485,12 +1107,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
+ __ Cvtsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2501,7 +1123,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2509,7 +1131,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
}
@@ -2604,9 +1226,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
+ __ movsd(Operand(esp, 0), double_exponent);
__ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
+ __ movsd(Operand(esp, 0), double_base);
__ fld_d(Operand(esp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
@@ -2629,7 +1251,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ test_b(eax, 0x5F); // We check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done);
@@ -2683,7 +1305,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// exponent is a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
+ __ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -2696,7 +1318,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
@@ -2704,8 +1326,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
@@ -2713,7 +1335,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
@@ -2756,8 +1378,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
}
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3495,7 +2116,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ call(edx);
// Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -3768,106 +2389,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
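
The helper deleted here moves into the macro assembler (call sites below now use __ LookupNumberStringCache), but the cache layout its comments describe is unchanged: two slots per entry, smis hash to their untagged value, doubles to the xor of their two 32-bit halves. A standalone sketch of the double case (illustrative names, not V8 API):

#include <cstdint>
#include <cstring>

// Index of a double's entry in the number string cache: xor the two halves,
// then mask by (cache length / 2 - 1), since each entry spans two slots.
int NumberStringCacheIndexSketch(double value, int cache_length) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t hash =
      static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  int mask = (cache_length / 2) - 1;
  return static_cast<int>(hash & mask) * 2;  // number slot; string is at +1
}
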
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -4205,6 +2726,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // eax : number of arguments to the construct function
// ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
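
The comment added above spells out the cache cell's contract. Modeled as a small state machine (illustrative types, not V8's), the cell only ever moves forward:

// Once megamorphic, the cell never returns to a narrower state.
enum class CallTargetCacheState {
  kUninitialized,  // cell holds the uninitialized sentinel
  kMonomorphic,    // cell holds a JSFunction (or an AllocationSite for arrays)
  kMegamorphic     // cell holds the megamorphic sentinel; stop specializing
};
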
@@ -4224,9 +2746,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ j(not_equal, &miss);
@@ -4265,6 +2786,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(eax);
__ push(eax);
__ push(edi);
@@ -4430,20 +2952,19 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ } else {
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
+ }
}
@@ -4456,7 +2977,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
save_doubles_code = *(save_doubles.GetCode(isolate));
}
- save_doubles_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
}
@@ -4464,8 +2984,7 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -4508,6 +3027,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// stack alignment is known to be correct. This function takes two arguments
// which are passed on the stack, and we know that the stack has been
// prepared to pass at least two arguments.
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
@@ -5455,33 +3976,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Drop(2);
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ ret(0);
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ ret(0);
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5517,12 +4016,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
__ bind(&done);
@@ -6253,24 +4747,24 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
+ __ Cvtsi2sd(xmm1, ecx);
__ bind(&left);
__ JumpIfSmi(edx, &left_smi, Label::kNear);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ mov(ecx, edx); // Can't clobber edx because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
+ __ Cvtsi2sd(xmm0, ecx);
__ bind(&done);
// Compare operands.
@@ -6784,90 +5278,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore
- { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
- { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub and StringAddStub::Generate
- { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
- // StringAddStub::Generate
- { REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub(kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate);
}
}
@@ -7180,12 +5597,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ mov(edi, eax);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ mov(eax, MemOperand(ebp, parameter_count_offset));
+ // The parameter count above includes the receiver for the arguments passed to
+ // the deoptimization handler. Subtract the receiver for the parameter count
+ // for the call.
+ __ sub(eax, Immediate(1));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ParameterCount argument_count(eax);
+ __ InvokeFunction(
+ edi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- // It's always safe to call the entry hook stub, as the hook itself
- // is not allowed to call back to V8.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -7300,17 +5731,18 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ inc(edx);
__ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ Assert(equal, kExpectedAllocationSiteInCell);
}
- // Save the resulting elements kind in type info
- __ SmiTag(edx);
- __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
- __ SmiUntag(edx);
+  // Save the resulting elements kind in type info. We can't just store edx
+  // in the AllocationSite::transition_info field because the elements kind is
+  // restricted to a portion of the field... upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ add(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset),
+ Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -7343,12 +5775,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
(!FLAG_track_allocation_sites &&
(kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
}
}
}
@@ -7370,11 +5802,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -7447,12 +5879,15 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
- masm->isolate()->heap()->allocation_site_map())));
+ __ cmp(FieldOperand(edx, 0), Immediate(
+ masm->isolate()->factory()->allocation_site_map()));
__ j(not_equal, &no_info);
+ // Only look at the lower 16 bits of the transition info.
__ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(edx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
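
Both hunks in this file rely on the same layout: the elements kind occupies the low bits of AllocationSite::transition_info and the upper bits carry unrelated state. A sketch of that contract (the 16-bit field width matches the "lower 16 bits" comment above, but is otherwise an assumption for illustration):

#include <cstdint>

constexpr uint32_t kElementsKindMaskSketch = (1u << 16) - 1;  // assumed width

uint32_t ReadElementsKind(uint32_t transition_info) {
  return transition_info & kElementsKindMaskSketch;  // upper bits: other state
}

uint32_t TransitionPackedToHoley(uint32_t transition_info, uint32_t delta) {
  // The stub adds the delta in place; that is safe only because a
  // packed->holey step cannot carry out of the kind field.
  return transition_info + delta;
}
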
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.h b/chromium/v8/src/ia32/code-stubs-ia32.h
index 5c8eca37b5b..14259241c85 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.h
+++ b/chromium/v8/src/ia32/code-stubs-ia32.h
@@ -74,7 +74,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -217,30 +216,6 @@ class StringCompareStub: public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
-  // Generate code to do a lookup in the number string cache. If the number in
-  // the register object is found in the cache, the generated code falls
-  // through with the result in the result register. The object and the result
-  // register can be the same. If the number is not found in the cache, the
-  // code jumps to the label not_found, with only the content of register
-  // object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -327,8 +302,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
@@ -468,7 +441,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+ masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
@@ -480,7 +453,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+ masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
diff --git a/chromium/v8/src/ia32/codegen-ia32.cc b/chromium/v8/src/ia32/codegen-ia32.cc
index 84a4d238bd4..ab4029da119 100644
--- a/chromium/v8/src/ia32/codegen-ia32.cc
+++ b/chromium/v8/src/ia32/codegen-ia32.cc
@@ -117,7 +117,7 @@ UnaryMathFunction CreateExpFunction() {
CpuFeatureScope use_sse2(&masm, SSE2);
XMMRegister input = xmm1;
XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
+ __ movsd(input, Operand(esp, 1 * kPointerSize));
__ push(eax);
__ push(ebx);
@@ -125,7 +125,7 @@ UnaryMathFunction CreateExpFunction() {
__ pop(ebx);
__ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
+ __ movsd(Operand(esp, 1 * kPointerSize), result);
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
}
@@ -155,9 +155,9 @@ UnaryMathFunction CreateSqrtFunction() {
// Move double input into registers.
{
CpuFeatureScope use_sse2(&masm, SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
// Load result into floating point register as return value.
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
@@ -462,10 +462,10 @@ OS::MemMoveFunction CreateMemMoveFunction() {
Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
- __ movdbl(xmm0, Operand(src, 0));
- __ movdbl(xmm1, Operand(src, count, times_1, -8));
- __ movdbl(Operand(dst, 0), xmm0);
- __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
@@ -666,8 +666,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
}
// Set transitioned map.
@@ -694,8 +693,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -743,7 +741,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
XMMRegister the_hole_nan = xmm1;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(the_hole_nan,
+ __ movsd(the_hole_nan,
Operand::StaticVariable(canonical_the_hole_nan_reference));
}
__ jmp(&entry);
@@ -768,8 +766,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiUntag(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
__ push(ebx);
@@ -789,7 +787,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
the_hole_nan);
} else {
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
@@ -833,8 +831,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map, success;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -899,9 +896,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// edx: new heap number
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0,
+ __ movsd(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
@@ -1081,20 +1078,20 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
- __ movdbl(double_scratch, ExpConstant(0));
+ __ movsd(double_scratch, ExpConstant(0));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
__ j(above_equal, &done);
__ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
+ __ movsd(result, ExpConstant(2));
__ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
+ __ movsd(double_scratch, ExpConstant(3));
+ __ movsd(result, ExpConstant(4));
__ mulsd(double_scratch, input);
__ addsd(double_scratch, result);
__ movd(temp2, double_scratch);
__ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
+ __ movsd(result, ExpConstant(6));
__ mulsd(double_scratch, ExpConstant(5));
__ subsd(double_scratch, input);
__ subsd(result, double_scratch);
@@ -1111,16 +1108,15 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ shl(temp1, 20);
__ movd(input, temp1);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
+ __ movsd(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
- __ por(input, double_scratch);
+ __ orps(input, double_scratch);
__ mulsd(result, input);
__ bind(&done);
}
#undef __
-static const int kNoCodeAgeSequenceLength = 5;
static byte* GetNoCodeAgeSequence(uint32_t* length) {
static bool initialized = false;
@@ -1153,7 +1149,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
sequence++; // Skip the kCallOpcode byte
@@ -1165,16 +1161,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
diff --git a/chromium/v8/src/ia32/deoptimizer-ia32.cc b/chromium/v8/src/ia32/deoptimizer-ia32.cc
index 13a70afe521..5300dde9a21 100644
--- a/chromium/v8/src/ia32/deoptimizer-ia32.cc
+++ b/chromium/v8/src/ia32/deoptimizer-ia32.cc
@@ -177,87 +177,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// sub <profiling_counter>, <delta>
-// jns ok
-// call <interrupt stub>
-// ok:
-//
-// The patched back edge looks like this:
-//
-// sub <profiling_counter>, <delta> ;; Not changed
-// nop
-// nop
- // call <on-stack replacement>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -283,16 +202,14 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(eax.code(), params);
output_frame->SetRegister(ebx.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ if (!CpuFeatures::IsSupported(SSE2)) return;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
@@ -314,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+ Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+ return isolate_->builtins()->builtin(name);
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -330,7 +254,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ __ movsd(Operand(esp, offset), xmm_reg);
}
}
@@ -382,8 +306,8 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(ebx, dst_offset), xmm0);
}
}
@@ -468,7 +392,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ __ movsd(xmm_reg, Operand(ebx, src_offset));
}
}
diff --git a/chromium/v8/src/ia32/disasm-ia32.cc b/chromium/v8/src/ia32/disasm-ia32.cc
index 01fa9996456..057a558e28f 100644
--- a/chromium/v8/src/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/ia32/disasm-ia32.cc
@@ -942,13 +942,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+ AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
@@ -1042,14 +1042,30 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (f0byte == 0x57) {
+ } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rcpps",
+ "andps",
+ "andnps",
+ "orps",
+ "xorps",
+ "addps",
+ "mulps",
+ "cvtps2pd",
+ "cvtdq2ps",
+ "subps",
+ "minps",
+ "divps",
+ "maxps",
+ };
+
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
+ AppendToBuffer("%s %s,",
+ pseudo_op[f0byte - 0x53],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (f0byte == 0x50) {
data += 2;
int mod, regop, rm;
@@ -1058,6 +1074,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufps %s,%s,%d",
+ NameOfXMMRegister(rm),
+ NameOfXMMRegister(regop),
+ static_cast<int>(imm8));
+ data += 2;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
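
The new table-driven branch above replaces the lone xorps case: any opcode in the 0F 53..0F 5F range now indexes the mnemonic list directly. The lookup is equivalent to this sketch:

#include <cstdint>

// f0byte is the opcode byte that follows the 0x0F escape.
const char* PackedSseMnemonic(uint8_t f0byte) {
  static const char* const kNames[] = {
      "rcpps",    "andps",    "andnps", "orps",  "xorps", "addps", "mulps",
      "cvtps2pd", "cvtdq2ps", "subps",  "minps", "divps", "maxps"};
  return (f0byte >= 0x53 && f0byte <= 0x5F) ? kNames[f0byte - 0x53] : nullptr;
}
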
@@ -1189,6 +1216,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov_w ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else if (*data == 0xC7) {
+ data++;
+ AppendToBuffer("%s ", "mov_w");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
} else if (*data == 0x0F) {
data++;
if (*data == 0x38) {
@@ -1239,8 +1273,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
+ NameOfCPURegister(rm),
+ NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
diff --git a/chromium/v8/src/ia32/full-codegen-ia32.cc b/chromium/v8/src/ia32/full-codegen-ia32.cc
index 6d39cc1e6e5..3c5d4aa2788 100644
--- a/chromium/v8/src/ia32/full-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/full-codegen-ia32.cc
@@ -158,10 +158,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1117,7 +1114,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
__ j(above, &non_proxy);
- __ mov(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
+ __ Set(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
__ bind(&non_proxy);
__ push(ebx); // Smi
__ push(eax); // Array
@@ -1577,6 +1574,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
@@ -1586,21 +1585,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -1711,6 +1704,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1722,6 +1720,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
@@ -1734,35 +1740,27 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1) {
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ push(Immediate(Smi::FromInt(flags)));
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2247,7 +2245,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2332,7 +2330,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(edx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
@@ -3062,6 +3060,32 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
+ __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x80000000));
+ __ j(not_equal, if_false);
+ __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x00000000));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+
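
EmitIsMinusZero above distinguishes -0 from +0 by its raw IEEE-754 words: sign/exponent word 0x80000000 and mantissa word 0. The equivalent test on a double value (illustrative):

#include <cstdint>
#include <cstring>

bool IsMinusZeroSketch(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  return bits == UINT64_C(0x8000000000000000);  // sign bit set, all else zero
}
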
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -3282,57 +3306,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(1, ebx);
- __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm(), SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, ebx);
- __ movd(xmm0, eax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
- context()->Plug(eax);
-}
-
-
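
The generator deleted here used a classic bit trick: splice 32 random bits into the low mantissa word of 1.0 x 2^20 (upper word 0x41300000, as in the FPU branch above) and subtract 2^20, leaving 0.(32 random bits). In portable C++ (illustrative):

#include <cstdint>
#include <cstring>

double RandomBitsToUnitInterval(uint32_t random_bits) {
  // 0x41300000 is the upper word of 1.0 * 2^20; the random bits fill the
  // low mantissa word, giving 2^20 + random_bits * 2^-32.
  uint64_t repr = (UINT64_C(0x41300000) << 32) | random_bits;
  double d;
  std::memcpy(&d, &repr, sizeof(d));
  return d - 1048576.0;  // subtract 1.0 * 2^20, leaving random_bits / 2^32
}
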
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3424,32 +3397,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiIndex);
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
-
- __ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, kIndexIsTooLarge);
-
- __ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, kIndexIsNegative);
-
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- __ cmp(value, Immediate(encoding_mask));
- __ Check(equal, kUnexpectedStringType);
- __ pop(value);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3460,18 +3407,26 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ pop(value);
+ __ pop(index);
if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ test(value, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
}
__ SmiUntag(value);
__ SmiUntag(index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
__ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
value);
context()->Plug(string);
@@ -3488,13 +3443,19 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
__ pop(value);
__ pop(index);
- VisitForAccumulatorValue(args->at(0)); // string
if (FLAG_debug_code) {
+ __ test(value, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
+ __ test(index, Immediate(kSmiTagMask));
+ __ ThrowIf(not_zero, kNonSmiValue);
+ __ SmiUntag(index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index);
}
__ SmiUntag(value);
@@ -3555,8 +3516,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3681,11 +3642,20 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ if (FLAG_new_string_add) {
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
+ __ pop(edx);
+ NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ } else {
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ CallStub(&stub);
+ }
context()->Plug(eax);
}
@@ -3703,42 +3673,6 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -4408,14 +4342,50 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label done, stub_call;
+ JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(eax, &no_conversion, Label::kNear);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(eax, &slow, Label::kNear);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ }
+ }
+
+ if (expr->op() == Token::INC) {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ j(no_overflow, &done, Label::kNear);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(eax, Immediate(Smi::FromInt(1)));
+ }
+ __ jmp(&stub_call, Label::kNear);
+ __ bind(&slow);
}
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4437,37 +4407,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ add(eax, Immediate(Smi::FromInt(1)));
- }
- }
-
// Record position before stub call.
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
CallIC(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
expr->CountBinOpFeedbackId());
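
The restructured count operation performs the +/-1 on the tagged value first and undoes it on overflow before falling through to the stub. On ia32 a smi is the integer shifted left one bit, so the tagged constant 1 is the machine word 2; a sketch of that fast path (the overflow check uses a GCC/Clang builtin):

#include <cstdint>

// Returns true on the fast path; on overflow the input is left unchanged,
// mirroring the stub's "undo operation first" sequence above.
bool SmiIncrementSketch(int32_t* tagged_value) {
  int32_t result;
  if (__builtin_add_overflow(*tagged_value, int32_t{1} << 1, &result)) {
    return false;  // fall back to BinaryOpICStub with the original value
  }
  *tagged_value = result;
  return true;
}
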
@@ -4897,6 +4844,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
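
The byte layout both functions assume, relative to the back-edge return address pc: the rel32 call immediate ends at pc, the call opcode (0xe8) sits at pc - 5, and the two patchable bytes (jns + offset, or two nops) at pc - 7 and pc - 6. A standalone classifier over that layout (illustrative, without the builtin-entry asserts):

#include <cstdint>

enum class BackEdgeStateSketch { kInterrupt, kOnStackReplacement };

BackEdgeStateSketch ClassifyBackEdge(const uint8_t* pc) {
  const uint8_t* call_target_address = pc - 4;  // kIntSize == 4
  // 0x79 0x11 = jns +0x11; 0x66 0x90 = two-byte nop after patching.
  return call_target_address[-3] == 0x79
             ? BackEdgeStateSketch::kInterrupt
             : BackEdgeStateSketch::kOnStackReplacement;
}
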
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/chromium/v8/src/ia32/ic-ia32.cc b/chromium/v8/src/ia32/ic-ia32.cc
index 327ac57623e..2973beb3e46 100644
--- a/chromium/v8/src/ia32/ic-ia32.cc
+++ b/chromium/v8/src/ia32/ic-ia32.cc
@@ -611,7 +611,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -653,7 +653,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -678,7 +678,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(eax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -707,7 +707,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -733,6 +733,19 @@ static void KeyedStoreGenerateGenericHelper(
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, fast_double);
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ cmp(CodeGenerator::FixedArrayElementOperand(ebx, ecx),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(eax, &non_smi_value);
@@ -773,6 +786,16 @@ static void KeyedStoreGenerateGenericHelper(
// If the value is a number, store it as a double in the FastDoubleElements
// array.
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so,
+ // go to the runtime.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
&transition_double_elements, false);
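Both HOLECHECK blocks added above are the same guard applied to two element representations: a fast-path keyed store must not silently overwrite the hole (the tagged hole value, or the hole NaN pattern kHoleNanUpper32/kHoleNanLower32 in double arrays), because an element with that index may exist on the prototype chain, possibly with a setter, so the store bails to the runtime whenever the chain holds dictionary elements. The control flow in scalar form, as a sketch under assumed semantics (the hole sentinel and both helpers are illustrative stand-ins, not V8 internals):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Tagged {
      intptr_t bits;
      bool is_hole() const { return bits == -1; }  // assumed hole sentinel
    };

    // Illustrative stand-ins for the real checks and the runtime path.
    static bool PrototypeChainHasDictionaryElements() { return false; }
    static void RuntimeSetElement(std::vector<Tagged>& a, size_t i, Tagged v) {
      a[i] = v;  // the real runtime would also run interceptors/setters
    }

    void KeyedStore(std::vector<Tagged>& a, size_t i, Tagged v) {
      if (a[i].is_hole() && PrototypeChainHasDictionaryElements()) {
        RuntimeSetElement(a, i, v);  // a callback may observe this store
        return;
      }
      a[i] = v;  // holecheck passed: the plain fast-path store is safe
    }
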
@@ -851,10 +874,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(edx, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
+ 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(ecx, &slow);
@@ -929,7 +952,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
@@ -1038,7 +1061,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1109,7 +1132,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1226,7 +1249,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&lookup_monomorphic_cache);
__ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
+ kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -1304,7 +1327,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1373,7 +1396,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -1388,10 +1411,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ push(ebx); // return address
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1414,16 +1435,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
-
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
@@ -1528,7 +1548,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1543,10 +1563,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ push(ebx);
// Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
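Every ic-ia32.cc hunk above is one refactoring: the ICMissMode parameter, and with it the kKeyedLoadIC_MissForceGeneric / kKeyedStoreIC_MissForceGeneric runtime entries, disappear, so all keyed misses funnel into the single plain miss handler. In miniature (illustrative declarations only, not the real ones):

    #include <cstdio>

    enum ICMissMode { MISS, MISS_FORCE_GENERIC };  // the enum being retired

    static void Miss() { std::puts("kKeyedLoadIC_Miss"); }
    static void MissForceGeneric() { std::puts("kKeyedLoadIC_MissForceGeneric"); }

    // Before: the mode flag picked one of two runtime entries.
    void GenerateMissOld(ICMissMode mode) {
      (mode == MISS_FORCE_GENERIC ? MissForceGeneric : Miss)();
    }

    // After: the parameter and the second entry point are gone.
    void GenerateMissNew() { Miss(); }
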
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.cc b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
index 025740d4575..df2d4c5294d 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
@@ -120,24 +120,6 @@ void LCodeGen::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
@@ -148,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
#endif
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ CpuFeatureScope scope(masm(), SSE2);
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(esp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ CpuFeatureScope scope(masm(), SSE2);
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(esp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
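SaveCallerDoubles and RestoreCallerDoubles factor out the spill loops that the prologue, the jump table, and the return sequence previously inlined (see the hunks that follow): each allocated double register gets one kDoubleSize slot at esp, and restore replays the identical BitVector iteration order, so slot k always pairs with the same register. The invariant, sketched:

    #include <vector>

    // 'allocated' models the BitVector of allocated double registers;
    // 'slots' models the stack area at esp (one kDoubleSize slot each).
    void SaveDoubles(const std::vector<int>& allocated,
                     const double regs[], std::vector<double>& slots) {
      int count = 0;
      for (int r : allocated) slots[count++] = regs[r];
    }

    void RestoreDoubles(const std::vector<int>& allocated,
                        const std::vector<double>& slots, double regs[]) {
      int count = 0;
      for (int r : allocated) regs[r] = slots[count++];  // same order, same slot
    }
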
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -178,7 +194,7 @@ bool LCodeGen::GeneratePrologue() {
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
+ __ Set(edx, Immediate(kNoAlignmentPadding));
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
@@ -206,15 +222,8 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
info()->AddNoFrameRange(0, masm_->pc_offset());
- __ push(esi); // Callee's context.
- if (info()->IsStub()) {
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- } else {
- __ push(edi); // Callee's JS function.
- }
}
if (info()->IsOptimizing() &&
@@ -269,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- Comment(";;; Save clobbered callee double registers");
- CpuFeatureScope scope(masm(), SSE2);
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movdbl(MemOperand(esp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
}
@@ -340,12 +339,41 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
+ // Move state of dynamic frame alignment into edx.
+ __ Set(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
// Save the first local, which is overwritten by the alignment state.
Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
__ push(alignment_loc);
- // Set the dynamic frame alignment state to "not aligned".
- __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
@@ -355,44 +383,27 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
-
- RecordAndUpdatePosition(instr->position());
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
+}
- instr->CompileToNative(this);
- if (!CpuFeatures::IsSupported(SSE2)) {
- if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
- } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
- !instr->IsGap() && !instr->IsReturn()) {
- __ VerifyX87StackDepth(x87_stack_.depth());
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters()) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
}
+ __ VerifyX87StackDepth(x87_stack_.depth());
}
}
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
}
@@ -412,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
if (needs_frame.is_bound()) {
__ jmp(&needs_frame);
@@ -438,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
__ ret(0); // Call the continuation without clobbering registers.
}
} else {
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+ RestoreCallerDoubles();
+ }
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
@@ -453,8 +468,9 @@ bool LCodeGen::GenerateDeferredCode() {
X87Stack copy(code->x87_stack());
x87_stack_ = copy;
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -532,6 +548,16 @@ void LCodeGen::X87LoadForUsage(X87Register reg) {
}
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
ASSERT(is_mutable_);
ASSERT(Contains(reg) && stack_depth_ > other_slot);
@@ -783,17 +809,36 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()));
+ } else {
+ // Retrieve the parameter relative to the stack pointer, since no
+ // eager frame has been built.
+ return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ if (NeedsEagerFrame()) {
+ return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Retrieve the parameter relative to the stack pointer, since no
+ // eager frame has been built.
+ return Operand(
+ esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
}
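ArgumentsOffsetWithoutFrame maps a negative spill-slot index to an esp-relative offset for frameless (stub) code: index -1 is the slot just above the return address, -2 the next one up, with kPCOnStackSize covering the pushed return address. Worked through with ia32 sizes:

    #include <cassert>

    const int kPointerSize = 4;    // ia32
    const int kPCOnStackSize = 4;  // the pushed return address

    int ArgumentsOffsetWithoutFrame(int index) {
      assert(index < 0);
      return -(index + 1) * kPointerSize + kPCOnStackSize;
    }
    // ArgumentsOffsetWithoutFrame(-1) == 4  =>  Operand(esp, 4)
    // ArgumentsOffsetWithoutFrame(-2) == 8  =>  Operand(esp, 8)
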
@@ -931,8 +976,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -954,13 +997,12 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(fun, argc);
+ __ CallRuntime(fun, argc, save_doubles);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
@@ -1048,7 +1090,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ if (DeoptEveryNTimes()) {
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfd();
@@ -1122,26 +1164,31 @@ void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -1246,7 +1293,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, mode);
}
@@ -1258,17 +1305,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1336,11 +1376,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1392,36 +1427,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ and_(left_reg, divisor - 1);
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
- Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmp(right_reg, Immediate(divisor));
- DeoptimizeIf(not_equal, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ neg(left_reg);
- __ and_(left_reg, divisor - 1);
- __ neg(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&left_is_not_negative);
- __ and_(left_reg, divisor - 1);
- __ bind(&done);
-
} else {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(eax));
@@ -1733,9 +1738,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
case 9:
__ lea(left, Operand(left, left, times_8, 0));
break;
- case 16:
- __ shl(left, 4);
- break;
+ case 16:
+ __ shl(left, 4);
+ break;
default:
__ imul(left, left, constant);
break;
@@ -1967,9 +1972,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(xmm_scratch, Operand(temp));
+ __ orps(res, xmm_scratch);
}
}
}
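The DoConstantD hunk swaps por for orps, a bitwise-identical OR with a shorter encoding, while keeping the construction: the literal's upper 32 bits are shifted into place and the lower 32 bits OR-ed in. The integer equivalent:

    #include <cstdint>
    #include <cstring>

    double MakeDouble(uint32_t upper, uint32_t lower) {
      uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
      double d;
      memcpy(&d, &bits, sizeof(d));  // reinterpret, as movd/psllq/orps build it
      return d;
    }
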
@@ -2058,7 +2064,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
@@ -2070,32 +2076,87 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToRepresentation(LConstantOperand::cast(index),
+ Representation::Integer32());
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldOperand(string, SeqString::kHeaderSize + offset);
+ }
+ return FieldOperand(
+ string, ToRegister(index),
+ encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+ SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register result = ToRegister(instr->result());
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register value = ToRegister(instr->value());
- String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ __ push(string);
+ __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(value);
+ __ pop(string);
}
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
+ __ movzx_b(result, operand);
+ } else {
+ __ movzx_w(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+
+ if (FLAG_debug_code) {
+ Register value = ToRegister(instr->value());
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (instr->value()->IsConstantOperand()) {
+ int value = ToRepresentation(LConstantOperand::cast(instr->value()),
+ Representation::Integer32());
+ ASSERT_LE(0, value);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ ASSERT_LE(value, String::kMaxOneByteCharCode);
+ __ mov_b(operand, static_cast<int8_t>(value));
+ } else {
+ ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+ __ mov_w(operand, static_cast<int16_t>(value));
+ }
} else {
- __ mov_w(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
+ Register value = ToRegister(instr->value());
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov_w(operand, value);
+ }
}
}
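BuildSeqStringOperand centralizes the addressing shared by the new DoSeqStringGetChar and the rewritten DoSeqStringSetChar: characters start right after the string header, a constant index folds into the displacement (doubled for two-byte strings), and a dynamic index scales by times_1 or times_2. As plain address arithmetic (the header size here is a placeholder, not V8's actual SeqString::kHeaderSize):

    #include <cstdint>

    const intptr_t kHeaderSize = 12;  // placeholder, not SeqString::kHeaderSize

    intptr_t CharAddress(intptr_t string, intptr_t index, bool two_byte) {
      intptr_t char_size = two_byte ? 2 : 1;  // times_2 vs. times_1 scaling
      return string + kHeaderSize + index * char_size;
    }
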
@@ -2178,7 +2239,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -2195,7 +2256,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
- __ movsd(left_reg, right_reg);
+ __ movaps(left_reg, right_reg);
__ bind(&return_left);
}
@@ -2208,8 +2269,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
__ addsd(left, right);
@@ -2229,17 +2288,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
case Token::MOD: {
// Pass two doubles as arguments on the stack.
__ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
4);
// Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
+ // Store it into the result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
break;
}
@@ -2272,6 +2331,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ PrepareCallCFunction(4, eax);
X87Mov(Operand(esp, 1 * kDoubleSize), right);
X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
X87PrepareToWrite(result);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
@@ -2295,20 +2356,12 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int left_block = instr->TrueDestination(chunk_);
@@ -2340,25 +2393,6 @@ void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
@@ -2369,8 +2403,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2390,8 +2425,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2476,8 +2512,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(not_equal, &not_heap_number, Label::kNear);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
} else {
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
@@ -2521,6 +2558,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -2556,10 +2597,15 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), SSE2);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ } else {
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+ __ FCmp();
+ }
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
@@ -2615,7 +2661,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ fld(0);
__ FCmp();
Label ok;
- __ j(parity_even, &ok);
+ __ j(parity_even, &ok, Label::kNear);
__ fstp(0);
EmitFalseBranch(instr, no_condition);
__ bind(&ok);
@@ -2626,7 +2672,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movdbl(MemOperand(esp, 0), input_reg);
+ __ movsd(MemOperand(esp, 0), input_reg);
} else {
__ fstp_d(MemOperand(esp, 0));
}
@@ -2638,6 +2684,35 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ CpuFeatureScope use_sse2(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, value);
+ EmitFalseBranch(instr, not_equal);
+ __ movmskpd(scratch, value);
+ __ test(scratch, Immediate(1));
+ EmitBranch(instr, not_zero);
+ } else {
+ Register value = ToRegister(instr->value());
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ EmitFalseBranch(instr, not_equal);
+ __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ EmitBranch(instr, equal);
+ }
+}
+
+
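DoCompareMinusZeroAndBranch rests on two facts: -0.0 compares equal to +0.0, so the ucomisd filters out everything that is not a zero, and the sign then lives only in the bit pattern, read via movmskpd in the SSE2 path or via the heap number's exponent/mantissa words (0x80000000 / 0x00000000) in the tagged path. Scalar equivalent:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double v) {
      if (v != 0.0) return false;  // mirrors the ucomisd false-branch
      uint64_t bits;
      memcpy(&bits, &v, sizeof(bits));
      // Sign bit set, everything else zero: exponent word 0x80000000 and
      // mantissa word 0x00000000, exactly the tagged-path comparison.
      return bits == 0x8000000000000000ull;
    }
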
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2939,7 +3014,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->temp());
// A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
+ __ JumpIfSmi(object, &false_result, Label::kNear);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -2952,18 +3027,18 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
__ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss);
// Null is not an instance of anything.
__ cmp(object, factory()->null_value());
- __ j(equal, &false_result);
+ __ j(equal, &false_result, Label::kNear);
// String values are not instances of anything.
Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result);
+ __ j(is_string, &false_result, Label::kNear);
// Go to the deferred code.
__ jmp(deferred->entry());
@@ -3016,14 +3091,6 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -3090,17 +3157,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- ASSERT(NeedsEagerFrame());
- CpuFeatureScope scope(masm(), SSE2);
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
@@ -3116,7 +3173,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding);
+ __ j(equal, &no_padding, Label::kNear);
EmitReturn(instr, true);
__ bind(&no_padding);
@@ -3131,7 +3188,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
@@ -3154,7 +3211,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3245,13 +3302,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- if (instr->object()->IsConstantOperand()) {
- ExternalReference external_reference = ToExternalReference(
- LConstantOperand::cast(instr->object()));
- __ mov(result, MemOperand::StaticVariable(external_reference));
- } else {
- __ mov(result, MemOperand(ToRegister(instr->object()), offset));
- }
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(ToExternalReference(
+ LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ __ Load(result, operand, access.representation());
return;
}
@@ -3261,7 +3316,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, FieldOperand(object, offset));
+ __ movsd(result, FieldOperand(object, offset));
} else {
X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
@@ -3269,12 +3324,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ mov(result, FieldOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset));
+ object = result;
}
+ __ Load(result, FieldOperand(object, offset), access.representation());
}
@@ -3349,6 +3403,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3405,7 +3465,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ __ movsd(ToDoubleRegister(instr->result()), operand);
} else {
X87Mov(ToX87Register(instr->result()), operand);
}
@@ -3476,7 +3536,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, double_load_operand);
+ __ movsd(result, double_load_operand);
} else {
X87Mov(ToX87Register(instr->result()), double_load_operand);
}
@@ -3621,6 +3681,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
// Do not transform the receiver to object for strict mode
// functions.
@@ -3628,12 +3689,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
+ __ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok);
+ __ j(not_equal, &receiver_ok, dist);
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
@@ -3693,7 +3754,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
@@ -3778,9 +3838,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
if (can_invoke_directly) {
if (edi_state == EDI_UNINITIALIZED) {
__ LoadHeapObject(edi, function);
@@ -3805,6 +3862,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
+ LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
@@ -3845,7 +3903,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(zero, &done);
+ __ j(zero, &done, Label::kNear);
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
__ jmp(&allocated, Label::kNear);
@@ -3903,11 +3961,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
+ __ andps(input_reg, scratch);
} else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
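The DoMathAbs hunk swaps pand for andps, but the underlying trick is unchanged: for non-NaN x, 0 - x flips only the sign bit, so x AND -x keeps every bit except the sign, which is cleared: |x| without a branch. In scalar form (sketch):

    #include <cstdint>
    #include <cstring>

    double AbsViaAnd(double x) {
      double neg = 0.0 - x;  // the xorps + subsd: flips only the sign bit
      uint64_t a, b;
      memcpy(&a, &x, sizeof(a));
      memcpy(&b, &neg, sizeof(b));
      uint64_t r = a & b;    // the andps: only the sign bit differs, so it clears
      double result;
      memcpy(&result, &r, sizeof(result));
      return result;
    }
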
@@ -3924,7 +3982,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3977,7 +4035,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
@@ -3992,16 +4050,18 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
ExternalReference minus_one_half =
ExternalReference::address_of_minus_one_half();
Label done, round_to_zero, below_one_half, do_not_compensate;
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
+ __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
+ __ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
@@ -4010,16 +4070,16 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cmp(output_reg, 0x80000000u);
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&below_one_half);
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
+ __ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
- __ movsd(input_temp, input_reg); // Do not alter input_reg.
+ __ movaps(input_temp, input_reg); // Do not alter input_reg.
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
@@ -4027,12 +4087,12 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
- __ j(equal, &done);
+ __ j(equal, &done, dist);
__ sub(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
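DoMathRound computes floor(x + 0.5) with truncating conversions: for x >= 0.5 it truncates x + 0.5 directly; for x < -0.5 it truncates x - (-0.5) (truncation rounds toward zero, i.e. upward for negatives) and subtracts one whenever the truncation moved anything; the band [-0.5, 0.5) collapses to zero, modulo the minus-zero deopt. A scalar model (NaN and the 0x80000000 overflow deopts omitted):

    #include <cstdint>

    int32_t JsMathRound(double x) {  // floor(x + 0.5), as the code above computes
      if (x >= 0.5) return static_cast<int32_t>(x + 0.5);  // cvttsd2si truncates
      if (x > -0.5) return 0;                   // the round_to_zero range
      double t = x - (-0.5);                    // t < 0 from here on
      int32_t r = static_cast<int32_t>(t);      // truncation == ceil for t < 0
      if (static_cast<double>(r) != t) r -= 1;  // compensate down to floor(t)
      return r;
    }
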
@@ -4059,7 +4119,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -4125,97 +4185,33 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatureScope scope(masm(), SSE2);
-
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- // Load native context
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ mov(native_context, FieldOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds)
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ mov(state, FieldOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- __ movzx_w(scratch3, state0);
- __ imul(scratch3, scratch3, 18273);
- __ shr(state0, 16);
- __ add(state0, scratch3);
- // Save state[0].
- __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(scratch3, state1);
- __ imul(scratch3, scratch3, 36969);
- __ shr(state1, 16);
- __ add(state1, scratch3);
- // Save state[1].
- __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = state0;
- __ shl(random, 14);
- __ and_(state1, Immediate(0x3FFFF));
- __ add(random, state1);
-
- // Convert 32 random bits in random to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
- __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(scratch4, scratch3);
- __ movd(result, random);
- __ cvtss2sd(scratch4, scratch4);
- __ xorps(result, scratch4);
- __ subsd(result, scratch4);
-}
-
-
void LCodeGen::DoMathLog(LMathLog* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ movsd(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ __ movsd(input_reg, Operand::StaticVariable(ninf));
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
+ __ movsd(Operand(esp, 0), input_reg);
__ fld_d(Operand(esp, 0));
__ fyl2x();
__ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
+ __ movsd(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
}
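The DoMathLog rewrite replaces the push/movdbl construction of -Infinity with a load from the preformatted address_of_negative_infinity constant; the case analysis is unchanged: negative or NaN input yields the canonical NaN, zero yields -Infinity, and positive input runs fldln2/fyl2x, which computes ln(2) * log2(x) = ln(x). Scalar equivalent:

    #include <cmath>
    #include <limits>

    double JsMathLog(double x) {
      if (!(x > 0.0)) {  // catches negatives, both zeros, and NaN
        if (x == 0.0) return -std::numeric_limits<double>::infinity();
        return std::numeric_limits<double>::quiet_NaN();
      }
      return std::log(x);  // fldln2; fyl2x == ln(2) * log2(x) == ln(x)
    }
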
@@ -4225,10 +4221,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
@@ -4273,7 +4270,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
@@ -4321,7 +4317,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ leave();
+ __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ } else {
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4388,13 +4389,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// look at the first argument
__ mov(ecx, Operand(esp, 0));
__ test(ecx, ecx);
- __ j(zero, &packed_case);
+ __ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
@@ -4409,7 +4410,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -4424,7 +4426,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ lea(result, Operand(base, instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lea(result, Operand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lea(result, Operand(base, offset, times_1, 0));
+ }
}
@@ -4445,7 +4453,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(operand, Immediate(ToInteger32(operand_value)));
} else {
Register value = ToRegister(instr->value());
- __ mov(operand, value);
+ __ Store(value, operand, representation);
}
return;
}
@@ -4480,7 +4488,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(object, offset), value);
+ __ movsd(FieldOperand(object, offset), value);
} else {
X87Register value = ToX87Register(instr->value());
X87Mov(FieldOperand(object, offset), value);
@@ -4518,17 +4526,24 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
+ MemOperand operand = FieldOperand(write_register, offset);
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
- __ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
+ Register value = ToRegister(operand_value);
+ __ Store(value, operand, representation);
+ } else if (representation.IsInteger32()) {
+ Immediate immediate = ToImmediate(operand_value, representation);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ mov(operand, immediate);
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(FieldOperand(write_register, offset), handle_value);
+ __ mov(operand, handle_value);
}
} else {
- __ mov(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ Register value = ToRegister(instr->value());
+ __ Store(value, operand, representation);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4609,8 +4624,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
} else {
__ fld(0);
__ fstp_s(operand);
@@ -4618,7 +4634,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
X87Mov(operand, ToX87Register(instr->value()));
}
@@ -4674,13 +4690,13 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Label have_value;
__ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
+ __ j(parity_odd, &have_value, Label::kNear); // NaN.
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
__ bind(&have_value);
}
- __ movdbl(double_store_operand, value);
+ __ movsd(double_store_operand, value);
} else {
// Can't use SSE2 in the serializer
if (instr->hydrogen()->IsConstantHoleStore()) {
@@ -4710,15 +4726,15 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ fld(0);
__ FCmp();
- __ j(parity_odd, &no_special_nan_handling);
+ __ j(parity_odd, &no_special_nan_handling, Label::kNear);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
Immediate(kHoleNanUpper32));
__ add(esp, Immediate(kDoubleSize));
Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ jmp(&no_special_nan_handling);
+ __ j(not_equal, &canonicalize, Label::kNear);
+ __ jmp(&no_special_nan_handling, Label::kNear);
__ bind(&canonicalize);
__ fstp(0);
__ fld_d(Operand::StaticVariable(canonical_nan_reference));
@@ -4803,8 +4819,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
}
@@ -4825,22 +4843,18 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
+ Immediate(to_map));
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()),
kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(esi));
PushSafepointRegistersScope scope(this);
if (!object_reg.is(eax)) {
- __ push(object_reg);
- }
- LoadContextFromDeferred(instr->context());
- if (!object_reg.is(eax)) {
- __ pop(eax);
+ __ mov(eax, object_reg);
}
__ mov(ebx, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
@@ -4964,10 +4978,19 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(ToRegister(instr->context()).is(esi));
+ if (FLAG_new_string_add) {
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
+ NewStringAddStub stub(instr->hydrogen()->flags(),
+ isolate()->heap()->GetPretenureMode());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4978,7 +5001,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
ASSERT(output->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
} else if (input->IsRegister()) {
Register input_reg = ToRegister(input);
__ push(input_reg);
@@ -5001,14 +5024,21 @@ void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ LOperand* temp = instr->temp();
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+ } else {
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+ }
}
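The LoadUint32 / LoadUint32NoSSE2 split above exists because x86 only offers a signed 32-bit integer-to-double convert; an unsigned value with the top bit set comes out negative and must be biased by 2^32. A portable sketch of the semantics being implemented (the helper below is illustrative, not V8's actual code):

#include <cstdint>
#include <cstdio>

double Uint32ToDouble(uint32_t v) {
  // What a raw signed convert (cvtsi2sd) would produce for these bits:
  double as_signed = static_cast<double>(static_cast<int32_t>(v));
  // Values >= 2^31 come out negative and must be corrected by 2^32.
  return (v >> 31) ? as_signed + 4294967296.0 : as_signed;
}

int main() {
  std::printf("%.0f\n", Uint32ToDouble(0xFFFFFFFFu));  // 4294967295
  return 0;
}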
@@ -5084,6 +5114,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
+ XMMRegister xmm_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -5098,7 +5129,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ xor_(reg, 0x80000000);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ cvtsi2sd(xmm0, Operand(reg));
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
@@ -5107,7 +5138,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm0, reg,
+ __ LoadUint32(xmm_scratch, reg,
ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
@@ -5143,12 +5174,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
- // Done. Put the value in xmm0 into the value of the allocated heap
+ // Done. Store the value in xmm_scratch into the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5192,7 +5223,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5319,7 +5350,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -5328,28 +5359,17 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert, Label::kNear);
} else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
-
- // Convert undefined (and hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
}
+
// Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
@@ -5358,6 +5378,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movsd(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -5367,7 +5400,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// input register since we avoid dependencies.
__ mov(temp_reg, input_reg);
__ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(temp_reg));
+ __ Cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
@@ -5375,25 +5408,36 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
-
if (instr->truncating()) {
- Label heap_number, slow_case;
+ Label no_heap_number, check_bools, check_false;
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
+ __ bind(&no_heap_number);
+ // Check for Oddballs. Undefined and false are converted to zero and
+ // true to one for truncating conversions.
__ cmp(input_reg, factory()->undefined_value());
+ __ j(not_equal, &check_bools, Label::kNear);
+ __ Set(input_reg, Immediate(0));
+ __ jmp(done);
+
+ __ bind(&check_bools);
+ __ cmp(input_reg, factory()->true_value());
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, Immediate(1));
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
+ __ Set(input_reg, Immediate(0));
__ jmp(done);
-
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
Label bailout;
XMMRegister scratch = (instr->temp() != NULL)
@@ -5428,12 +5472,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
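For context, the truncating path in DoDeferredTaggedToI above mirrors JS ToInt32 on oddballs: undefined and false truncate to zero, true to one, and anything else deoptimizes. A sketch of that mapping, using illustrative types rather than V8's:

#include <cstdio>

enum Oddball { kUndefined, kTrue, kFalse, kOther };

// Returns false where the generated code would deoptimize instead.
bool TruncateOddballToI(Oddball v, int* out) {
  switch (v) {
    case kUndefined:
    case kFalse: *out = 0; return true;
    case kTrue:  *out = 1; return true;
    default:     return false;
  }
}

int main() {
  int x = -1;
  if (TruncateOddballToI(kTrue, &x)) std::printf("%d\n", x);  // prints 1
  return 0;
}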
@@ -5498,7 +5546,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5525,7 +5574,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5605,7 +5655,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
@@ -5660,23 +5710,22 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success);
- __ j(equal, &success);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(reg, map);
+ __ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set->last();
- __ CompareMap(reg, map, &success);
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ __ CompareMap(reg, map);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
} else {
@@ -5690,8 +5739,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -5707,6 +5757,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -5725,8 +5777,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -5751,13 +5803,13 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Check for heap number
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kFar);
+ __ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ jmp(&zero_result);
+ __ jmp(&zero_result, Label::kNear);
// Heap number
__ bind(&heap_number);
@@ -5772,15 +5824,15 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Test for negative values --> clamp to zero
__ test(scratch, scratch);
- __ j(negative, &zero_result);
+ __ j(negative, &zero_result, Label::kNear);
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
__ shr(scratch2, HeapNumber::kExponentShift);
- __ j(zero, &zero_result);
+ __ j(zero, &zero_result, Label::kNear);
__ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
- __ j(negative, &zero_result);
+ __ j(negative, &zero_result, Label::kNear);
const uint32_t non_int8_exponent = 7;
__ cmp(scratch2, Immediate(non_int8_exponent + 1));
@@ -5811,18 +5863,18 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
__ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
__ cmp(scratch2, Immediate(1 << one_half_bit_shift));
Label no_round;
- __ j(less, &no_round);
+ __ j(less, &no_round, Label::kNear);
Label round_up;
__ mov(scratch2, Immediate(1 << one_half_bit_shift));
- __ j(greater, &round_up);
+ __ j(greater, &round_up, Label::kNear);
__ test(scratch3, scratch3);
- __ j(not_zero, &round_up);
+ __ j(not_zero, &round_up, Label::kNear);
__ mov(scratch2, scratch);
__ and_(scratch2, Immediate(1 << one_bit_shift));
__ shr(scratch2, 1);
__ bind(&round_up);
__ add(scratch, scratch2);
- __ j(overflow, &largest_value);
+ __ j(overflow, &largest_value, Label::kNear);
__ bind(&no_round);
__ shr(scratch, 23);
__ mov(result_reg, scratch);
@@ -5837,7 +5889,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// bit is set.
__ and_(scratch, HeapNumber::kMantissaMask);
__ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ j(not_zero, &zero_result); // M!=0 --> NaN
+ __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
// Infinity -> Fall through to map to 255.
__ bind(&largest_value);
@@ -5846,7 +5898,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
__ bind(&zero_result);
__ xor_(result_reg, result_reg);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// smi
__ bind(&is_smi);
@@ -5896,7 +5948,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
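DoAllocate above now skips the inline bump-pointer path entirely when a constant size exceeds Page::kMaxRegularHeapObjectSize and jumps straight to the deferred runtime call. The decision reduces to the following (the limit value below is a stand-in, not V8's actual constant):

#include <cstddef>
#include <cstdio>

const std::size_t kMaxRegularHeapObjectSize = 64 * 1024;  // assumed value

const char* AllocationPath(std::size_t size) {
  return size <= kMaxRegularHeapObjectSize ? "inline Allocate()"
                                           : "deferred->entry()";
}

int main() {
  std::printf("%s\n", AllocationPath(128));      // inline Allocate()
  std::printf("%s\n", AllocationPath(1 << 20));  // deferred->entry()
  return 0;
}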
@@ -5929,7 +5985,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Immediate(Smi::FromInt(0)));
+ __ Set(result, Immediate(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5942,19 +5998,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ push(Immediate(Smi::FromInt(size)));
}
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(
- Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ push(Immediate(Smi::FromInt(flags)));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5994,7 +6053,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
__ push(ebx);
@@ -6039,6 +6098,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
@@ -6047,43 +6107,48 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
-
- Condition final_branch_condition =
- EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
- input, instr->type_literal());
+ Condition final_branch_condition = EmitTypeofIs(instr, input);
if (final_branch_condition != no_condition) {
EmitBranch(instr, final_branch_condition);
}
}
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Handle<String> type_name = instr->type_literal();
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ Label::Distance true_distance = left_block == next_block ? Label::kNear
+ : Label::kFar;
+ Label::Distance false_distance = right_block == next_block ? Label::kNear
+ : Label::kFar;
Condition final_branch_condition = no_condition;
if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
+ __ JumpIfSmi(input, true_label, true_distance);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
factory()->heap_number_map());
final_branch_condition = equal;
} else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
+ __ j(above_equal, false_label, false_distance);
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else if (type_name->Equals(heap()->symbol_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->boolean_string())) {
__ cmp(input, factory()->true_value());
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ cmp(input, factory()->false_value());
final_branch_condition = equal;
@@ -6093,8 +6158,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->undefined_string())) {
__ cmp(input, factory()->undefined_value());
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
+ __ j(equal, true_label, true_distance);
+ __ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -6103,29 +6168,29 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ cmp(input, factory()->null_value());
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
}
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- __ j(below, false_label);
+ __ j(below, false_label, false_distance);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
+ __ j(above, false_label, false_distance);
// Check for undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
final_branch_condition = zero;
} else {
- __ jmp(false_label);
+ __ jmp(false_label, false_distance);
}
return final_branch_condition;
}
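EmitTypeofIs now receives the whole branch instruction so it can pick a jump width per destination: when the true or false target is the next block to be emitted, the intervening typeof checks are only a few instructions, so a short rel8 jump (Label::kNear) is assumed safe; otherwise a rel32 encoding (Label::kFar) is used. The heuristic itself is just:

#include <cstdio>

enum Distance { kNear, kFar };

Distance DistanceFor(int target_block, int next_emitted_block) {
  // The next emitted block is close enough for the 2-byte rel8 form.
  return target_block == next_emitted_block ? kNear : kFar;
}

int main() {
  std::printf("%s\n", DistanceFor(7, 7) == kNear ? "kNear" : "kFar");
  return 0;
}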
@@ -6157,14 +6222,13 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
}
}
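EnsureSpaceForLazyDeopt above is now parameterized by the number of bytes the deopt patcher will need; if too little code was emitted since the last lazy-deopt site, the gap is filled with Nops. The padding arithmetic, extracted into a standalone sketch:

#include <cstdio>

int LazyDeoptPadding(int current_pc, int last_lazy_deopt_pc,
                     int space_needed) {
  int required_pc = last_lazy_deopt_pc + space_needed;
  return current_pc < required_pc ? required_pc - current_pc : 0;
}

int main() {
  // 5 bytes needed, only 2 emitted since the last site: pad with 3 Nops.
  std::printf("%d\n", LazyDeoptPadding(12, 10, 5));  // prints 3
  return 0;
}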
@@ -6173,7 +6237,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6195,6 +6259,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDummyUse(LDummyUse* instr) {
// Nothing to see here, move on!
}
@@ -6244,7 +6313,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -6257,7 +6326,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6284,6 +6353,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
@@ -6321,9 +6391,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Label load_cache, done;
__ EnumLength(result, map);
__ cmp(result, Immediate(Smi::FromInt(0)));
- __ j(not_equal, &load_cache);
+ __ j(not_equal, &load_cache, Label::kNear);
__ mov(result, isolate()->factory()->empty_fixed_array());
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
@@ -6351,7 +6421,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Label out_of_object, done;
__ cmp(index, Immediate(0));
- __ j(less, &out_of_object);
+ __ j(less, &out_of_object, Label::kNear);
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.h b/chromium/v8/src/ia32/lithium-codegen-ia32.h
index 769917f7e24..638f80c3549 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.h
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.h
@@ -33,6 +33,7 @@
#include "checks.h"
#include "deoptimizer.h"
#include "ia32/lithium-gap-resolver-ia32.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -45,45 +46,28 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -129,12 +113,17 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
X87Register left, X87Register right, X87Register result);
void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
void X87Fxch(X87Register reg, int other_slot = 0) {
x87_stack_.Fxch(reg, other_slot);
}
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
bool X87StackEmpty() {
return x87_stack_.depth() == 0;
@@ -188,27 +177,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
- int GetNextEmittedBlock() const;
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -220,14 +195,17 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
@@ -251,7 +229,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int argc,
@@ -292,6 +271,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+ bool DeoptEveryNTimes() {
+ return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+ }
+
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
@@ -319,6 +302,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
uint32_t offset,
uint32_t additional_index = 0);
+ Operand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
@@ -331,12 +318,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
@@ -362,10 +350,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
+ Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -395,7 +380,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -425,26 +410,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void MakeSureStackPagesMapped(int offset);
#endif
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
class X87Stack {
@@ -505,8 +480,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
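The header change above pulls the bookkeeping that every backend duplicated (assembler, compilation info, status tracking, current block and instruction, last lazy-deopt pc) into a shared LCodeGenBase. The shape is roughly the following, with paraphrased member names rather than the exact declarations:

class MacroAssembler;
class CompilationInfo;

class LCodeGenBase {
 public:
  LCodeGenBase(MacroAssembler* masm, CompilationInfo* info)
      : masm_(masm), info_(info), last_lazy_deopt_pc_(0) {}
  MacroAssembler* masm() const { return masm_; }
  CompilationInfo* info() const { return info_; }

 protected:
  MacroAssembler* const masm_;
  CompilationInfo* const info_;
  int last_lazy_deopt_pc_;  // shared by EnsureSpaceForLazyDeopt()
};

// Each architecture keeps only its platform-specific state.
class LCodeGen : public LCodeGenBase {
 public:
  LCodeGen(MacroAssembler* masm, CompilationInfo* info)
      : LCodeGenBase(masm, info) {}
};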
diff --git a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
index b5bc18bdc96..d621bd261d6 100644
--- a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -326,7 +326,7 @@ void LGapResolver::EmitMove(int index) {
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
- __ movdbl(dst, Operand(esp, 0));
+ __ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
} else {
@@ -360,7 +360,7 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
}
} else {
// load from the register onto the stack, store in destination, which must
@@ -378,12 +378,12 @@ void LGapResolver::EmitMove(int index) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
}
} else {
// load from the stack slot on top of the floating point stack, and then
@@ -486,9 +486,9 @@ void LGapResolver::EmitSwap(int index) {
: destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movaps(reg, xmm0);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
@@ -499,12 +499,12 @@ void LGapResolver::EmitSwap(int index) {
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
+ __ movsd(src0, xmm0);
} else {
// No other combinations are possible.
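In the swaps above, xmm0 serves as the fixed scratch register, and the final register-to-register copy becomes movaps instead of movsd: movsd between two XMM registers writes only the low 64 bits and therefore carries a dependency on the destination's old upper half, while movaps copies the full register. The three-step sequence, modeled in plain C++:

#include <cstdio>

int main() {
  double reg = 1.5;      // stands in for the XMM register operand
  double other = 2.5;    // stands in for the memory operand
  double xmm0 = other;   // movsd xmm0, other   (load memory into scratch)
  other = reg;           // movsd other, reg    (store register to memory)
  reg = xmm0;            // movaps reg, xmm0    (full-register copy)
  std::printf("reg=%g other=%g\n", reg, other);  // reg=2.5 other=1.5
  return 0;
}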
diff --git a/chromium/v8/src/ia32/lithium-ia32.cc b/chromium/v8/src/ia32/lithium-ia32.cc
index a5acb9fa9e4..aa35e9d6b40 100644
--- a/chromium/v8/src/ia32/lithium-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-ia32.cc
@@ -302,7 +302,8 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
@@ -386,9 +387,9 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot if needed for a double-width slot.
- if (is_double) {
+ if (kind == DOUBLE_REGISTERS) {
spill_slot_count_++;
spill_slot_count_ |= 1;
num_double_slots_++;
@@ -397,11 +398,12 @@ int LPlatformChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -479,7 +481,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// Reserve the first spill slot for the state of dynamic alignment.
if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
ASSERT_EQ(alignment_state_index, 0);
USE(alignment_state_index);
}
@@ -488,7 +490,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -560,29 +562,42 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) {
}
+static inline bool CanBeImmediateConstant(HValue* value) {
+ return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value);
}
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseAtStart(value);
}
+LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
+ Register fixed_register) {
+ return CanBeImmediateConstant(value)
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseFixed(value, fixed_register);
+}
+
+
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegister(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegisterAtStart(value);
}
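CanBeImmediateConstant above tightens the Use*OrConstant helpers: a constant is embedded as an immediate only when it is not in new space, presumably because new-space objects are moved on every scavenge, so their addresses cannot safely be baked into the instruction stream. The guard reduces to:

#include <cstdio>

struct ConstantInfo {
  bool is_constant;
  bool in_new_space;
};

bool CanBeImmediate(const ConstantInfo& c) {
  return c.is_constant && !c.in_new_space;
}

int main() {
  std::printf("%d\n", CanBeImmediate({true, false}));  // 1: embed immediate
  std::printf("%d\n", CanBeImmediate({true, true}));   // 0: use a register
  return 0;
}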
@@ -707,7 +722,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -762,52 +777,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseFixed(right_value, ecx);
}
- } else {
- right = UseFixed(right_value, ecx);
- }
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -816,21 +823,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -914,10 +922,33 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -947,7 +978,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -964,7 +994,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
clobber->set_hydrogen_value(current);
chunk_->AddInstruction(clobber, current_block_);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -1061,21 +1090,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor);
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
ToBooleanStub::Types expected = instr->expected_input_types();
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
// environment is needed since all cases are handled.
+ HValue* value = instr->value();
Representation rep = value->representation();
HType type = value->type();
if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
@@ -1141,12 +1164,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1171,7 +1188,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseAny(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1186,11 +1202,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1238,7 +1254,6 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
}
@@ -1246,7 +1261,6 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1279,10 +1293,9 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context());
LOperand* input = UseRegister(instr->value());
LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(context, input, temp);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1344,10 +1357,9 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
- LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineSameAsFirst(result);
}
@@ -1356,7 +1368,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1364,7 +1375,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallNamed* result = new(zone()) LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1372,14 +1382,12 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallGlobal* result = new(zone()) LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
}
@@ -1387,7 +1395,6 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1396,7 +1403,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1405,14 +1411,14 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(context, function);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LInstruction* result = DefineFixed(call, eax);
+ if (instr->IsTailCall()) return result;
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
}
@@ -1442,29 +1448,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1481,8 +1477,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1562,10 +1559,6 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegisterAtStart(right),
- NULL);
return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
@@ -1584,17 +1577,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1618,7 +1604,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1639,7 +1624,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1670,8 +1654,22 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ HValue* right_candidate = instr->right();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ return result;
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
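
Note: the new external-representation branch reuses LAddI's lea path. A sketch of the codegen-side difference using the MacroAssembler API that appears later in this diff (EmitAddExample is a hypothetical helper, not the actual LCodeGen code):

    void EmitAddExample(MacroAssembler* masm, bool use_lea,
                        Register dst, Register left, Register right) {
      if (use_lea) {
        // Three-operand form: dst may differ from both inputs, which is
        // why the lowering above pairs lea with DefineAsRegister.
        masm->lea(dst, Operand(left, right, times_1, 0));
      } else {
        // Two-operand add overwrites its first operand, hence
        // DefineSameAsFirst in the non-lea case.
        ASSERT(dst.is(left));
        masm->add(dst, right);
      }
    }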
@@ -1713,19 +1711,6 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, xmm1);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsSmiOrTagged());
ASSERT(instr->right()->representation().IsSmiOrTagged());
@@ -1752,9 +1737,12 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
ASSERT(instr->right()->representation().IsDouble());
LOperand* left;
LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
+ if (CanBeImmediateConstant(instr->left()) &&
+ CanBeImmediateConstant(instr->right())) {
+ // The code generator requires either both inputs to be constant
+ // operands, or neither.
+ left = UseConstant(instr->left());
+ right = UseConstant(instr->right());
} else {
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
@@ -1766,6 +1754,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1774,8 +1764,18 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
}
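
Note: a dedicated minus-zero branch exists because IEEE-754 comparison cannot distinguish -0 from +0; only the bit pattern can. A standalone C++ illustration (not V8 code):

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // -0.0 == 0.0 evaluates to true, so the check must look at the sign bit.
    static bool IsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      return bits == 0x8000000000000000ull;  // sign bit set, all others clear
    }

    int main() {
      std::cout << (-0.0 == 0.0) << "\n";      // 1: comparison cannot tell
      std::cout << IsMinusZero(-0.0) << "\n";  // 1
      std::cout << IsMinusZero(0.0) << "\n";   // 0
    }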
@@ -1884,14 +1884,43 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
+ if (instr->encoding() == String::ONE_BYTE_ENCODING) {
+ if (FLAG_debug_code) {
+ return UseFixed(instr->value(), eax);
+ } else {
+ return UseFixedOrConstant(instr->value(), eax);
+ }
+ } else {
+ if (FLAG_debug_code) {
+ return UseRegisterAtStart(instr->value());
+ } else {
+ return UseRegisterOrConstantAtStart(instr->value());
+ }
+ }
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(ecx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), ecx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = GetSeqStringSetCharOperand(instr);
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
+ LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+ index, value);
+ if (FLAG_debug_code) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -1909,6 +1938,13 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
@@ -1944,7 +1980,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = TempRegister();
@@ -2051,12 +2086,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -2121,12 +2150,10 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), esi)
- : NULL;
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), eax), context,
- parameter_count);
+ return new(zone()) LReturn(
+ UseFixed(instr->value(), eax), context, parameter_count);
}
@@ -2235,6 +2262,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2389,7 +2421,7 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new_map_reg, temp_reg);
return result;
} else {
- LOperand* context = UseRegister(instr->context());
+ LOperand* context = UseFixed(instr->context(), esi);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
return AssignPointerMap(result);
@@ -2436,7 +2468,12 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
!(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
- if (needs_write_barrier) {
+ if (instr->field_representation().IsInteger8() ||
+ instr->field_representation().IsUInteger8()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
@@ -2482,8 +2519,12 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
+ LOperand* left = FLAG_new_string_add
+ ? UseFixed(instr->left(), edx)
+ : UseOrConstantAtStart(instr->left());
+ LOperand* right = FLAG_new_string_add
+ ? UseFixed(instr->right(), eax)
+ : UseOrConstantAtStart(instr->right());
LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
return MarkAsCall(DefineFixed(string_add, eax), instr);
}
@@ -2552,7 +2593,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2583,7 +2624,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallStub* result = new(zone()) LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2638,6 +2678,8 @@ LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2712,7 +2754,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
diff --git a/chromium/v8/src/ia32/lithium-ia32.h b/chromium/v8/src/ia32/lithium-ia32.h
index aa5c0bbeed7..ea4fef8a710 100644
--- a/chromium/v8/src/ia32/lithium-ia32.h
+++ b/chromium/v8/src/ia32/lithium-ia32.h
@@ -74,6 +74,7 @@ class LCodeGen;
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
V(ClobberDoubles) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -93,6 +94,7 @@ class LCodeGen;
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
+ V(Dummy) \
V(DummyUse) \
V(ElementsKind) \
V(ForInCacheArray) \
@@ -107,7 +109,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -116,7 +117,6 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
@@ -130,6 +130,7 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
@@ -153,10 +154,10 @@ class LCodeGen;
V(OuterContext) \
V(Parameter) \
V(Power) \
- V(Random) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -216,7 +217,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -257,15 +257,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -311,7 +302,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -443,6 +433,13 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
@@ -753,15 +750,13 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
+ LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
@@ -864,15 +859,13 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
+ LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
@@ -907,9 +900,9 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -917,22 +910,25 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- explicit LIsNumberAndBranch(LOperand* value) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
@@ -994,6 +990,7 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
inputs_[2] = right;
}
+  LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
@@ -1098,6 +1095,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+ LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1124,6 +1122,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
@@ -1145,19 +1144,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1310,7 +1296,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1375,27 +1361,39 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
};
@@ -1463,28 +1461,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() const { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1540,7 +1516,8 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* context,
+ explicit LReturn(LOperand* value,
+ LOperand* context,
LOperand* parameter_count) {
inputs_[0] = value;
inputs_[1] = context;
@@ -1606,6 +1583,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1635,11 +1621,6 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
- return !CpuFeatures::IsSupported(SSE2) &&
- !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
- }
-
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1820,19 +1801,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -2062,8 +2043,13 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
@@ -2203,7 +2189,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2378,8 +2364,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2529,12 +2517,13 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
+ LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -2756,8 +2745,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
: LChunk(info, graph),
num_double_slots_(0) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
int num_double_slots() const { return num_double_slots_; }
@@ -2779,13 +2768,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2857,6 +2847,10 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+ // An input operand in a fixed register or a constant operand.
+ MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
+ Register fixed_register);
+
// An input operand in a register or a constant operand.
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
@@ -2902,6 +2896,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+ LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
+
// Marks a call for the register allocator. Assigns a pointer map to
// support GC and lazy deoptimization. Assigns an environment to support
// eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
@@ -2921,7 +2917,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
@@ -2935,7 +2931,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.cc b/chromium/v8/src/ia32/macro-assembler-ia32.cc
index b65d328435e..52d42f6ca87 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.cc
@@ -33,6 +33,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
#include "serialize.h"
@@ -45,7 +46,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
// TODO(titzer): should we just use a null handle here instead?
@@ -55,6 +55,34 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsx_b(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzx_b(dst, src);
+ } else if (r.IsInteger16()) {
+ movsx_w(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzx_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ mov_b(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ mov_w(dst, src);
+ } else {
+ mov(dst, src);
+ }
+}
+
+
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
@@ -232,7 +260,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
j(not_equal, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), input_reg);
+ movsd(MemOperand(esp, 0), input_reg);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
bind(&done);
@@ -253,8 +281,8 @@ void MacroAssembler::X87TOSToI(Register result_reg,
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
- fist_s(MemOperand(esp, 0));
fld(0);
+ fist_s(MemOperand(esp, 0));
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
@@ -283,7 +311,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label::Distance dst) {
ASSERT(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
- cvtsi2sd(scratch, Operand(result_reg));
+ Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -344,7 +372,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
cmp(result_reg, 0x80000000u);
j(not_equal, &done, Label::kNear);
@@ -361,7 +389,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
if (input_reg.is(result_reg)) {
// Input is clobbered. Restore number from double scratch.
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), xmm0);
+ movsd(MemOperand(esp, 0), xmm0);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
} else {
@@ -390,9 +418,9 @@ void MacroAssembler::TaggedToI(Register result_reg,
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cvtsi2sd(temp, Operand(result_reg));
+ Cvtsi2sd(temp, Operand(result_reg));
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, Label::kNear);
@@ -445,25 +473,36 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
-
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
Label done;
cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ movsd(scratch, Operand::StaticVariable(uint32_bias));
+ Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
bind(&done);
}
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
+
+
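
Note: both LoadUint32 variants implement the same conversion: the 32-bit value is first converted as if signed (all that cvtsi2sd and fild_s can do), then 2^32 is added back when the sign bit was set. A standalone C++ equivalent (illustration only):

    #include <cstdint>
    #include <iostream>

    static const double kUint32Bias = 4294967296.0;  // 2^32, the bias above

    double Uint32ToDouble(uint32_t u) {
      double d = static_cast<double>(static_cast<int32_t>(u));  // signed view
      if (static_cast<int32_t>(u) < 0) d += kUint32Bias;        // undo wrap
      return d;
    }

    int main() {
      std::cout.precision(17);
      std::cout << Uint32ToDouble(0xFFFFFFFFu) << "\n";  // 4294967295
    }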
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
@@ -574,6 +613,10 @@ void MacroAssembler::RecordWriteForMap(
return;
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// A single check of the map's pages interesting flag suffices, since it is
// only set during incremental collection, and then it's also guaranteed that
// the from object's page's interesting flag is also set. This optimization
@@ -630,6 +673,10 @@ void MacroAssembler::RecordWrite(Register object,
bind(&ok);
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
@@ -676,6 +723,12 @@ void MacroAssembler::DebugBreak() {
#endif
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtsi2sd(dst, src);
+}
+
+
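
Note: a minimal SSE2-intrinsics sketch of the same dependency-breaking idiom (illustration only, not V8 code):

    #include <emmintrin.h>  // SSE2

    // cvtsi2sd only replaces the low lane of its destination, so the
    // register keeps a false dependence on its previous contents. Zeroing
    // it first (xorps) is a recognized dependency-breaking idiom.
    __m128d ConvertNoFalseDep(int x) {
      __m128d zero = _mm_setzero_pd();  // xorps dst, dst
      return _mm_cvtsi32_sd(zero, x);   // cvtsi2sd dst, src
    }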
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, dst); // Shorter than mov.
@@ -799,9 +852,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -821,7 +874,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
} else {
fld_d(Operand::StaticVariable(canonical_nan_reference));
}
@@ -834,8 +887,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope fscope(this, SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
+ Cvtsi2sd(scratch2, scratch1);
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -849,9 +902,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -864,10 +915,8 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -996,6 +1045,30 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+    PredictableCodeSizeScope predictable_code_size_scope(this,
+        kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
+ }
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
@@ -1033,10 +1106,8 @@ void MacroAssembler::EnterExitFramePrologue() {
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- ExternalReference context_address(Isolate::kContextAddress,
- isolate());
+ ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
+ ExternalReference context_address(Isolate::kContextAddress, isolate());
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
@@ -1051,7 +1122,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
@@ -1095,7 +1166,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
@@ -1109,14 +1180,16 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Push the return address to get ready to return.
push(ecx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -1128,11 +1201,11 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
@@ -1344,8 +1417,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
-// Compute the hash code from the untagged key. This must be kept in sync
-// with ComputeIntegerHash in utils.h.
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stub-hydrogen.cc.
//
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
@@ -1421,8 +1495,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -1440,7 +1513,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
@@ -1942,30 +2015,48 @@ void MacroAssembler::CopyBytes(Register source,
Register destination,
Register length,
Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
- j(less_equal, &short_string);
-
+ Label short_loop, len4, len8, len12, done, short_string;
ASSERT(source.is(esi));
ASSERT(destination.is(edi));
ASSERT(length.is(ecx));
+ cmp(length, Immediate(4));
+ j(below, &short_string, Label::kNear);
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
+
+ cmp(length, Immediate(8));
+ j(below_equal, &len4, Label::kNear);
+ cmp(length, Immediate(12));
+ j(below_equal, &len8, Label::kNear);
+ cmp(length, Immediate(16));
+ j(below_equal, &len12, Label::kNear);
+
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(scratch, Immediate(0x3));
add(destination, scratch);
- jmp(&done);
+ jmp(&done, Label::kNear);
+
+ bind(&len12);
+ mov(scratch, Operand(source, 8));
+ mov(Operand(destination, 8), scratch);
+ bind(&len8);
+ mov(scratch, Operand(source, 4));
+ mov(Operand(destination, 4), scratch);
+ bind(&len4);
+ mov(scratch, Operand(source, 0));
+ mov(Operand(destination, 0), scratch);
+ add(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
test(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
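
Note: the rewritten CopyBytes relies on an overlapping-tail trick: the (possibly unaligned) last four bytes are stored first, after which any copy covering a multiple of four bytes from the start completes the range. A standalone C++ rendering (CopyBytesDemo is illustrative; it assumes non-overlapping buffers, as the callers guarantee):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyBytesDemo(const uint8_t* src, uint8_t* dst, size_t len) {
      if (len < 4) {                                  // short_string path
        for (size_t i = 0; i < len; i++) dst[i] = src[i];
        return;
      }
      std::memcpy(dst + len - 4, src + len - 4, 4);   // odd tail bytes first
      for (size_t i = 0; i + 4 <= len; i += 4) {      // then whole words
        std::memcpy(dst + i, src + i, 4);
      }
    }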
@@ -2096,8 +2187,6 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2109,8 +2198,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -2141,23 +2229,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -2172,7 +2246,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
+ : kDontSaveFPRegs);
CallStub(&ces);
}
@@ -2221,11 +2296,13 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Operand thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
@@ -2281,9 +2358,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Label prologue;
// Load the value from ReturnValue
- mov(eax, Operand(ebp, return_value_offset * kPointerSize));
+ mov(eax, return_value_operand);
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2303,6 +2381,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -2339,11 +2418,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
@@ -2543,7 +2630,7 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -2552,18 +2639,25 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- // Get the function and setup the context.
- LoadHeapObject(edi, function);
+ ASSERT(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
expected, actual, flag, call_wrapper, call_kind);
}
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ LoadHeapObject(edi, function);
+ InvokeFunction(edi, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -2980,6 +3074,40 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(eax);
+ push(Immediate(Smi::FromInt(reason)));
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // will not return here
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+ // will not return here
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3003,6 +3131,88 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(this, SSE2);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ } else {
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ }
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
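
Note: the cache index computed above, in plain C++ (illustration only): a smi hashes to its untagged value, a heap number to the xor of the two 32-bit halves of its IEEE-754 bits, and either hash is masked to the cache size.

    #include <cstdint>
    #include <cstring>

    uint32_t HeapNumberCacheIndex(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t hash =
          static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
      return hash & mask;  // mask = (cache length / 2) - 1, as built above
    }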
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
@@ -3063,6 +3273,42 @@ void MacroAssembler::JumpIfNotUniqueName(Operand operand,
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object, Label::kNear);
+ Throw(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmp(value, Immediate(encoding_mask));
+ pop(value);
+ ThrowIf(not_equal, kUnexpectedStringType);
+
+  // The index is assumed to come in untagged. Tag it to compare against the
+  // string length without using a temp register; it is untagged again at the
+  // end of this function.
+ SmiTag(index);
+  // Can't use overflow directly; the compiler can't seem to disambiguate it.
+ ThrowIf(NegateCondition(no_overflow), kIndexIsTooLarge);
+
+ cmp(index, FieldOperand(string, String::kLengthOffset));
+ ThrowIf(greater_equal, kIndexIsTooLarge);
+
+ cmp(index, Immediate(Smi::FromInt(0)));
+ ThrowIf(less, kIndexIsNegative);
+
+ // Restore the index
+ SmiUntag(index);
+}
+
+
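
Note: SmiTag on ia32 is a single self-add, since smis are 31-bit integers shifted left by one; the overflow test above catches indices too large to tag. A standalone equivalent (SmiTagNoOverflow is illustrative):

    #include <cstdint>

    // Returns false when v does not fit in a 31-bit smi -- the case the
    // overflow flag signals after the tagging add.
    bool SmiTagNoOverflow(int32_t v, int32_t* tagged) {
      if (v > 0x3FFFFFFF || v < -0x40000000) return false;
      *tagged = v * 2;  // tag bit is 0; value occupies the upper 31 bits
      return true;
    }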
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
@@ -3379,7 +3625,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+ cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
j(equal, call_runtime);
jmp(&start);
@@ -3408,9 +3654,8 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
-
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -3419,15 +3664,40 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ Immediate(isolate()->factory()->allocation_memento_map()));
}
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // Start the walk at the object itself.
+ mov(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Map::kElementsKindMask);
+ shr(scratch1, Map::kElementsKindShift);
+ cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(not_equal, &loop_again);
+}
+
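
Note: the loop above is the assembly form of a straightforward prototype-chain walk. A sketch with hypothetical types (not V8's real heap layout):

    struct Object;
    struct Map { int elements_kind; Object* prototype; };
    struct Object { Map* map; };

    // Mirrors the generated code: load the map, test its elements kind,
    // follow the prototype link until the null sentinel is reached.
    bool HasDictionaryElementsInChain(Object* obj, Object* null_value,
                                      int dictionary_elements_kind) {
      for (Object* current = obj; current != null_value;
           current = current->map->prototype) {
        if (current->map->elements_kind == dictionary_elements_kind)
          return true;
      }
      return false;
    }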
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.h b/chromium/v8/src/ia32/macro-assembler-ia32.h
index e4e4533bf5f..054b164846d 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.h
@@ -61,6 +61,9 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(Register src, const Operand& dst, Representation r);
+
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
@@ -225,6 +228,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -240,7 +246,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -343,6 +349,13 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
+
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -366,6 +379,12 @@ class MacroAssembler: public Assembler {
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+  // register, which hinders register renaming and makes dependence chains
+  // longer. So we use xorps to clear the dst register before cvtsi2sd.
+ void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+ void Cvtsi2sd(XMMRegister dst, const Operand& src);
+
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeSet(Register dst, const Immediate& x);
@@ -408,13 +427,8 @@ class MacroAssembler: public Assembler {
bool specialize_for_processor,
int offset = 0);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success);
+ // Compare an object's map with the specified map.
+ void CompareMap(Register obj, Handle<Map> map);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -509,6 +523,7 @@ class MacroAssembler: public Assembler {
}
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32NoSSE2(Register src);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value,
@@ -575,6 +590,12 @@ class MacroAssembler: public Assembler {
// Throw past all JS frames to the top JS entry frame.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -754,11 +775,20 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
@@ -807,7 +837,8 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- int return_value_offset_from_ebp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -881,8 +912,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -890,6 +919,17 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number
+ // in the object register is found in the cache, the generated code falls
+ // through with the result in the result register. The object and result
+ // registers can be the same. If the number is not found in the cache, the
+ // code jumps to the label not_found, leaving the contents of the object
+ // register unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
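// Hypothetical call site (register choices are illustrative only):
//
//   Label not_found;
//   // Cache hit: falls through with the cached string in eax.
//   // Cache miss: jumps to not_found with eax (the number) unchanged.
//   masm->LookupNumberStringCache(eax, eax, ebx, ecx, &not_found);
//   masm->ret(0);
//   masm->bind(&not_found);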
// Check whether the instance type represents a flat ASCII string. Jump to
// the label if not. If the instance type can be scratched, specify the same
// register for both instance type and scratch.
@@ -914,6 +954,11 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
}
@@ -931,13 +976,27 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, conditional code is set to equal
+ // If allocation info is present, conditional code is set to equal.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to the found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
private:
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -957,7 +1016,7 @@ class MacroAssembler: public Assembler {
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/chromium/v8/src/ia32/simulator-ia32.cc b/chromium/v8/src/ia32/simulator-ia32.cc
index ab8169375c0..b6f2847332e 100644
--- a/chromium/v8/src/ia32/simulator-ia32.cc
+++ b/chromium/v8/src/ia32/simulator-ia32.cc
@@ -27,4 +27,3 @@
// Since there is no simulator for the ia32 architecture this file is empty.
-
diff --git a/chromium/v8/src/ia32/stub-cache-ia32.cc b/chromium/v8/src/ia32/stub-cache-ia32.cc
index bebd7bebc9a..9efedc67325 100644
--- a/chromium/v8/src/ia32/stub-cache-ia32.cc
+++ b/chromium/v8/src/ia32/stub-cache-ia32.cc
@@ -325,32 +325,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string, leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, JS_VALUE_TYPE);
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -409,11 +405,11 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate()),
+ ExternalReference(IC_Utility(id), masm->isolate()),
StubCache::kInterceptorArgsLength);
}
@@ -455,53 +451,151 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
}
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context);
+
+
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : ReturnValue default value
- // -- esp[24] : ReturnValue
- // -- esp[28] : last argument
- // -- ...
- // -- esp[(argc + 6) * 4] : first argument
- // -- esp[(argc + 7) * 4] : receiver
- // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
+ // Construct the FunctionCallbackInfo.
+ __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize),
+ Immediate(call_data));
}
- __ mov(Operand(esp, 4 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize),
Immediate(reinterpret_cast<int>(masm->isolate())));
- __ mov(Operand(esp, 5 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize),
masm->isolate()->factory()->undefined_value());
- __ mov(Operand(esp, 6 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize),
masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
- __ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(eax, Operand(esp, 1 * kPointerSize));
+
+ GenerateFastApiCallBody(masm, optimization, argc, false);
+}
+
+
+// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that will be removed
+// when API call ICs are generated in Hydrogen.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+
+ // Copy return value.
+ __ pop(scratch1);
+
+ // receiver
+ __ push(receiver);
+
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch1.is(arg));
+ ASSERT(!scratch2.is(arg));
+ ASSERT(!scratch3.is(arg));
+ __ push(arg);
+ }
+
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(esi);
+
+ // Get the function and setup the context.
+ Handle<JSFunction> function = optimization.constant_function();
+ __ LoadHeapObject(scratch2, function);
+ __ mov(esi, FieldOperand(scratch2, JSFunction::kContextOffset));
+ // callee
+ __ push(scratch2);
+
+ Isolate* isolate = masm->isolate();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data(), isolate);
+ // Push data from CallHandlerInfo.
+ if (isolate->heap()->InNewSpace(*call_data)) {
+ __ mov(scratch2, api_call_info);
+ __ mov(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
+ __ push(scratch3);
+ } else {
+ __ push(Immediate(call_data));
+ }
+ // return value
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(isolate->factory()->undefined_value()));
+ // isolate
+ __ push(Immediate(reinterpret_cast<int>(isolate)));
+ // holder
+ __ push(receiver);
+
+ // Store the receiver address for GenerateFastApiCallBody.
+ ASSERT(!scratch1.is(eax));
+ __ mov(eax, esp);
+ // return address
+ __ push(scratch1);
+
+ GenerateFastApiCallBody(masm, optimization, argc, true);
+}
+
+
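// For orientation (reconstructed from the pushes above, not part of the
// patch), the stack now looks like this, top to bottom:
//
//   esp[0]  : return address (re-pushed scratch1)
//   esp[4]  : holder               <- eax, the FunctionCallbackInfo base
//   esp[8]  : isolate
//   esp[12] : ReturnValue default value
//   esp[16] : ReturnValue
//   esp[20] : call data
//   esp[24] : callee (JSFunction)
//   esp[28] : saved context (esi)
//   esp[32] : values[0]
//   ...     : values[argc - 1]
//           : receiver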
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- esp[32] : last argument
+ // -- ...
+ // -- esp[(argc + 7) * 4] : first argument
+ // -- esp[(argc + 8) * 4] : receiver
+ //
+ // -- eax : receiver address
+ // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
// API function gets a reference to the v8::Arguments. If the CPU profiler
// is enabled, the wrapper function will be called and we need to pass
@@ -513,18 +607,20 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// it's not controlled by GC.
const int kApiStackSpace = 4;
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
- // v8::Arguments::implicit_args_.
+ // FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), eax);
- __ add(eax, Immediate(argc * kPointerSize));
- // v8::Arguments::values_.
+ __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
__ mov(ApiParameterOperand(3), eax);
- // v8::Arguments::length_.
+ // FunctionCallbackInfo::length_.
__ Set(ApiParameterOperand(4), Immediate(argc));
- // v8::Arguments::is_construct_call_.
+ // FunctionCallbackInfo::is_construct_call_.
__ Set(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
@@ -533,57 +629,29 @@ static void GenerateFastApiCall(MacroAssembler* masm,
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+ Operand return_value_operand(ebp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
ApiParameterOperand(1),
argc + kFastApiCallArguments + 1,
- kFastApiCallArguments + 1);
-}
-
-
-// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Register receiver,
- Register scratch,
- int argc,
- Register* values) {
- ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
-
- const int stack_space = kFastApiCallArguments + argc + 1;
- // Copy return value.
- __ mov(scratch, Operand(esp, 0));
- // Assign stack space for the call arguments.
- __ sub(esp, Immediate(stack_space * kPointerSize));
- // Move the return address on top of the stack.
- __ mov(Operand(esp, 0), scratch);
- // Write holder to stack frame.
- __ mov(Operand(esp, 1 * kPointerSize), receiver);
- // Write receiver to stack frame.
- int index = stack_space;
- __ mov(Operand(esp, index-- * kPointerSize), receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ mov(Operand(esp, index-- * kPointerSize), values[i]);
- }
-
- GenerateFastApiCall(masm, optimization, argc);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
+ CallInterceptorCompiler(CallStubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name,
- Code::ExtraICState extra_state)
+ ExtraICState extra_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
- name_(name),
- extra_state_(extra_state) {}
+ name_(name) {}
void Compile(MacroAssembler* masm,
Handle<JSObject> object,
@@ -654,9 +722,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -670,10 +739,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
+ handle(lookup->holder()), scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -686,13 +755,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- Handle<JSFunction> function = optimization.constant_function();
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ Handle<JSFunction> fun = optimization.constant_function();
+ stub_compiler_->GenerateJumpFunction(object, fun);
}
// Deferred code for the fast API call case: clean up the preallocated space.
@@ -719,20 +783,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, interceptor_holder,
+ IC::kLoadPropertyWithInterceptorForCall);
// Restore the name_ register.
__ pop(name_);
@@ -747,17 +808,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
+ __ push(receiver);
+ __ push(holder);
+ __ push(name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, holder_obj,
+ IC::kLoadPropertyWithInterceptorOnly);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
+ __ pop(name_);
+ __ pop(holder);
+ __ pop(receiver);
// Leave the internal frame.
}
@@ -765,16 +826,15 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ j(not_equal, interceptor_succeeded);
}
- StubCompiler* stub_compiler_;
+ CallStubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
- Code::ExtraICState extra_state_;
};
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Immediate(name));
@@ -805,7 +865,7 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -823,19 +883,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// The receiver_reg is preserved on jumps to miss_label, but may be
// destroyed if the store is successful.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -858,7 +918,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -872,7 +932,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
@@ -880,7 +940,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
@@ -994,15 +1054,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if the store is successful.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1037,7 +1097,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -1050,14 +1110,14 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
}
@@ -1115,26 +1175,6 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -1144,7 +1184,7 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -1154,11 +1194,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Handle<Map>(object->map()));
+ __ mov(scratch1, receiver_map);
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1166,31 +1206,38 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Keep track of the current object in register reg.
Register reg = object_reg;
- Handle<JSObject> current = object;
int depth = 0;
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
// Traverse the prototype chain and check the maps in the prototype chain
// for fast and global objects, or do a negative lookup for normal objects.
- while (!current.is_identical_to(holder)) {
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1201,16 +1248,19 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
if (in_new_space) {
@@ -1231,70 +1281,65 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
- ASSERT(current.is_identical_to(holder));
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1340,15 +1385,15 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1364,34 +1409,33 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), name(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
__ push(receiver()); // receiver
- __ mov(scratch2(), esp);
- ASSERT(!scratch2().is(reg));
// Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- Register scratch = reg.is(scratch1()) ? receiver() : scratch1();
- __ mov(scratch, Immediate(callback));
- __ push(FieldOperand(scratch, ExecutableAccessorInfo::kDataOffset));
+ ASSERT(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
} else {
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
@@ -1401,9 +1445,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(reg); // holder
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
- __ push(scratch2());
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ push(esp);
__ push(name()); // name
__ mov(ebx, esp); // esp points to reference to name (handler).
@@ -1434,20 +1478,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_address,
ApiParameterOperand(2),
kStackSpace,
- 7);
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
}
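// For orientation (the standard accessor-callback shape of this V8 API
// generation, stated as an annotation rather than taken from the patch):
// the C++ getter reached through this thunk has the form
//
//   void Getter(Local<String> property,
//               const PropertyCallbackInfo<Value>& info);
//
// and the esp value pushed above backs info's argument block.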
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(eax, value);
__ ret(0);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1498,11 +1543,9 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from the receiver to the
// interceptor's holder have been compiled before (see a caller of this
// method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -1554,22 +1597,12 @@ void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
+void CallStubCompiler::GenerateFunctionCheck(Register function,
+ Register scratch,
+ Label* miss) {
+ __ JumpIfSmi(function, miss);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+ __ j(not_equal, miss);
}
@@ -1592,9 +1625,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ JumpIfSmi(edi, miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss);
+ GenerateFunctionCheck(edi, ebx, miss);
// Check the shared function info. Make sure it hasn't changed.
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
@@ -1610,7 +1641,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_state_);
+ extra_state());
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -1619,57 +1650,20 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
- name, &miss);
+ Register reg = HandlerFrontendHeader(
+ object, holder, name, RECEIVER_MAP_CHECK, &miss);
GenerateFastPropertyLoad(
masm(), edi, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
+ GenerateJumpFunction(object, edi, &miss);
- // Check that the function really is a function.
- __ JumpIfSmi(edi, &miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::FIELD, name);
+ return GetCode(Code::FAST, name);
}
@@ -1682,28 +1676,16 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
Code::StubType type) {
Label miss;
- // Check that function is still array
- const int argc = arguments().immediate();
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ site->SetElementsKind(GetInitialFastElementsKind());
Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ const int argc = arguments().immediate();
__ mov(eax, Immediate(argc));
__ mov(ebx, site_feedback_cell);
__ mov(edi, function);
@@ -1711,8 +1693,7 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
ArrayConstructorStub stub(isolate());
__ TailCallStub(&stub);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -1726,33 +1707,20 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss;
- GenerateNameCheck(name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
if (argc == 0) {
// Noop, return the length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1970,8 +1938,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
1);
}
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -1985,31 +1952,18 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
return Handle<Code>::null();
}
Label miss, return_undefined, call_builtin;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -2041,6 +1995,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
ecx, times_half_pointer_size,
FixedArray::kHeaderSize),
Immediate(factory()->the_hole_value()));
+ const int argc = arguments().immediate();
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
@@ -2053,8 +2008,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
argc + 1,
1);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2068,14 +2022,6 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) {
return Handle<Code>::null();
@@ -2089,22 +2035,12 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = ebx;
Register index = edi;
@@ -2138,8 +2074,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2153,14 +2088,6 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) {
return Handle<Code>::null();
@@ -2174,22 +2101,12 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = eax;
Register index = edi;
@@ -2225,8 +2142,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2240,14 +2156,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
@@ -2257,18 +2165,10 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
}
Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2291,19 +2191,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2317,14 +2210,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
if (!CpuFeatures::IsSupported(SSE2)) {
return Handle<Code>::null();
}
@@ -2340,20 +2225,10 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2368,7 +2243,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
@@ -2418,7 +2293,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
@@ -2426,16 +2301,12 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ ret(2 * kPointerSize);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2449,14 +2320,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
@@ -2466,20 +2329,10 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2535,16 +2388,12 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
__ ret(2 * kPointerSize);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2588,8 +2437,8 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
__ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, depth, &miss);
+ CheckPrototypes(IC::CurrentTypeOf(object, isolate()), edx, holder,
+ ebx, eax, edi, name, depth, &miss);
// Move the return address on top of the stack.
__ mov(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
@@ -2602,36 +2451,50 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss_before_stack_reserved);
// Return the generated code.
return GetCode(function);
}
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ Label success;
+ // Check that the object is a boolean.
+ __ cmp(object, factory()->true_value());
+ __ j(equal, &success);
+ __ cmp(object, factory()->false_value());
+ __ j(not_equal, miss);
+ __ bind(&success);
+}
+
+
+void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
+ if (object->IsGlobalObject()) {
+ const int argc = arguments().immediate();
+ const int receiver_offset = (argc + 1) * kPointerSize;
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, receiver_offset), edx);
+ }
+}
+
+
+Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* miss) {
+ GenerateNameCheck(name, miss);
+
+ Register reg = edx;
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ const int receiver_offset = (argc + 1) * kPointerSize;
+ __ mov(reg, Operand(esp, receiver_offset));
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(edx, &miss);
+ __ JumpIfSmi(reg, miss);
}
// Make sure that it's okay not to patch the on-stack receiver
@@ -2642,129 +2505,79 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
__ IncrementCounter(isolate()->counters()->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
- edi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
+ reg = CheckPrototypes(IC::CurrentTypeOf(object, isolate()), reg, holder,
+ ebx, eax, edi, name, miss);
+
break;
- case STRING_CHECK:
+ case STRING_CHECK: {
// Check that the object is a string.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss);
+ __ CmpObjectType(reg, FIRST_NONSTRING_TYPE, eax);
+ __ j(above_equal, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::STRING_FUNCTION_INDEX, eax, miss);
break;
-
- case SYMBOL_CHECK:
+ }
+ case SYMBOL_CHECK: {
// Check that the object is a symbol.
- __ CmpObjectType(edx, SYMBOL_TYPE, eax);
- __ j(not_equal, &miss);
+ __ CmpObjectType(reg, SYMBOL_TYPE, eax);
+ __ j(not_equal, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::SYMBOL_FUNCTION_INDEX, eax, miss);
break;
-
+ }
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
- __ JumpIfSmi(edx, &fast);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss);
+ __ JumpIfSmi(reg, &fast);
+ __ CmpObjectType(reg, HEAP_NUMBER_TYPE, eax);
+ __ j(not_equal, miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::NUMBER_FUNCTION_INDEX, eax, miss);
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&fast);
+ GenerateBooleanCheck(reg, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, miss);
break;
}
}
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
+ if (check != RECEIVER_MAP_CHECK) {
+ Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
+ reg = CheckPrototypes(
+ IC::CurrentTypeOf(prototype, isolate()),
+ eax, holder, ebx, edx, edi, name, miss);
+ }
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ return reg;
}
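A sketch of the dispatch the handler frontend performs for value receivers; the enum values below mirror the cases above but are local stand-ins, not the exact V8 declarations. Each value check is paired with the native-context function whose prototype chain the stub then verifies.

    enum CheckType { RECEIVER_MAP_CHECK, STRING_CHECK, SYMBOL_CHECK,
                     NUMBER_CHECK, BOOLEAN_CHECK };
    enum PrototypeIndex { STRING_FUNCTION_INDEX, SYMBOL_FUNCTION_INDEX,
                          NUMBER_FUNCTION_INDEX, BOOLEAN_FUNCTION_INDEX,
                          NO_PROTOTYPE_INDEX };

    // For value receivers, the frontend type-checks the value itself, then
    // loads the matching global function's prototype for the map checks.
    PrototypeIndex PrototypeIndexFor(CheckType check) {
      switch (check) {
        case STRING_CHECK:       return STRING_FUNCTION_INDEX;
        case SYMBOL_CHECK:       return SYMBOL_FUNCTION_INDEX;
        case NUMBER_CHECK:       return NUMBER_FUNCTION_INDEX;
        case BOOLEAN_CHECK:      return BOOLEAN_FUNCTION_INDEX;
        case RECEIVER_MAP_CHECK: return NO_PROTOTYPE_INDEX;
      }
      return NO_PROTOTYPE_INDEX;
    }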
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
+void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
+ Register function,
+ Label* miss) {
+ // Check that the function really is a function.
+ GenerateFunctionCheck(function, ebx, miss);
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
+ if (!function.is(edi)) __ mov(edi, function);
+ PatchGlobalProxy(object);
- // Return the generated code.
- return GetCode(function);
+ // Invoke the function.
+ __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind());
}
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
@@ -2778,39 +2591,19 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
+ CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state());
compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
&miss);
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Check that the function really is a function.
- __ JumpIfSmi(eax, &miss);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
+ GenerateJumpFunction(object, eax, &miss);
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- __ mov(edi, eax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetCode(Code::FAST, name);
}
@@ -2820,14 +2613,6 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
object, holder, cell, function, Handle<String>::cast(name),
@@ -2837,40 +2622,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ // Potentially loads a closure that matches the shared function info of the
+ // function, rather than the function itself.
Note: intended reading of the two comment lines above and below — the cell may hold a closure that merely shares the function's SharedFunctionInfo.
GenerateLoadFunctionFromCell(cell, function, &miss);
+ GenerateJumpFunction(object, edi, function);
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Set up the context (function already in edi).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::NORMAL, name);
@@ -2882,9 +2640,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -2899,7 +2656,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ TailCallExternalReference(store_callback_property, 4, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2908,16 +2665,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch1(), 1, values);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), this->name(), 1, values);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2975,16 +2732,15 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ push(receiver());
__ push(this->name());
__ push(value());
- __ push(Immediate(Smi::FromInt(strict_mode())));
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -3016,23 +2772,18 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ mov(eax, isolate()->factory()->undefined_value());
__ ret(0);
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -3085,18 +2836,14 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
@@ -3119,16 +2866,14 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<Type> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
if (Serializer::enabled()) {
__ mov(eax, Immediate(cell));
@@ -3146,8 +2891,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
+ HandlerFrontendFooter(name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
@@ -3155,12 +2899,12 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ ret(0);
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -3171,16 +2915,24 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
GenerateNameCheck(name, this->name(), &miss);
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<Type> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ cmp(map_reg, map);
+ if (type->Is(Type::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ j(equal, handlers->at(current));
}
}
@@ -3207,11 +2959,11 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(ecx, &miss_force_generic);
+ __ JumpIfNotSmi(ecx, &miss);
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3234,13 +2986,13 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
diff --git a/chromium/v8/src/ic-inl.h b/chromium/v8/src/ic-inl.h
index e6ff2daa62b..24a939dedbf 100644
--- a/chromium/v8/src/ic-inl.h
+++ b/chromium/v8/src/ic-inl.h
@@ -86,8 +86,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
// ICs as strict mode. The strict-ness of the IC must be preserved.
if (old_target->kind() == Code::STORE_IC ||
old_target->kind() == Code::KEYED_STORE_IC) {
- ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) ==
- Code::GetStrictMode(target->extra_ic_state()));
+ ASSERT(StoreIC::GetStrictMode(old_target->extra_ic_state()) ==
+ StoreIC::GetStrictMode(target->extra_ic_state()));
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
@@ -100,11 +100,9 @@ void IC::SetTargetAtAddress(Address address, Code* target) {
}
-InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
- JSObject* holder) {
- if (object->IsJSObject()) {
- return GetCodeCacheForObject(JSObject::cast(object), holder);
- }
+InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object) {
+ if (object->IsJSObject()) return OWN_MAP;
+
// If the object is a value, we use the prototype map for the cache.
ASSERT(object->IsString() || object->IsSymbol() ||
object->IsNumber() || object->IsBoolean());
@@ -112,30 +110,46 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
}
-InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
- JSObject* holder) {
- // Fast-properties and global objects store stubs in their own maps.
- // Slow properties objects use prototype's map (unless the property is its own
- // when holder == object). It works because slow properties objects having
- // the same prototype (or a prototype with the same map) and not having
- // the property are interchangeable for such a stub.
- if (holder != object &&
- !object->HasFastProperties() &&
- !object->IsJSGlobalProxy() &&
- !object->IsJSGlobalObject()) {
+HeapObject* IC::GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
+ InlineCacheHolderFlag holder) {
+ if (object->IsSmi()) holder = PROTOTYPE_MAP;
+ Object* map_owner = holder == OWN_MAP
+ ? object : object->GetPrototype(isolate);
+ return HeapObject::cast(map_owner);
+}
+
+
+InlineCacheHolderFlag IC::GetCodeCacheFlag(Type* type) {
+ if (type->Is(Type::Boolean()) ||
+ type->Is(Type::Number()) ||
+ type->Is(Type::String()) ||
+ type->Is(Type::Symbol())) {
return PROTOTYPE_MAP;
}
return OWN_MAP;
}
-JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
- Object* object,
- InlineCacheHolderFlag holder) {
- Object* map_owner =
- holder == OWN_MAP ? object : object->GetPrototype(isolate);
- ASSERT(map_owner->IsJSObject());
- return JSObject::cast(map_owner);
+Handle<Map> IC::GetCodeCacheHolder(InlineCacheHolderFlag flag,
+ Type* type,
+ Isolate* isolate) {
+ if (flag == PROTOTYPE_MAP) {
+ Context* context = isolate->context()->native_context();
+ JSFunction* constructor;
+ if (type->Is(Type::Boolean())) {
+ constructor = context->boolean_function();
+ } else if (type->Is(Type::Number())) {
+ constructor = context->number_function();
+ } else if (type->Is(Type::String())) {
+ constructor = context->string_function();
+ } else {
+ ASSERT(type->Is(Type::Symbol()));
+ constructor = context->symbol_function();
+ }
+ return handle(JSObject::cast(constructor->instance_prototype())->map());
+ }
+ return TypeToMap(type, isolate);
}
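The same selection logic as a standalone sketch (toy enums for illustration; the real code resolves the constructor from the native context as shown above): value types cache handlers on the map of their wrapper constructor's prototype, while ordinary JSObjects cache on their own map.

    enum class PrimType { Boolean, Number, String, Symbol, Object };
    enum class Holder { OwnMap, PrototypeMap };

    // Mirrors IC::GetCodeCacheFlag: primitives use the prototype map,
    // everything else its own map.
    Holder CacheHolderFor(PrimType t) {
      return t == PrimType::Object ? Holder::OwnMap : Holder::PrototypeMap;
    }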
diff --git a/chromium/v8/src/ic.cc b/chromium/v8/src/ic.cc
index 55187514f90..fc1ca53290e 100644
--- a/chromium/v8/src/ic.cc
+++ b/chromium/v8/src/ic.cc
@@ -71,19 +71,16 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target) {
+ Handle<Object> name) {
if (FLAG_trace_ic) {
- Object* undef = new_target->GetHeap()->undefined_value();
- State new_state = StateFrom(new_target, undef, undef);
- PrintF("[%s in ", type);
- Isolate* isolate = new_target->GetIsolate();
- StackFrameIterator it(isolate);
+ Code* new_target = raw_target();
+ State new_state = new_target->ic_state();
+ PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
+ StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Code* apply_builtin = isolate->builtins()->builtin(
+ Code* apply_builtin = isolate()->builtins()->builtin(
Builtins::kFunctionApply);
if (raw_frame->unchecked_code() == apply_builtin) {
PrintF("apply from ");
@@ -91,12 +88,13 @@ void IC::TraceIC(const char* type,
raw_frame = it.frame();
}
}
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- Code::ExtraICState state = new_target->extra_ic_state();
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ ExtraICState extra_state = new_target->extra_ic_state();
const char* modifier =
- GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(state));
+ GetTransitionMarkModifier(
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
PrintF(" (%c->%c%s)",
- TransitionMarkFromState(old_state),
+ TransitionMarkFromState(state()),
TransitionMarkFromState(new_state),
modifier);
name->Print();
@@ -117,10 +115,12 @@ void IC::TraceIC(const char* type,
#define TRACE_GENERIC_IC(isolate, type, reason)
#endif // DEBUG
-#define TRACE_IC(type, name, old_state, new_target) \
- ASSERT((TraceIC(type, name, old_state, new_target), true))
+#define TRACE_IC(type, name) \
+ ASSERT((TraceIC(type, name), true))
-IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
+IC::IC(FrameDepth depth, Isolate* isolate)
+ : isolate_(isolate),
+ target_set_(false) {
// To improve the performance of the (much used) IC code, we unfold a few
// levels of the stack frame iteration code. This yields a ~35% speedup when
// running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
@@ -145,6 +145,8 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
#endif
fp_ = fp;
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
+ target_ = handle(raw_target(), isolate);
+ state_ = target_->ic_state();
}
@@ -179,32 +181,128 @@ Address IC::OriginalCodeAddress() const {
#endif
-static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
- Object* receiver,
- Object* name) {
- if (target->is_keyed_load_stub() ||
- target->is_keyed_call_stub() ||
- target->is_keyed_store_stub()) {
+static bool HasInterceptorGetter(JSObject* object) {
+ return !object->GetNamedInterceptor()->getter()->IsUndefined();
+}
+
+
+static bool HasInterceptorSetter(JSObject* object) {
+ return !object->GetNamedInterceptor()->setter()->IsUndefined();
+}
+
+
+static void LookupForRead(Handle<Object> object,
+ Handle<String> name,
+ LookupResult* lookup) {
+ // Skip all objects that have named interceptors but no actual getter.
+ while (true) {
+ object->Lookup(*name, lookup);
+ // Besides the normal conditions (property not found, or it is not an
+ // interceptor), bail out if the lookup is not cacheable: we won't be able
+ // to IC it anyway, and regular lookup should work fine.
+ if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
+ return;
+ }
+
+ Handle<JSObject> holder(lookup->holder(), lookup->isolate());
+ if (HasInterceptorGetter(*holder)) {
+ return;
+ }
+
+ holder->LocalLookupRealNamedProperty(*name, lookup);
+ if (lookup->IsFound()) {
+ ASSERT(!lookup->IsInterceptor());
+ return;
+ }
+
+ Handle<Object> proto(holder->GetPrototype(), lookup->isolate());
+ if (proto->IsNull()) {
+ ASSERT(!lookup->IsFound());
+ return;
+ }
+
+ object = proto;
+ }
+}
+
+
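A toy model of the walk LookupForRead performs, using hypothetical structs rather than V8's LookupResult machinery: an interceptor that actually has a getter stops the walk (the IC will use an interceptor handler), while an interceptor without one is treated as transparent and the chain walk continues from its holder.

    struct ToyObject {
      const int* own_property;      // non-null if the name is a real own property
      bool has_interceptor;         // named interceptor present
      bool interceptor_has_getter;  // ...and it actually implements a getter
      const ToyObject* prototype;   // null at the end of the chain
    };

    struct LookupOutcome { bool use_interceptor; const int* value; };

    LookupOutcome ToyLookupForRead(const ToyObject* o) {
      for (; o != nullptr; o = o->prototype) {
        if (o->has_interceptor && o->interceptor_has_getter)
          return {true, nullptr};           // cacheable interceptor load
        if (o->own_property != nullptr)
          return {false, o->own_property};  // real property found
      }
      return {false, nullptr};              // absent on the whole chain
    }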
+bool CallIC::TryUpdateExtraICState(LookupResult* lookup,
+ Handle<Object> object) {
+ if (!lookup->IsConstantFunction()) return false;
+ JSFunction* function = lookup->GetConstantFunction();
+ if (!function->shared()->HasBuiltinFunctionId()) return false;
+
+ // Fetch the arguments passed to the called function.
+ const int argc = target()->arguments_count();
+ Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
+ Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+ Arguments args(argc + 1,
+ &Memory::Object_at(fp +
+ StandardFrameConstants::kCallerSPOffset +
+ argc * kPointerSize));
+ switch (function->shared()->builtin_function_id()) {
+ case kStringCharCodeAt:
+ case kStringCharAt:
+ if (object->IsString()) {
+ String* string = String::cast(*object);
+ // Check there's the right string value or wrapper in the receiver slot.
+ ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
+ // If we're in the default (fastest) state and the index is
+ // out of bounds, update the state to record this fact.
+ if (StringStubState::decode(extra_ic_state()) == DEFAULT_STRING_STUB &&
+ argc >= 1 && args[1]->IsNumber()) {
+ double index = DoubleToInteger(args.number_at(1));
+ if (index < 0 || index >= string->length()) {
+ extra_ic_state_ =
+ StringStubState::update(extra_ic_state(),
+ STRING_INDEX_OUT_OF_BOUNDS);
+ return true;
+ }
+ }
+ }
+ break;
+ default:
+ return false;
+ }
+ return false;
+}
+
+
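The extra-IC-state update above is a one-way latch; a compact model of just that transition (names are local stand-ins mirroring the enum used above):

    #include <cstddef>

    enum StringStubState { DEFAULT_STRING_STUB, STRING_INDEX_OUT_OF_BOUNDS };

    // The IC starts in the fast state and latches the out-of-bounds state
    // the first time charAt/charCodeAt sees an index outside [0, length).
    StringStubState UpdateStringStubState(StringStubState state,
                                          double index, std::size_t length) {
      if (state == DEFAULT_STRING_STUB &&
          (index < 0 || index >= static_cast<double>(length))) {
        return STRING_INDEX_OUT_OF_BOUNDS;
      }
      return state;
    }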
+bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name) {
+ if (target()->is_call_stub()) {
+ LookupResult lookup(isolate());
+ LookupForRead(receiver, name, &lookup);
+ if (static_cast<CallIC*>(this)->TryUpdateExtraICState(&lookup, receiver)) {
+ return true;
+ }
+ }
+
+ if (target()->is_keyed_stub()) {
// Determine whether the failure is due to a name failure.
if (!name->IsName()) return false;
- Name* stub_name = target->FindFirstName();
- if (Name::cast(name) != stub_name) return false;
+ Name* stub_name = target()->FindFirstName();
+ if (*name != stub_name) return false;
}
InlineCacheHolderFlag cache_holder =
- Code::ExtractCacheHolderFromFlags(target->flags());
+ Code::ExtractCacheHolderFromFlags(target()->flags());
- Isolate* isolate = target->GetIsolate();
- if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
- // The stub was generated for JSObject but called for non-JSObject.
- // IC::GetCodeCacheHolder is not applicable.
- return false;
- } else if (cache_holder == PROTOTYPE_MAP &&
- receiver->GetPrototype(isolate)->IsNull()) {
- // IC::GetCodeCacheHolder is not applicable.
- return false;
+ switch (cache_holder) {
+ case OWN_MAP:
+ // The stub was generated for JSObject but called for non-JSObject.
+ // IC::GetCodeCacheHolder is not applicable.
+ if (!receiver->IsJSObject()) return false;
+ break;
+ case PROTOTYPE_MAP:
+ // IC::GetCodeCacheHolder is not applicable.
+ if (receiver->GetPrototype(isolate())->IsNull()) return false;
+ break;
}
- Map* map = IC::GetCodeCacheHolder(isolate, receiver, cache_holder)->map();
+
+ Handle<Map> map(
+ IC::GetCodeCacheHolder(isolate(), *receiver, cache_holder)->map());
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
@@ -214,20 +312,11 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
// the receiver map's code cache. Therefore, if the current target
// is in the receiver map's code cache, the inline cache failed due
// to prototype check failure.
- int index = map->IndexInCodeCache(name, target);
+ int index = map->IndexInCodeCache(*name, *target());
if (index >= 0) {
- map->RemoveFromCodeCache(String::cast(name), target, index);
- // For loads and stores, handlers are stored in addition to the ICs on the
- // map. Remove those, too.
- if ((target->is_load_stub() || target->is_keyed_load_stub() ||
- target->is_store_stub() || target->is_keyed_store_stub()) &&
- target->type() != Code::NORMAL) {
- Code* handler = target->FindFirstCode();
- index = map->IndexInCodeCache(name, handler);
- if (index >= 0) {
- map->RemoveFromCodeCache(String::cast(name), handler, index);
- }
- }
+ map->RemoveFromCodeCache(*name, *target(), index);
+ // Handlers are stored in addition to the ICs on the map. Remove those, too.
+ TryRemoveInvalidHandlers(map, name);
return true;
}
@@ -240,8 +329,8 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
// If the IC is shared between multiple receivers (slow dictionary mode), then
// the map cannot be deprecated and the stub invalidated.
if (cache_holder == OWN_MAP) {
- Map* old_map = target->FindFirstMap();
- if (old_map == map) return true;
+ Map* old_map = target()->FindFirstMap();
+ if (old_map == *map) return true;
if (old_map != NULL) {
if (old_map->is_deprecated()) return true;
if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
@@ -252,11 +341,9 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
}
if (receiver->IsGlobalObject()) {
- if (!name->IsName()) return false;
- Isolate* isolate = target->GetIsolate();
- LookupResult lookup(isolate);
- GlobalObject* global = GlobalObject::cast(receiver);
- global->LocalLookupRealNamedProperty(Name::cast(name), &lookup);
+ LookupResult lookup(isolate());
+ GlobalObject* global = GlobalObject::cast(*receiver);
+ global->LocalLookupRealNamedProperty(*name, &lookup);
if (!lookup.IsFound()) return false;
PropertyCell* cell = global->GetPropertyCell(&lookup);
return cell->type()->IsConstant();
@@ -266,21 +353,38 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
}
-IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
- IC::State state = target->ic_state();
+void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) {
+ CodeHandleList handlers;
+ target()->FindHandlers(&handlers);
+ for (int i = 0; i < handlers.length(); i++) {
+ Handle<Code> handler = handlers.at(i);
+ int index = map->IndexInCodeCache(*name, *handler);
+ if (index >= 0) {
+ map->RemoveFromCodeCache(*name, *handler, index);
+ return;
+ }
+ }
+}
+
- if (state != MONOMORPHIC || !name->IsString()) return state;
- if (receiver->IsUndefined() || receiver->IsNull()) return state;
+void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
+ if (!name->IsString()) return;
+ if (state() != MONOMORPHIC) {
+ if (state() == POLYMORPHIC && receiver->IsHeapObject()) {
+ TryRemoveInvalidHandlers(
+ handle(Handle<HeapObject>::cast(receiver)->map()),
+ Handle<String>::cast(name));
+ }
+ return;
+ }
+ if (receiver->IsUndefined() || receiver->IsNull()) return;
- Code::Kind kind = target->kind();
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
- // Call stubs handle this later to allow extra IC state
- // transitions.
- if (kind != Code::CALL_IC && kind != Code::KEYED_CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
- return MONOMORPHIC_PROTOTYPE_FAILURE;
+ if (TryRemoveInvalidPrototypeDependentStub(
+ receiver, Handle<String>::cast(name))) {
+ return MarkMonomorphicPrototypeFailure();
}
// The builtins object is special. It only changes when JavaScript
@@ -289,11 +393,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// an inline cache miss for the builtins object after lazily loading
// JavaScript builtins, we return uninitialized as the state to
// force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) {
- return UNINITIALIZED;
- }
-
- return MONOMORPHIC;
+ if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
}
@@ -403,7 +503,7 @@ void IC::Clear(Isolate* isolate, Address address) {
void CallICBase::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
Code* code =
target->GetIsolate()->stub_cache()->FindCallInitialize(
@@ -415,35 +515,33 @@ void CallICBase::Clear(Address address, Code* target) {
void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, *initialize_stub(isolate));
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
}
void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address, *initialize_stub(isolate));
+ if (IsCleared(target)) return;
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate));
}
void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict(isolate)
- : *initialize_stub(isolate));
+ *pre_monomorphic_stub(
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())));
}
void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
+ if (IsCleared(target)) return;
SetTargetAtAddress(address,
- (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict(isolate)
- : *initialize_stub(isolate));
+ *pre_monomorphic_stub(
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())));
}
@@ -460,47 +558,6 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
}
-static bool HasInterceptorGetter(JSObject* object) {
- return !object->GetNamedInterceptor()->getter()->IsUndefined();
-}
-
-
-static void LookupForRead(Handle<Object> object,
- Handle<String> name,
- LookupResult* lookup) {
- // Skip all the objects with named interceptors, but
- // without actual getter.
- while (true) {
- object->Lookup(*name, lookup);
- // Besides normal conditions (property not found or it's not
- // an interceptor), bail out if lookup is not cacheable: we won't
- // be able to IC it anyway and regular lookup should work fine.
- if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
- return;
- }
-
- Handle<JSObject> holder(lookup->holder(), lookup->isolate());
- if (HasInterceptorGetter(*holder)) {
- return;
- }
-
- holder->LocalLookupRealNamedProperty(*name, lookup);
- if (lookup->IsFound()) {
- ASSERT(!lookup->IsInterceptor());
- return;
- }
-
- Handle<Object> proto(holder->GetPrototype(), lookup->isolate());
- if (proto->IsNull()) {
- ASSERT(!lookup->IsFound());
- return;
- }
-
- object = proto;
- }
-}
-
-
Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object);
@@ -545,16 +602,18 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
}
-MaybeObject* CallICBase::LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
+static bool MigrateDeprecated(Handle<Object> object) {
+ if (!object->IsJSObject()) return false;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (!receiver->map()->is_deprecated()) return false;
+ JSObject::MigrateInstance(Handle<JSObject>::cast(object));
+ return true;
+}
+
+
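A toy version of MigrateDeprecated and how its callers use the result (hypothetical struct; the real code calls JSObject::MigrateInstance): migrating invalidates the cached map, so the IC fast path is skipped for this one request and the next miss records the new map.

    struct ToyReceiver { bool is_js_object; bool map_is_deprecated; };

    bool ToyMigrateDeprecated(ToyReceiver* r) {
      if (!r->is_js_object) return false;
      if (!r->map_is_deprecated) return false;
      r->map_is_deprecated = false;  // stands in for JSObject::MigrateInstance
      return true;
    }

    // Usage, matching the call sites in this patch:
    //   bool use_ic = ToyMigrateDeprecated(&receiver) ? false : flag_use_ic;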
+MaybeObject* CallICBase::LoadFunction(Handle<Object> object,
Handle<String> name) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
- }
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -590,9 +649,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
// Lookup is valid: Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, extra_ic_state, object, name);
- }
+ if (use_ic) UpdateCaches(&lookup, object, name);
// Get the property.
PropertyAttributes attr;
@@ -637,53 +694,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
-bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state) {
- ASSERT(kind_ == Code::CALL_IC);
- if (!lookup->IsConstantFunction()) return false;
- JSFunction* function = lookup->GetConstantFunction();
- if (!function->shared()->HasBuiltinFunctionId()) return false;
-
- // Fetch the arguments passed to the called function.
- const int argc = target()->arguments_count();
- Address entry = isolate()->c_entry_fp(isolate()->thread_local_top());
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- Arguments args(argc + 1,
- &Memory::Object_at(fp +
- StandardFrameConstants::kCallerSPOffset +
- argc * kPointerSize));
- switch (function->shared()->builtin_function_id()) {
- case kStringCharCodeAt:
- case kStringCharAt:
- if (object->IsString()) {
- String* string = String::cast(*object);
- // Check there's the right string value or wrapper in the receiver slot.
- ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
- // If we're in the default (fastest) state and the index is
- // out of bounds, update the state to record this fact.
- if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
- argc >= 1 && args[1]->IsNumber()) {
- double index = DoubleToInteger(args.number_at(1));
- if (index < 0 || index >= string->length()) {
- *extra_ic_state =
- StringStubState::update(*extra_ic_state,
- STRING_INDEX_OUT_OF_BOUNDS);
- return true;
- }
- }
- }
- break;
- default:
- return false;
- }
- return false;
-}
-
-
Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
Handle<Object> object,
Handle<String> name) {
int argc = target()->arguments_count();
@@ -692,7 +703,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
case FIELD: {
PropertyIndex index = lookup->GetFieldIndex();
return isolate()->stub_cache()->ComputeCallField(
- argc, kind_, extra_state, name, object, holder, index);
+ argc, kind_, extra_ic_state(), name, object, holder, index);
}
case CONSTANT: {
if (!lookup->IsConstantFunction()) return Handle<Code>::null();
@@ -701,7 +712,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
// that the code stub is in the stub cache.
Handle<JSFunction> function(lookup->GetConstantFunction(), isolate());
return isolate()->stub_cache()->ComputeCallConstant(
- argc, kind_, extra_state, name, object, holder, function);
+ argc, kind_, extra_ic_state(), name, object, holder, function);
}
case NORMAL: {
// If we return a null handle, the IC will not be patched.
@@ -715,7 +726,8 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
Handle<JSFunction> function(JSFunction::cast(cell->value()));
return isolate()->stub_cache()->ComputeCallGlobal(
- argc, kind_, extra_state, name, receiver, global, cell, function);
+ argc, kind_, extra_ic_state(), name,
+ receiver, global, cell, function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
@@ -723,130 +735,106 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
// applicable.
if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
return isolate()->stub_cache()->ComputeCallNormal(
- argc, kind_, extra_state);
+ argc, kind_, extra_ic_state());
}
break;
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(*holder));
return isolate()->stub_cache()->ComputeCallInterceptor(
- argc, kind_, extra_state, name, object, holder);
+ argc, kind_, extra_ic_state(), name, object, holder);
default:
return Handle<Code>::null();
}
}
+Handle<Code> CallICBase::megamorphic_stub() {
+ return isolate()->stub_cache()->ComputeCallMegamorphic(
+ target()->arguments_count(), kind_, extra_ic_state());
+}
+
+
+Handle<Code> CallICBase::pre_monomorphic_stub() {
+ return isolate()->stub_cache()->ComputeCallPreMonomorphic(
+ target()->arguments_count(), kind_, extra_ic_state());
+}
+
+
void CallICBase::UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
- // Compute the number of arguments.
- int argc = target()->arguments_count();
- Handle<Code> code;
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
- argc, kind_, extra_ic_state);
- } else if (state == MONOMORPHIC) {
- if (kind_ == Code::CALL_IC &&
- TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else if (TryRemoveInvalidPrototypeDependentStub(target(),
- *object,
- *name)) {
- state = MONOMORPHIC_PROTOTYPE_FAILURE;
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
- } else {
- code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, kind_, extra_ic_state);
- }
- } else {
- code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
- object, name);
+ if (state() == UNINITIALIZED) {
+ set_target(*pre_monomorphic_stub());
+ TRACE_IC("CallIC", name);
+ return;
}
+ Handle<Code> code = ComputeMonomorphicStub(lookup, object, name);
// If there's no appropriate stub we simply avoid updating the caches.
+ // TODO(verwaest): Install a slow fallback in this case to avoid failing to
+ // learn, and to avoid deopting Crankshaft code.
if (code.is_null()) return;
- // Patch the call site depending on the state of the cache.
- switch (state) {
- case UNINITIALIZED:
- case MONOMORPHIC_PROTOTYPE_FAILURE:
- case PREMONOMORPHIC:
- case MONOMORPHIC:
- set_target(*code);
- break;
- case MEGAMORPHIC: {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
- Handle<JSObject> cache_object = object->IsJSObject()
- ? Handle<JSObject>::cast(object)
- : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
- isolate());
- // Update the stub cache.
- UpdateMegamorphicCache(cache_object->map(), *name, *code);
- break;
- }
- case DEBUG_STUB:
- break;
- case POLYMORPHIC:
- case GENERIC:
- UNREACHABLE();
- break;
- }
+ Handle<JSObject> cache_object = object->IsJSObject()
+ ? Handle<JSObject>::cast(object)
+ : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
+ isolate());
- TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
- name, state, target());
+ PatchCache(CurrentTypeOf(cache_object, isolate()), name, code);
+ TRACE_IC("CallIC", name);
}
-MaybeObject* KeyedCallIC::LoadFunction(State state,
- Handle<Object> object,
+MaybeObject* KeyedCallIC::LoadFunction(Handle<Object> object,
Handle<Object> key) {
if (key->IsInternalizedString()) {
- return CallICBase::LoadFunction(state,
- Code::kNoExtraICState,
- object,
- Handle<String>::cast(key));
- }
-
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
+ return CallICBase::LoadFunction(object, Handle<String>::cast(key));
}
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
+ bool use_ic = MigrateDeprecated(object)
+ ? false : FLAG_use_ic && !object->IsAccessCheckNeeded();
- if (use_ic && state != MEGAMORPHIC) {
+ if (use_ic && state() != MEGAMORPHIC) {
+ ASSERT(!object->IsJSGlobalProxy());
int argc = target()->arguments_count();
- Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = isolate()->stub_cache()->ComputeCallArguments(argc);
+ Handle<Code> stub;
+
+ // Use the KeyedArrayCallStub if the call is of the form array[smi](...),
+ // where array is an instance of one of the initial array maps (without
+ // extra named properties).
+ // TODO(verwaest): Also support keyed calls on instances of other maps.
+ if (object->IsJSArray() && key->IsSmi()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ ElementsKind kind = array->map()->elements_kind();
+ if (IsFastObjectElementsKind(kind) &&
+ array->map() == isolate()->get_initial_js_array_map(kind)) {
+ KeyedArrayCallStub stub_gen(IsHoleyElementsKind(kind), argc);
+ stub = stub_gen.GetCode(isolate());
}
}
- ASSERT(!stub.is_null());
+
+ if (stub.is_null()) {
+ stub = isolate()->stub_cache()->ComputeCallMegamorphic(
+ argc, Code::KEYED_CALL_IC, kNoExtraICState);
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = isolate()->stub_cache()->ComputeCallArguments(argc);
+ }
+ }
+ ASSERT(!stub.is_null());
+ }
set_target(*stub);
- TRACE_IC("KeyedCallIC", key, state, target());
+ TRACE_IC("CallIC", key);
}
Handle<Object> result = GetProperty(isolate(), object, key);
@@ -865,8 +853,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
}
-MaybeObject* LoadIC::Load(State state,
- Handle<Object> object,
+MaybeObject* LoadIC::Load(Handle<Object> object,
Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
@@ -879,32 +866,25 @@ MaybeObject* LoadIC::Load(State state,
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
- if ((object->IsString() || object->IsStringWrapper()) &&
+ if (object->IsStringWrapper() &&
name->Equals(isolate()->heap()->length_string())) {
Handle<Code> stub;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- StringLengthStub string_length_stub(kind(), !object->IsString());
- stub = string_length_stub.GetCode(isolate());
- } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- StringLengthStub string_length_stub(kind(), true);
+ } else if (state() == PREMONOMORPHIC || state() == MONOMORPHIC) {
+ StringLengthStub string_length_stub(kind());
stub = string_length_stub.GetCode(isolate());
- } else if (state != MEGAMORPHIC) {
- ASSERT(state != GENERIC);
+ } else if (state() != MEGAMORPHIC) {
+ ASSERT(state() != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
set_target(*stub);
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
}
// Get the string if we have a string wrapper object.
- Handle<Object> string = object->IsJSValue()
- ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate())
- : object;
- return Smi::FromInt(String::cast(*string)->length());
+ String* string = String::cast(JSValue::cast(*object)->value());
+ return Smi::FromInt(string->length());
}
// Use specialized code for getting prototype of functions.
@@ -912,20 +892,18 @@ MaybeObject* LoadIC::Load(State state,
name->Equals(isolate()->heap()->prototype_string()) &&
Handle<JSFunction>::cast(object)->should_have_prototype()) {
Handle<Code> stub;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
+ } else if (state() == PREMONOMORPHIC) {
FunctionPrototypeStub function_prototype_stub(kind());
stub = function_prototype_stub.GetCode(isolate());
- } else if (state != MEGAMORPHIC) {
- ASSERT(state != GENERIC);
+ } else if (state() != MEGAMORPHIC) {
+ ASSERT(state() != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
set_target(*stub);
-#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
-#endif
}
return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
}
@@ -940,12 +918,7 @@ MaybeObject* LoadIC::Load(State state,
return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
}
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
- }
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
// Named lookup in the object.
LookupResult lookup(isolate());
@@ -960,24 +933,20 @@ MaybeObject* LoadIC::Load(State state,
}
// Update inline cache and stub cache.
- if (FLAG_use_ic) UpdateCaches(&lookup, state, object, name);
+ if (use_ic) UpdateCaches(&lookup, object, name);
PropertyAttributes attr;
- if (lookup.IsInterceptor() || lookup.IsHandler()) {
- // Get the property.
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsUndeclaredGlobal(object)) {
- return ReferenceError("not_defined", name);
- }
- return *result;
- }
-
// Get the property.
- return Object::GetPropertyOrFail(object, object, &lookup, name, &attr);
+ Handle<Object> result =
+ Object::GetProperty(object, object, &lookup, name, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ // If the property is not present, check if we need to throw an
+ // exception.
+ if ((lookup.IsInterceptor() || lookup.IsHandler()) &&
+ attr == ABSENT && IsUndeclaredGlobal(object)) {
+ return ReferenceError("not_defined", name);
+ }
+ return *result;
}
@@ -995,149 +964,105 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
}
-bool IC::UpdatePolymorphicIC(State state,
- Handle<HeapObject> receiver,
+bool IC::UpdatePolymorphicIC(Handle<Type> type,
Handle<String> name,
- Handle<Code> code,
- StrictModeFlag strict_mode) {
- if (code->type() == Code::NORMAL) return false;
- if (target()->ic_state() == MONOMORPHIC &&
- target()->type() == Code::NORMAL) {
- return false;
- }
-
- MapHandleList receiver_maps;
+ Handle<Code> code) {
+ if (!code->is_handler()) return false;
+ TypeHandleList types;
CodeHandleList handlers;
- int number_of_valid_maps;
+ int number_of_valid_types;
int handler_to_overwrite = -1;
- Handle<Map> new_receiver_map(receiver->map());
- {
- DisallowHeapAllocation no_gc;
- target()->FindAllMaps(&receiver_maps);
- int number_of_maps = receiver_maps.length();
- number_of_valid_maps = number_of_maps;
-
- for (int i = 0; i < number_of_maps; i++) {
- Handle<Map> map = receiver_maps.at(i);
- // Filter out deprecated maps to ensure its instances get migrated.
- if (map->is_deprecated()) {
- number_of_valid_maps--;
- // If the receiver map is already in the polymorphic IC, this indicates
- // there was a prototoype chain failure. In that case, just overwrite the
- // handler.
- } else if (map.is_identical_to(new_receiver_map)) {
- number_of_valid_maps--;
- handler_to_overwrite = i;
- }
- }
- if (number_of_valid_maps >= 4) return false;
-
- // Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC.
- // In that case, allow the IC to go back monomorphic.
- if (number_of_maps == 0 && target()->ic_state() != UNINITIALIZED) {
- return false;
+ target()->FindAllTypes(&types);
+ int number_of_types = types.length();
+ number_of_valid_types = number_of_types;
+
+ for (int i = 0; i < number_of_types; i++) {
+ Handle<Type> current_type = types.at(i);
+ // Filter out deprecated maps to ensure their instances get migrated.
+ if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) {
+ number_of_valid_types--;
+ // If the receiver type is already in the polymorphic IC, this indicates
+ // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ } else if (type->IsCurrently(current_type)) {
+ ASSERT(handler_to_overwrite == -1);
+ number_of_valid_types--;
+ handler_to_overwrite = i;
}
- target()->FindAllCode(&handlers, receiver_maps.length());
}
- number_of_valid_maps++;
+ if (number_of_valid_types >= 4) return false;
+ if (number_of_types == 0) return false;
+ if (!target()->FindHandlers(&handlers, types.length())) return false;
+
+ number_of_valid_types++;
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
} else {
- receiver_maps.Add(new_receiver_map);
+ types.Add(type);
handlers.Add(code);
}
- Handle<Code> ic = ComputePolymorphicIC(
- &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode);
+ Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
+ &types, &handlers, number_of_valid_types, name, extra_ic_state());
set_target(*ic);
return true;
}
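The bookkeeping for the polymorphism cap, as a small standalone sketch (toy struct; assumes the limit of 4 shown above): deprecated class types and the entry being overwritten do not count toward the limit, and an empty type list means the GC reset the IC, so it falls back to going monomorphic.

    #include <vector>

    struct TypeEntry { bool deprecated; bool matches_incoming; };

    bool CanStayPolymorphic(const std::vector<TypeEntry>& entries) {
      if (entries.empty()) return false;  // GC reset the IC
      int valid = 0;
      for (const TypeEntry& e : entries) {
        if (!e.deprecated && !e.matches_incoming) ++valid;
      }
      return valid < 4;  // room left for the handler being added
    }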
-Handle<Code> LoadIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- return isolate()->stub_cache()->ComputePolymorphicLoadIC(
- receiver_maps, handlers, number_of_valid_maps, name);
+Handle<Type> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
+ Type* type = object->IsJSGlobalObject()
+ ? Type::Constant(Handle<JSGlobalObject>::cast(object))
+ : Type::OfCurrently(object);
+ return handle(type, isolate);
}
-Handle<Code> StoreIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- return isolate()->stub_cache()->ComputePolymorphicStoreIC(
- receiver_maps, handlers, number_of_valid_maps, name, strict_mode);
-}
-
-
-void LoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_load_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicLoadIC(
- receiver, handler, name);
- set_target(*ic);
-}
-
-
-void KeyedLoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_keyed_load_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedLoadIC(
- receiver, handler, name);
- set_target(*ic);
+Handle<Map> IC::TypeToMap(Type* type, Isolate* isolate) {
+ if (type->Is(Type::Number())) return isolate->factory()->heap_number_map();
+ if (type->Is(Type::Boolean())) return isolate->factory()->oddball_map();
+ if (type->IsConstant()) {
+ return handle(Handle<JSGlobalObject>::cast(type->AsConstant())->map());
+ }
+ ASSERT(type->IsClass());
+ return type->AsClass();
}
-void StoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_store_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicStoreIC(
- receiver, handler, name, strict_mode);
- set_target(*ic);
+Type* IC::MapToType(Handle<Map> map) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) return Type::Number();
+ // The only oddballs that can be recorded in ICs are booleans.
+ if (map->instance_type() == ODDBALL_TYPE) return Type::Boolean();
+ return Type::Class(map);
}
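The map-to-type collapse above in miniature (toy enums standing in for V8's instance types and type lattice): heap numbers and the boolean oddballs become primitive types, and every other map stays a class type.

    enum class ToyInstanceType { HeapNumber, Oddball, Other };
    enum class ToyType { Number, Boolean, Class };

    // Mirrors IC::MapToType; the only oddballs recorded in ICs are booleans.
    ToyType ToyMapToType(ToyInstanceType t) {
      switch (t) {
        case ToyInstanceType::HeapNumber: return ToyType::Number;
        case ToyInstanceType::Oddball:    return ToyType::Boolean;
        default:                          return ToyType::Class;
      }
    }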
-void KeyedStoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- if (handler->is_keyed_store_stub()) return set_target(*handler);
- Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedStoreIC(
- receiver, handler, name, strict_mode);
+void IC::UpdateMonomorphicIC(Handle<Type> type,
+ Handle<Code> handler,
+ Handle<String> name) {
+ if (!handler->is_handler()) return set_target(*handler);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
+ name, type, handler, extra_ic_state());
set_target(*ic);
}
void IC::CopyICToMegamorphicCache(Handle<String> name) {
- MapHandleList receiver_maps;
+ TypeHandleList types;
CodeHandleList handlers;
- {
- DisallowHeapAllocation no_gc;
- target()->FindAllMaps(&receiver_maps);
- target()->FindAllCode(&handlers, receiver_maps.length());
- }
- for (int i = 0; i < receiver_maps.length(); i++) {
- UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i));
+ target()->FindAllTypes(&types);
+ if (!target()->FindHandlers(&handlers, types.length())) return;
+ for (int i = 0; i < types.length(); i++) {
+ UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
}
}
-bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
- DisallowHeapAllocation no_allocation;
-
+bool IC::IsTransitionOfMonomorphicTarget(Type* type) {
+ if (!type->IsClass()) return false;
+ Map* receiver_map = *type->AsClass();
Map* current_map = target()->FindFirstMap();
ElementsKind receiver_elements_kind = receiver_map->elements_kind();
bool more_general_transition =
@@ -1151,70 +1076,37 @@ bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
}
-// Since GC may have been invoked, by the time PatchCache is called, |state| is
-// not necessarily equal to target()->state().
-void IC::PatchCache(State state,
- StrictModeFlag strict_mode,
- Handle<HeapObject> receiver,
+void IC::PatchCache(Handle<Type> type,
Handle<String> name,
Handle<Code> code) {
- switch (state) {
+ switch (state()) {
case UNINITIALIZED:
case PREMONOMORPHIC:
case MONOMORPHIC_PROTOTYPE_FAILURE:
- UpdateMonomorphicIC(receiver, code, name, strict_mode);
+ UpdateMonomorphicIC(type, code, name);
break;
- case MONOMORPHIC:
- // Only move to megamorphic if the target changes.
- if (target() != *code) {
- if (target()->is_load_stub() || target()->is_store_stub()) {
- bool is_same_handler = false;
- {
- DisallowHeapAllocation no_allocation;
- Code* old_handler = target()->FindFirstCode();
- is_same_handler = old_handler == *code;
- }
- if (is_same_handler
- && IsTransitionedMapOfMonomorphicTarget(receiver->map())) {
- UpdateMonomorphicIC(receiver, code, name, strict_mode);
- break;
- }
- if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) {
- break;
- }
-
- if (target()->type() != Code::NORMAL) {
- CopyICToMegamorphicCache(name);
- }
- }
-
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target((strict_mode == kStrictMode)
- ? *megamorphic_stub_strict()
- : *megamorphic_stub());
+ case MONOMORPHIC: {
+ // For now, call stubs are allowed to rewrite to the same stub. This
+ // happens, e.g., when the field does not contain a function.
+ ASSERT(target()->is_call_stub() ||
+ target()->is_keyed_call_stub() ||
+ !target().is_identical_to(code));
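+ // Stay monomorphic when only the receiver map transitioned and the
+ // handler itself is unchanged.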
+ Code* old_handler = target()->FindFirstHandler();
+ if (old_handler == *code && IsTransitionOfMonomorphicTarget(*type)) {
+ UpdateMonomorphicIC(type, code, name);
+ break;
}
- break;
- case MEGAMORPHIC:
- // Update the stub cache.
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- break;
+ // Fall through.
+ }
case POLYMORPHIC:
- if (target()->is_load_stub() || target()->is_store_stub()) {
- if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) {
- break;
- }
+ if (!target()->is_keyed_stub()) {
+ if (UpdatePolymorphicIC(type, name, code)) break;
CopyICToMegamorphicCache(name);
- UpdateMegamorphicCache(receiver->map(), *name, *code);
- set_target((strict_mode == kStrictMode)
- ? *megamorphic_stub_strict()
- : *megamorphic_stub());
- } else {
- // When trying to patch a polymorphic keyed load/store element stub
- // with anything other than another polymorphic stub, go generic.
- set_target((strict_mode == kStrictMode)
- ? *generic_stub_strict()
- : *generic_stub());
}
+ set_target(*megamorphic_stub());
+ // Fall through.
+ case MEGAMORPHIC:
+ UpdateMegamorphicCache(*type, *name, *code);
break;
case DEBUG_STUB:
break;
@@ -1225,126 +1117,152 @@ void IC::PatchCache(State state,
}
-static void GetReceiverMapsForStub(Handle<Code> stub,
- MapHandleList* result) {
- ASSERT(stub->is_inline_cache_stub());
- switch (stub->ic_state()) {
- case MONOMORPHIC: {
- Map* map = stub->FindFirstMap();
- if (map != NULL) {
- result->Add(Handle<Map>(map));
- }
- break;
- }
- case POLYMORPHIC: {
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Handle<Object> object(info->target_object(), stub->GetIsolate());
- if (object->IsString()) break;
- ASSERT(object->IsMap());
- AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
- }
- break;
- }
- case MEGAMORPHIC:
- break;
- case UNINITIALIZED:
- case PREMONOMORPHIC:
- case MONOMORPHIC_PROTOTYPE_FAILURE:
- case GENERIC:
- case DEBUG_STUB:
- UNREACHABLE();
- break;
+Handle<Code> LoadIC::SimpleFieldLoad(int offset,
+ bool inobject,
+ Representation representation) {
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(inobject, offset, representation);
+ return stub.GetCode(isolate());
+ } else {
+ KeyedLoadFieldStub stub(inobject, offset, representation);
+ return stub.GetCode(isolate());
}
}
void LoadIC::UpdateCaches(LookupResult* lookup,
- State state,
Handle<Object> object,
Handle<String> name) {
- if (!object->IsHeapObject()) return;
-
- Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
-
- Handle<Code> code;
- if (state == UNINITIALIZED) {
+ if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the premonomorphic stub to delay
// setting the monomorphic state.
- code = pre_monomorphic_stub();
- } else if (!lookup->IsCacheable()) {
+ set_target(*pre_monomorphic_stub());
+ TRACE_IC("LoadIC", name);
+ return;
+ }
+
+ Handle<Type> type = CurrentTypeOf(object, isolate());
+ Handle<Code> code;
+ if (!lookup->IsCacheable()) {
// Bail out if the result is not cacheable.
code = slow_stub();
- } else if (!object->IsJSObject()) {
- // TODO(jkummerow): It would be nice to support non-JSObjects in
- // ComputeLoadHandler, then we wouldn't need to go generic here.
- code = slow_stub();
+ } else if (!lookup->IsProperty()) {
+ if (kind() == Code::LOAD_IC) {
+ code = isolate()->stub_cache()->ComputeLoadNonexistent(name, type);
+ } else {
+ code = slow_stub();
+ }
} else {
- code = ComputeLoadHandler(lookup, Handle<JSObject>::cast(receiver), name);
- if (code.is_null()) code = slow_stub();
+ code = ComputeHandler(lookup, object, name);
}
- PatchCache(state, kNonStrictMode, receiver, name, code);
- TRACE_IC("LoadIC", name, state, target());
+ PatchCache(type, name, code);
+ TRACE_IC("LoadIC", name);
}
-void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
+void IC::UpdateMegamorphicCache(Type* type, Name* name, Code* code) {
// The map that holds the cached code should be consistent with
// GenerateMonomorphicCacheProbe.
+ Map* map = *TypeToMap(type, isolate());
isolate()->stub_cache()->Set(name, map, code);
}
-Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name) {
- if (!lookup->IsProperty()) {
- // Nonexistent property. The result is undefined.
- return isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
+Handle<Code> IC::ComputeHandler(LookupResult* lookup,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value) {
+ InlineCacheHolderFlag cache_holder = GetCodeCacheForObject(*object);
+ Handle<HeapObject> stub_holder(GetCodeCacheHolder(
+ isolate(), *object, cache_holder));
+
+ Handle<Code> code = isolate()->stub_cache()->FindHandler(
+ name, handle(stub_holder->map()), kind(), cache_holder);
+ if (!code.is_null()) return code;
+
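+ // Cache miss: compile a new handler below and, unless it is a NORMAL
+ // stub, cache it on the stub holder's map.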
+ code = CompileHandler(lookup, object, name, value, cache_holder);
+ ASSERT(code->is_handler());
+
+ if (code->type() != Code::NORMAL) {
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
+ }
+
+ return code;
+}
+
+
+Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> unused,
+ InlineCacheHolderFlag cache_holder) {
+ if (object->IsString() && name->Equals(isolate()->heap()->length_string())) {
+ int length_index = String::kLengthOffset / kPointerSize;
+ return SimpleFieldLoad(length_index);
}
- // Compute monomorphic stub.
+ Handle<Type> type = CurrentTypeOf(object, isolate());
Handle<JSObject> holder(lookup->holder());
+ LoadStubCompiler compiler(isolate(), kNoExtraICState, cache_holder, kind());
+
switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder,
- lookup->GetFieldIndex(), lookup->representation());
+ case FIELD: {
+ PropertyIndex field = lookup->GetFieldIndex();
+ if (object.is_identical_to(holder)) {
+ return SimpleFieldLoad(field.translate(holder),
+ field.is_inobject(holder),
+ lookup->representation());
+ }
+ return compiler.CompileLoadField(
+ type, holder, name, field, lookup->representation());
+ }
case CONSTANT: {
Handle<Object> constant(lookup->GetConstant(), isolate());
// TODO(2803): Don't compute a stub for cons strings because they cannot
// be embedded into code.
- if (constant->IsConsString()) return Handle<Code>::null();
- return isolate()->stub_cache()->ComputeLoadConstant(
- name, receiver, holder, constant);
+ if (constant->IsConsString()) break;
+ return compiler.CompileLoadConstant(type, holder, name, constant);
}
case NORMAL:
+ if (kind() != Code::LOAD_IC) break;
if (holder->IsGlobalObject()) {
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
Handle<PropertyCell> cell(
global->GetPropertyCell(lookup), isolate());
- return isolate()->stub_cache()->ComputeLoadGlobal(
- name, receiver, global, cell, lookup->IsDontDelete());
+ Handle<Code> code = compiler.CompileLoadGlobal(
+ type, global, cell, name, lookup->IsDontDelete());
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ Handle<HeapObject> stub_holder(GetCodeCacheHolder(
+ isolate(), *object, cache_holder));
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
+ return code;
}
// There is only one shared stub for loading normalized
// properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
+ // property must be found in the object for the stub to be
// applicable.
- if (!holder.is_identical_to(receiver)) break;
- return isolate()->stub_cache()->ComputeLoadNormal(name, receiver);
+ if (!object.is_identical_to(holder)) break;
+ return isolate()->builtins()->LoadIC_Normal();
case CALLBACKS: {
+ // Use simple field loads for some well-known callback properties.
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ Handle<Map> map(receiver->map());
+ int object_offset;
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) {
+ return SimpleFieldLoad(object_offset / kPointerSize);
+ }
+ }
+
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
if (callback->IsExecutableAccessorInfo()) {
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(callback);
if (v8::ToCData<Address>(info->getter()) == 0) break;
- if (!info->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, info);
+ if (!info->IsCompatibleReceiver(*object)) break;
+ return compiler.CompileLoadCallback(type, holder, name, info);
} else if (callback->IsAccessorPair()) {
Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(),
isolate());
@@ -1352,21 +1270,20 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
if (holder->IsGlobalObject()) break;
if (!holder->HasFastProperties()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ if (!object->IsJSObject() &&
+ !function->IsBuiltin() &&
+ function->shared()->is_classic_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ break;
+ }
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
- return isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, call_optimization);
+ call_optimization.IsCompatibleReceiver(*object)) {
+ return compiler.CompileLoadCallback(
+ type, holder, name, call_optimization);
}
- return isolate()->stub_cache()->ComputeLoadViaGetter(
- name, receiver, holder, function);
- } else if (receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string())) {
- PropertyIndex lengthIndex =
- PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
- return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lengthIndex, Representation::Tagged());
+ return compiler.CompileLoadViaGetter(type, holder, name, function);
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1376,12 +1293,12 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(*holder));
- return isolate()->stub_cache()->ComputeLoadInterceptor(
- name, receiver, holder);
+ return compiler.CompileLoadInterceptor(type, holder, name);
default:
break;
}
- return Handle<Code>::null();
+
+ return slow_stub();
}
@@ -1406,8 +1323,6 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
- State ic_state = target()->ic_state();
-
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
@@ -1418,17 +1333,16 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
Handle<Map> receiver_map(receiver->map(), isolate());
MapHandleList target_receiver_maps;
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
// Optimistically assume that ICs that haven't reached the MONOMORPHIC state
// yet will do so and stay there.
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
- if (target() == *string_stub()) {
+ if (target().is_identical_to(string_stub())) {
target_receiver_maps.Add(isolate()->factory()->string_map());
} else {
- GetReceiverMapsForStub(Handle<Code>(target(), isolate()),
- &target_receiver_maps);
+ target()->FindAllMaps(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
@@ -1441,14 +1355,14 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
// monomorphic. If this optimistic assumption is not true, the IC will
// miss again and it will become polymorphic and support both the
// untransitioned and transitioned maps.
- if (ic_state == MONOMORPHIC &&
+ if (state() == MONOMORPHIC &&
IsMoreGeneralElementsKindTransition(
target_receiver_maps.at(0)->elements_kind(),
receiver->GetElementsKind())) {
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
}
- ASSERT(ic_state != GENERIC);
+ ASSERT(state() != GENERIC);
// Determine the list of receiver maps that this call site has seen,
// adding the map that was just encountered.
@@ -1471,132 +1385,65 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
}
-MaybeObject* KeyedLoadIC::Load(State state,
- Handle<Object> object,
- Handle<Object> key,
- ICMissMode miss_mode) {
+MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
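+ // If the receiver's map is deprecated, migrate the instance up front and
+ // let the runtime handle the access.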
+ if (MigrateDeprecated(object)) {
+ return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
+ }
+
+ MaybeObject* maybe_object = NULL;
+ Handle<Code> stub = generic_stub();
+
// Check for values that can be converted into an internalized string directly
// or are representable as a smi.
key = TryConvertKey(key, isolate());
if (key->IsInternalizedString()) {
- return LoadIC::Load(state, object, Handle<String>::cast(key));
+ maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
+ if (maybe_object->IsFailure()) return maybe_object;
+ } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+ ASSERT(!object->IsJSGlobalProxy());
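+ // Pick the most specific stub the receiver supports; the default chosen
+ // above is the generic stub.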
+ if (object->IsString() && key->IsNumber()) {
+ if (state() == UNINITIALIZED) stub = string_stub();
+ } else if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (receiver->HasIndexedInterceptor()) {
+ stub = indexed_interceptor_stub();
+ } else if (!key->ToSmi()->IsFailure() &&
+ (!target().is_identical_to(non_strict_arguments_stub()))) {
+ stub = LoadElementStub(receiver);
+ }
+ }
}
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Handle<Code> stub = generic_stub();
- if (miss_mode != MISS_FORCE_GENERIC) {
- if (object->IsString() && key->IsNumber()) {
- if (state == UNINITIALIZED) {
- stub = string_stub();
- }
- } else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
-
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (!key->ToSmi()->IsFailure() &&
- (target() != *non_strict_arguments_stub())) {
- stub = LoadElementStub(receiver);
- }
- }
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
+ if (!is_target_set()) {
+ if (*stub == *generic_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
ASSERT(!stub.is_null());
set_target(*stub);
- TRACE_IC("KeyedLoadIC", key, state, target());
+ TRACE_IC("LoadIC", key);
}
-
+ if (maybe_object != NULL) return maybe_object;
return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
}
-Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name) {
- // Bail out if we didn't find a result.
- if (!lookup->IsProperty()) return Handle<Code>::null();
-
- // Compute a monomorphic stub.
- Handle<JSObject> holder(lookup->holder(), isolate());
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeKeyedLoadField(
- name, receiver, holder,
- lookup->GetFieldIndex(), lookup->representation());
- case CONSTANT: {
- Handle<Object> constant(lookup->GetConstant(), isolate());
- // TODO(2803): Don't compute a stub for cons strings because they cannot
- // be embedded into code.
- if (constant->IsConsString()) return Handle<Code>::null();
- return isolate()->stub_cache()->ComputeKeyedLoadConstant(
- name, receiver, holder, constant);
- }
- case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject(), isolate());
- // TODO(dcarney): Handle DeclaredAccessorInfo correctly.
- if (callback_object->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> callback =
- Handle<ExecutableAccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) break;
- if (!callback->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, callback);
- } else if (callback_object->IsAccessorPair()) {
- Handle<Object> getter(
- Handle<AccessorPair>::cast(callback_object)->getter(),
- isolate());
- if (!getter->IsJSFunction()) break;
- if (holder->IsGlobalObject()) break;
- if (!holder->HasFastProperties()) break;
- Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- CallOptimization call_optimization(function);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
- return isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, call_optimization);
- }
- }
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(lookup->holder()));
- return isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
- name, receiver, holder);
- default:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- return generic_stub();
- }
- return Handle<Code>::null();
-}
-
-
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value,
LookupResult* lookup,
- IC::State* state) {
+ IC* ic) {
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
if (lookup->holder() == *receiver) {
- if (lookup->IsInterceptor() &&
- receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
+ if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
receiver->LocalLookupRealNamedProperty(*name, lookup);
return lookup->IsFound() &&
!lookup->IsReadOnly() &&
@@ -1607,7 +1454,8 @@ static bool LookupForWrite(Handle<JSObject> receiver,
}
if (lookup->IsPropertyCallbacks()) return true;
-
+ // JSGlobalProxy always goes via the runtime, so it's safe to cache.
+ if (receiver->IsJSGlobalProxy()) return true;
// Currently normal holders in the prototype chain are not supported. They
// would require a runtime positive lookup and verification that the details
// have not changed.
@@ -1622,8 +1470,7 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// receiver when trying to fetch extra information from the transition.
receiver->map()->LookupTransition(*holder, *name, lookup);
if (!lookup->IsTransition()) return false;
- PropertyDetails target_details =
- lookup->GetTransitionDetails(receiver->map());
+ PropertyDetails target_details = lookup->GetTransitionDetails();
if (target_details.IsReadOnly()) return false;
// If the value that's being stored does not fit in the field that the
@@ -1634,7 +1481,7 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// transition target.
ASSERT(!receiver->map()->is_deprecated());
if (!value->FitsRepresentation(target_details.representation())) {
- Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
+ Handle<Map> target(lookup->GetTransitionTarget());
Map::GeneralizeRepresentation(
target, target->LastAdded(),
value->OptimalRepresentation(), FORCE_FIELD);
@@ -1642,22 +1489,21 @@ static bool LookupForWrite(Handle<JSObject> receiver,
// entirely by the migration above.
receiver->map()->LookupTransition(*holder, *name, lookup);
if (!lookup->IsTransition()) return false;
- *state = MONOMORPHIC_PROTOTYPE_FAILURE;
+ ic->MarkMonomorphicPrototypeFailure();
}
return true;
}
-MaybeObject* StoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+MaybeObject* StoreIC::Store(Handle<Object> object,
Handle<String> name,
Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSReceiver::SetPropertyOrFail(
- Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode);
+ if (MigrateDeprecated(object) || object->IsJSProxy()) {
+ Handle<Object> result = JSReceiver::SetProperty(
+ Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
// If the object is undefined or null it's illegal to try to set any
@@ -1667,7 +1513,7 @@ MaybeObject* StoreIC::Store(State state,
}
// The length property of string values is read-only. Throw in strict mode.
- if (strict_mode == kStrictMode && object->IsString() &&
+ if (strict_mode() == kStrictMode && object->IsString() &&
name->Equals(isolate()->heap()->length_string())) {
return TypeError("strict_read_only_property", object, name);
}
@@ -1678,23 +1524,21 @@ MaybeObject* StoreIC::Store(State state,
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
-
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<Object> result =
- JSObject::SetElement(receiver, index, value, NONE, strict_mode);
+ JSObject::SetElement(receiver, index, value, NONE, strict_mode());
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
// Observed objects are always modified through the runtime.
if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
// Use specialized code for setting the length of arrays with fast
@@ -1708,110 +1552,111 @@ MaybeObject* StoreIC::Store(State state,
receiver->HasFastProperties() &&
!receiver->map()->is_frozen()) {
Handle<Code> stub =
- StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate());
+ StoreArrayLengthStub(kind(), strict_mode()).GetCode(isolate());
set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
- }
-
- if (receiver->IsJSGlobalProxy()) {
- if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
- // Generate a generic stub that goes to the runtime when we see a global
- // proxy as receiver.
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? global_proxy_stub_strict()
- : global_proxy_stub();
- set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
- }
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
+ TRACE_IC("StoreIC", name);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
LookupResult lookup(isolate());
- bool can_store = LookupForWrite(receiver, name, value, &lookup, &state);
+ bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
if (!can_store &&
- strict_mode == kStrictMode &&
+ strict_mode() == kStrictMode &&
!(lookup.IsProperty() && lookup.IsReadOnly()) &&
IsUndeclaredGlobal(object)) {
// Strict mode doesn't allow setting non-existent global property.
return ReferenceError("not_defined", name);
}
if (FLAG_use_ic) {
- if (state == UNINITIALIZED) {
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? pre_monomorphic_stub_strict()
- : pre_monomorphic_stub();
+ if (state() == UNINITIALIZED) {
+ Handle<Code> stub = pre_monomorphic_stub();
set_target(*stub);
- TRACE_IC("StoreIC", name, state, *stub);
+ TRACE_IC("StoreIC", name);
} else if (can_store) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ UpdateCaches(&lookup, receiver, name, value);
} else if (!name->IsCacheable(isolate()) ||
lookup.IsNormal() ||
(lookup.IsField() && lookup.CanHoldValue(value))) {
- Handle<Code> stub = (strict_mode == kStrictMode) ? generic_stub_strict()
- : generic_stub();
+ Handle<Code> stub = generic_stub();
set_target(*stub);
}
}
// Set the property.
- return JSReceiver::SetPropertyOrFail(
- receiver, name, value, NONE, strict_mode, store_mode);
+ Handle<Object> result = JSReceiver::SetProperty(
+ receiver, name, value, NONE, strict_mode(), store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
void StoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
- ASSERT(!receiver->IsJSGlobalProxy());
ASSERT(lookup->IsFound());
// These are not cacheable, so we never see such LookupResults here.
ASSERT(!lookup->IsHandler());
- Handle<Code> code = ComputeStoreMonomorphic(
- lookup, strict_mode, receiver, name, value);
- if (code.is_null()) {
- Handle<Code> stub = strict_mode == kStrictMode
- ? generic_stub_strict() : generic_stub();
- set_target(*stub);
- return;
- }
+ Handle<Code> code = ComputeHandler(lookup, receiver, name, value);
- PatchCache(state, strict_mode, receiver, name, code);
- TRACE_IC("StoreIC", name, state, target());
+ PatchCache(CurrentTypeOf(receiver, isolate()), name, code);
+ TRACE_IC("StoreIC", name);
}
-Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
+Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value,
+ InlineCacheHolderFlag cache_holder) {
+ if (object->IsJSGlobalProxy()) return slow_stub();
+ ASSERT(cache_holder == OWN_MAP);
+ // This is currently guaranteed by checks in StoreIC::Store.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
Handle<JSObject> holder(lookup->holder());
+ // Handlers do not use strict mode.
+ StoreStubCompiler compiler(isolate(), kNonStrictMode, kind());
switch (lookup->type()) {
case FIELD:
- return isolate()->stub_cache()->ComputeStoreField(
- name, receiver, lookup, strict_mode);
+ return compiler.CompileStoreField(receiver, lookup, name);
+ case TRANSITION: {
+ // Explicitly pass in the receiver map since LookupForWrite may have
+ // stored something other than the receiver in the holder.
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ PropertyDetails details = transition->GetLastDescriptorDetails();
+
+ if (details.type() == CALLBACKS || details.attributes() != NONE) break;
+
+ return compiler.CompileStoreTransition(
+ receiver, lookup, transition, name);
+ }
case NORMAL:
+ if (kind() == Code::KEYED_STORE_IC) break;
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- Handle<PropertyCell> cell(
- global->GetPropertyCell(lookup), isolate());
- return isolate()->stub_cache()->ComputeStoreGlobal(
- name, global, cell, value, strict_mode);
+ Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
+ Handle<Type> union_type = PropertyCell::UpdatedType(cell, value);
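+ // Specialize the stub when the updated cell type is a known constant.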
+ StoreGlobalStub stub(union_type->IsConstant());
+
+ Handle<Code> code = stub.GetCodeCopyFromTemplate(
+ isolate(), receiver->map(), *cell);
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
}
ASSERT(holder.is_identical_to(receiver));
- return isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
+ return isolate()->builtins()->StoreIC_Normal();
case CALLBACKS: {
+ if (kind() == Code::KEYED_STORE_IC) break;
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
if (callback->IsExecutableAccessorInfo()) {
Handle<ExecutableAccessorInfo> info =
@@ -1819,8 +1664,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
if (v8::ToCData<Address>(info->setter()) == 0) break;
if (!holder->HasFastProperties()) break;
if (!info->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, holder, info, strict_mode);
+ return compiler.CompileStoreCallback(receiver, holder, name, info);
} else if (callback->IsAccessorPair()) {
Handle<Object> setter(
Handle<AccessorPair>::cast(callback)->setter(), isolate());
@@ -1830,14 +1674,12 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
- return isolate()->stub_cache()->ComputeStoreCallback(
- name, receiver, holder, call_optimization, strict_mode);
+ call_optimization.IsCompatibleReceiver(*receiver)) {
+ return compiler.CompileStoreCallback(
+ receiver, holder, name, call_optimization);
}
- return isolate()->stub_cache()->ComputeStoreViaSetter(
- name, receiver, holder, Handle<JSFunction>::cast(setter),
- strict_mode);
+ return compiler.CompileStoreViaSetter(
+ receiver, holder, name, Handle<JSFunction>::cast(setter));
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1846,55 +1688,38 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
break;
}
case INTERCEPTOR:
- ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- return isolate()->stub_cache()->ComputeStoreInterceptor(
- name, receiver, strict_mode);
+ if (kind() == Code::KEYED_STORE_IC) break;
+ ASSERT(HasInterceptorSetter(*receiver));
+ return compiler.CompileStoreInterceptor(receiver, name);
case CONSTANT:
break;
- case TRANSITION: {
- // Explicitly pass in the receiver map since LookupForWrite may have
- // stored something else than the receiver in the holder.
- Handle<Map> transition(
- lookup->GetTransitionTarget(receiver->map()), isolate());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() == CALLBACKS || details.attributes() != NONE) break;
-
- return isolate()->stub_cache()->ComputeStoreTransition(
- name, receiver, lookup, transition, strict_mode);
- }
case NONEXISTENT:
case HANDLER:
UNREACHABLE();
break;
}
- return Handle<Code>::null();
+ return slow_stub();
}
Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
- KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode) {
+ KeyedAccessStoreMode store_mode) {
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != Code::NORMAL) {
TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
+ return generic_stub();
}
- State ic_state = target()->ic_state();
Handle<Map> receiver_map(receiver->map(), isolate());
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) {
// Optimistically assume that ICs that haven't reached the MONOMORPHIC state
// yet will do so and stay there.
Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- monomorphic_map, strict_mode, store_mode);
+ monomorphic_map, strict_mode(), store_mode);
}
MapHandleList target_receiver_maps;
@@ -1903,9 +1728,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// In the case that a non-map-specific IC is installed (e.g. keyed
// stores into properties in dictionary mode), there will be no
// receiver maps in the target.
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
+ return generic_stub();
}
// There are several special cases where an IC that is MONOMORPHIC can still
@@ -1913,9 +1736,9 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
KeyedAccessStoreMode old_store_mode =
- Code::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
- if (ic_state == MONOMORPHIC) {
+ if (state() == MONOMORPHIC) {
// If the "old" and "new" maps are in the same elements map family, stay
// MONOMORPHIC and use the map for the most generic ElementsKind.
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1923,11 +1746,11 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
transitioned_receiver_map =
ComputeTransitionedMap(receiver, store_mode);
}
- if (IsTransitionedMapOfMonomorphicTarget(*transitioned_receiver_map)) {
+ if (IsTransitionOfMonomorphicTarget(MapToType(transitioned_receiver_map))) {
// Element family is the same, use the "worst" case map.
store_mode = GetNonTransitioningStoreMode(store_mode);
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- transitioned_receiver_map, strict_mode, store_mode);
+ transitioned_receiver_map, strict_mode(), store_mode);
} else if (*previous_receiver_map == receiver->map() &&
old_store_mode == STANDARD_STORE &&
(IsGrowStoreMode(store_mode) ||
@@ -1937,11 +1760,11 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
return isolate()->stub_cache()->ComputeKeyedStoreElement(
- receiver_map, strict_mode, store_mode);
+ receiver_map, strict_mode(), store_mode);
}
}
- ASSERT(ic_state != GENERIC);
+ ASSERT(state() != GENERIC);
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
@@ -1957,14 +1780,14 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help; use the generic stub.
TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
+ return generic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
- return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
+ return generic_stub();
}
// Make sure all polymorphic handlers have the same store mode; otherwise the
@@ -1975,9 +1798,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
store_mode = old_store_mode;
} else if (store_mode != old_store_mode) {
TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
+ return generic_stub();
}
}
@@ -1995,14 +1816,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
external_arrays != target_receiver_maps.length()) {
TRACE_GENERIC_IC(isolate(), "KeyedIC",
"unsupported combination of external and normal arrays");
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
+ return generic_stub();
}
}
return isolate()->stub_cache()->ComputeStoreElementPolymorphic(
- &target_receiver_maps, store_mode, strict_mode);
+ &target_receiver_maps, store_mode, strict_mode());
}
@@ -2125,117 +1944,85 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
}
-MaybeObject* KeyedStoreIC::Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
- Handle<Object> value,
- ICMissMode miss_mode) {
+ Handle<Object> value) {
+ if (MigrateDeprecated(object)) {
+ Handle<Object> result = Runtime::SetObjectProperty(isolate(), object,
+ key,
+ value,
+ NONE,
+ strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
+ }
+
// Check for values that can be converted into an internalized string directly
// or are representable as a smi.
key = TryConvertKey(key, isolate());
+ MaybeObject* maybe_object = NULL;
+ Handle<Code> stub = generic_stub();
+
if (key->IsInternalizedString()) {
- return StoreIC::Store(state,
- strict_mode,
- object,
- Handle<String>::cast(key),
- value,
- JSReceiver::MAY_BE_STORE_FROM_KEYED);
- }
-
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
- !(FLAG_harmony_observation && object->IsJSObject() &&
- JSObject::cast(*object)->map()->is_observed());
- if (use_ic && !object->IsSmi()) {
- // Don't use ICs for maps of the objects in Array's prototype chain. We
- // expect to be able to trap element sets to objects with those maps in the
- // runtime to enable optimization of element hole access.
- Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
- }
- ASSERT(!(use_ic && object->IsJSGlobalProxy()));
-
- if (use_ic) {
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- if (miss_mode != MISS_FORCE_GENERIC) {
+ maybe_object = StoreIC::Store(object,
+ Handle<String>::cast(key),
+ value,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED);
+ if (maybe_object->IsFailure()) return maybe_object;
+ } else {
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
+ !(FLAG_harmony_observation && object->IsJSObject() &&
+ JSObject::cast(*object)->map()->is_observed());
+ if (use_ic && !object->IsSmi()) {
+ // Don't use ICs for maps of the objects in Array's prototype chain. We
+ // expect to be able to trap element sets to objects with those maps in
+ // the runtime to enable optimization of element hole access.
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
+ }
+
+ if (use_ic) {
+ ASSERT(!object->IsJSGlobalProxy());
+
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->map()->is_deprecated()) {
- JSObject::MigrateInstance(receiver);
- }
bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
} else if (key_is_smi_like &&
- (target() != *non_strict_arguments_stub())) {
- KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
- stub = StoreElementStub(receiver, store_mode, strict_mode);
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "key not a number");
+ !(target().is_identical_to(non_strict_arguments_stub()))) {
+ // We should go generic if the receiver isn't a dictionary, but its
+ // prototype chain does have dictionary elements. This ensures that
+ // other non-dictionary receivers in the polymorphic case benefit
+ // from fast-path keyed stores.
+ if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
+ KeyedAccessStoreMode store_mode =
+ GetStoreMode(receiver, key, value);
+ stub = StoreElementStub(receiver, store_mode);
+ }
}
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "not an object");
}
- } else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic");
}
- ASSERT(!stub.is_null());
- set_target(*stub);
- TRACE_IC("KeyedStoreIC", key, state, target());
}
- return Runtime::SetObjectPropertyOrFail(
- isolate(), object , key, value, NONE, strict_mode);
-}
-
-
-Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- switch (lookup->type()) {
- case FIELD:
- return isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, lookup, strict_mode);
- case TRANSITION: {
- // Explicitly pass in the receiver map since LookupForWrite may have
- // stored something else than the receiver in the holder.
- Handle<Map> transition(
- lookup->GetTransitionTarget(receiver->map()), isolate());
- int descriptor = transition->LastAdded();
-
- DescriptorArray* target_descriptors = transition->instance_descriptors();
- PropertyDetails details = target_descriptors->GetDetails(descriptor);
-
- if (details.type() != CALLBACKS && details.attributes() == NONE) {
- return isolate()->stub_cache()->ComputeKeyedStoreTransition(
- name, receiver, lookup, transition, strict_mode);
- }
- // fall through.
+ if (!is_target_set()) {
+ if (*stub == *generic_stub()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
- case NORMAL:
- case CONSTANT:
- case CALLBACKS:
- case INTERCEPTOR:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- return (strict_mode == kStrictMode)
- ? generic_stub_strict()
- : generic_stub();
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- break;
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("StoreIC", key);
}
- return Handle<Code>::null();
+
+ if (maybe_object) return maybe_object;
+ Handle<Object> result = Runtime::SetObjectProperty(isolate(), object, key,
+ value,
+ NONE,
+ strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
@@ -2251,12 +2038,10 @@ RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- MaybeObject* maybe_result = ic.LoadFunction(state,
- extra_ic_state,
- args.at<Object>(0),
- args.at<String>(1));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
JSFunction* raw_function;
if (!maybe_result->To(&raw_function)) return maybe_result;
@@ -2278,9 +2063,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedCallIC ic(isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- MaybeObject* maybe_result =
- ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
// Result could be a function or a failure.
JSFunction* raw_function = NULL;
if (!maybe_result->To(&raw_function)) return maybe_result;
@@ -2298,8 +2084,10 @@ RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<String>(1));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key);
}
@@ -2308,8 +2096,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key);
}
@@ -2317,20 +2107,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 2);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state,
- args.at<Object>(0),
- args.at<Object>(1),
- MISS_FORCE_GENERIC);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Load(receiver, key);
}
@@ -2339,13 +2119,10 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<String>(1),
- args.at<Object>(2));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
@@ -2353,13 +2130,32 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<String>(1),
- args.at<Object>(2));
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_MissFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ KeyedCallIC ic(isolate);
+ Arguments* caller_args = reinterpret_cast<Arguments*>(args[0]);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> receiver((*caller_args)[0], isolate);
+
+ ic.UpdateState(receiver, key);
+ MaybeObject* maybe_result = ic.LoadFunction(receiver, key);
+ // Result could be a function or a failure.
+ JSFunction* raw_function = NULL;
+ if (!maybe_result->To(&raw_function)) return maybe_result;
+
+ if (raw_function->is_compiled()) return raw_function;
+
+ Handle<JSFunction> function(raw_function, isolate);
+ JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+ return *function;
}
@@ -2442,14 +2238,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
@@ -2457,358 +2249,532 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ return ic.Store(receiver, key, args.at<Object>(2));
}
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- NONE,
- strict_mode);
+ StrictModeFlag strict_mode = ic.strict_mode();
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ NONE,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- NONE,
- strict_mode);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state,
- Code::GetStrictMode(extra_ic_state),
- args.at<Object>(0),
- args.at<Object>(1),
- args.at<Object>(2),
- MISS_FORCE_GENERIC);
+ StrictModeFlag strict_mode = ic.strict_mode();
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ NONE,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
- SealHandleScope scope(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
Handle<Object> value = args.at<Object>(0);
+ Handle<Map> map = args.at<Map>(1);
Handle<Object> key = args.at<Object>(2);
Handle<Object> object = args.at<Object>(3);
- StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- NONE,
- strict_mode);
+ StrictModeFlag strict_mode = ic.strict_mode();
+ if (object->IsJSObject()) {
+ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
+ map->elements_kind());
+ }
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ NONE,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
-void BinaryOpIC::patch(Code* code) {
- set_target(code);
+BinaryOpIC::State::State(ExtraICState extra_ic_state) {
+ // We don't deserialize the SSE2 field, since it is only used to be able
+ // to include both SSE2 and non-SSE2 versions in the snapshot. For code
+ // generation we always want it to reflect the current state.
+ op_ = static_cast<Token::Value>(
+ FIRST_TOKEN + OpField::decode(extra_ic_state));
+ mode_ = OverwriteModeField::decode(extra_ic_state);
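+ // The fixed right argument is always a power of two, so only its
+ // exponent is encoded (see GetExtraICState below).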
+ fixed_right_arg_ = Maybe<int>(
+ HasFixedRightArgField::decode(extra_ic_state),
+ 1 << FixedRightArgValueField::decode(extra_ic_state));
+ left_kind_ = LeftKindField::decode(extra_ic_state);
+ if (fixed_right_arg_.has_value) {
+ right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
+ } else {
+ right_kind_ = RightKindField::decode(extra_ic_state);
+ }
+ result_kind_ = ResultKindField::decode(extra_ic_state);
+ ASSERT_LE(FIRST_TOKEN, op_);
+ ASSERT_LE(op_, LAST_TOKEN);
+}
+
+
+ExtraICState BinaryOpIC::State::GetExtraICState() const {
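+ // The SSE2 bit is derived from the current CPU features here rather than
+ // carried over from a previously decoded state (see the constructor).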
+ bool sse2 = (Max(result_kind_, Max(left_kind_, right_kind_)) > SMI &&
+ CpuFeatures::IsSafeForSnapshot(SSE2));
+ ExtraICState extra_ic_state =
+ SSE2Field::encode(sse2) |
+ OpField::encode(op_ - FIRST_TOKEN) |
+ OverwriteModeField::encode(mode_) |
+ LeftKindField::encode(left_kind_) |
+ ResultKindField::encode(result_kind_) |
+ HasFixedRightArgField::encode(fixed_right_arg_.has_value);
+ if (fixed_right_arg_.has_value) {
+ extra_ic_state = FixedRightArgValueField::update(
+ extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
+ } else {
+ extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
+ }
+ return extra_ic_state;
}
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED: return "Uninitialized";
- case SMI: return "Smi";
- case INT32: return "Int32";
- case NUMBER: return "Number";
- case ODDBALL: return "Oddball";
- case STRING: return "String";
- case GENERIC: return "Generic";
- default: return "Invalid";
+// static
+void BinaryOpIC::State::GenerateAheadOfTime(
+ Isolate* isolate, void (*Generate)(Isolate*, const State&)) {
+ // TODO(olivf) We should investigate why adding stubs to the snapshot is so
+ // expensive at runtime. Once solved, we should be able to add most binops to
+ // the snapshot instead of hand-picking them.
+ // Generated list of commonly used stubs
+#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
+ do { \
+ State state(op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = false; \
+ state.right_kind_ = right_kind; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
+#undef GENERATE
+#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
+ do { \
+ State state(op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = true; \
+ state.fixed_right_arg_.value = fixed_right_arg_value; \
+ state.right_kind_ = SMI; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
+#undef GENERATE
+}
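Note that the second GENERATE table only pre-bakes MOD stubs whose right operand is a power of two: GetExtraICState() above stores WhichPowerOf2(value) in a 4-bit field, and a power-of-two modulus of a non-negative smi lowers to a simple mask. A standalone sketch of both facts (WhichPowerOf2 here is a simplified stand-in for V8's helper):

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for V8's WhichPowerOf2(); assumes v is a power of 2.
    static int WhichPowerOf2(uint32_t v) {
      int power = 0;
      while (v > 1) {
        v >>= 1;
        ++power;
      }
      return power;
    }

    int main() {
      assert(WhichPowerOf2(2048) == 11);  // still fits in the 4-bit field
      int32_t x = 1234567;
      assert(x % 32 == (x & (32 - 1)));   // the fast path for non-negative x
      return 0;
    }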
+
+
+Handle<Type> BinaryOpIC::State::GetResultType(Isolate* isolate) const {
+ Kind result_kind = result_kind_;
+ if (HasSideEffects()) {
+ result_kind = NONE;
+ } else if (result_kind == GENERIC && op_ == Token::ADD) {
+ return handle(Type::Union(handle(Type::Number(), isolate),
+ handle(Type::String(), isolate)), isolate);
+ } else if (result_kind == NUMBER && op_ == Token::SHR) {
+ return handle(Type::Unsigned32(), isolate);
+ }
+ ASSERT_NE(GENERIC, result_kind);
+ return KindToType(result_kind, isolate);
+}
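The SHR special case exists because JavaScript's >>> reinterprets the 32-bit result as unsigned, so a NUMBER result of SHR can exceed the Signed32 range. A small illustration of the underlying arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t x = -1;
      // JS: -1 >>> 0 reinterprets the bit pattern as an unsigned 32-bit value.
      uint32_t shr = static_cast<uint32_t>(x) >> 0;
      assert(shr == 4294967295u);  // too large for Signed32, hence Unsigned32
      return 0;
    }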
+
+
+void BinaryOpIC::State::Print(StringStream* stream) const {
+ stream->Add("(%s", Token::Name(op_));
+ if (mode_ == OVERWRITE_LEFT) stream->Add("_ReuseLeft");
+ else if (mode_ == OVERWRITE_RIGHT) stream->Add("_ReuseRight");
+ stream->Add(":%s*", KindToString(left_kind_));
+ if (fixed_right_arg_.has_value) {
+ stream->Add("%d", fixed_right_arg_.value);
+ } else {
+ stream->Add("%s", KindToString(right_kind_));
}
+ stream->Add("->%s)", KindToString(result_kind_));
}
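With --trace-ic enabled, Transition() below prints the old and new state in this format; two illustrative examples (assuming Token::Name yields the token's identifier, e.g. "ADD"):

    (ADD_ReuseLeft:Smi*Int32->Int32)   // overwrite-left ADD site
    (MOD:Smi*4->Smi)                   // MOD with fixed right argument 4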
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case INT32:
- case NUMBER:
- case ODDBALL:
- case STRING:
- return MONOMORPHIC;
- case GENERIC:
- return ::v8::internal::GENERIC;
+void BinaryOpIC::State::Update(Handle<Object> left,
+ Handle<Object> right,
+ Handle<Object> result) {
+ ExtraICState old_extra_ic_state = GetExtraICState();
+
+ left_kind_ = UpdateKind(left, left_kind_);
+ right_kind_ = UpdateKind(right, right_kind_);
+
+ int32_t fixed_right_arg_value = 0;
+ bool has_fixed_right_arg =
+ op_ == Token::MOD &&
+ right->ToInt32(&fixed_right_arg_value) &&
+ fixed_right_arg_value > 0 &&
+ IsPowerOf2(fixed_right_arg_value) &&
+ FixedRightArgValueField::is_valid(fixed_right_arg_value) &&
+ (left_kind_ == SMI || left_kind_ == INT32) &&
+ (result_kind_ == NONE || !fixed_right_arg_.has_value);
+ fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg,
+ fixed_right_arg_value);
+
+ result_kind_ = UpdateKind(result, result_kind_);
+
+ if (!Token::IsTruncatingBinaryOp(op_)) {
+ Kind input_kind = Max(left_kind_, right_kind_);
+ if (result_kind_ < input_kind && input_kind <= NUMBER) {
+ result_kind_ = input_kind;
+ }
+ }
+
+ // Reset overwrite mode unless we can actually make use of it, or may be able
+ // to make use of it at some point in the future.
+ if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
+ result_kind_ > NUMBER) {
+ mode_ = NO_OVERWRITE;
+ }
+
+ if (old_extra_ic_state == GetExtraICState()) {
+    // Tagged operations can lead to non-truncating HChanges.
+ if (left->IsUndefined() || left->IsBoolean()) {
+ left_kind_ = GENERIC;
+ } else if (right->IsUndefined() || right->IsBoolean()) {
+ right_kind_ = GENERIC;
+ } else {
+      // Since the x87 FPU is too precise, we might bail out on numbers that
+      // would actually truncate with 64-bit precision.
+ ASSERT(!CpuFeatures::IsSupported(SSE2));
+ ASSERT(result_kind_ < NUMBER);
+ result_kind_ = NUMBER;
+ }
}
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
}
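The result-widening step in Update() above ensures that a non-truncating operation never records a result kind below the joined input kind. A minimal restatement under simplified names:

    #include <cassert>

    enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

    static Kind WidenResult(Kind left, Kind right, Kind result,
                            bool is_truncating) {
      Kind input = left > right ? left : right;
      if (!is_truncating && result < input && input <= NUMBER) return input;
      return result;
    }

    int main() {
      // ADD does not truncate: an INT32+INT32 site keeps an INT32 result kind.
      assert(WidenResult(INT32, INT32, SMI, false) == INT32);
      // BIT_AND truncates to int32 anyway, so no widening is needed.
      assert(WidenResult(NUMBER, INT32, SMI, true) == SMI);
      return 0;
    }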
-Handle<Type> BinaryOpIC::TypeInfoToType(BinaryOpIC::TypeInfo binary_type,
- Isolate* isolate) {
- switch (binary_type) {
- case UNINITIALIZED:
- return handle(Type::None(), isolate);
- case SMI:
- return handle(Type::Smi(), isolate);
- case INT32:
- return handle(Type::Signed32(), isolate);
- case NUMBER:
- return handle(Type::Number(), isolate);
- case ODDBALL:
- return handle(Type::Optional(
- handle(Type::Union(
- handle(Type::Number(), isolate),
- handle(Type::String(), isolate)), isolate)), isolate);
- case STRING:
- return handle(Type::String(), isolate);
- case GENERIC:
- return handle(Type::Any(), isolate);
+BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle<Object> object,
+ Kind kind) const {
+ Kind new_kind = GENERIC;
+ bool is_truncating = Token::IsTruncatingBinaryOp(op());
+ if (object->IsBoolean() && is_truncating) {
+ // Booleans will be automatically truncated by HChange.
+ new_kind = INT32;
+ } else if (object->IsUndefined()) {
+ // Undefined will be automatically truncated by HChange.
+ new_kind = is_truncating ? INT32 : NUMBER;
+ } else if (object->IsSmi()) {
+ new_kind = SMI;
+ } else if (object->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(object)->value();
+ new_kind = TypeInfo::IsInt32Double(value) ? INT32 : NUMBER;
+ } else if (object->IsString() && op() == Token::ADD) {
+ new_kind = STRING;
}
- UNREACHABLE();
- return handle(Type::Any(), isolate);
-}
-
-
-void BinaryOpIC::StubInfoToType(int minor_key,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
- Isolate* isolate) {
- TypeInfo left_typeinfo, right_typeinfo, result_typeinfo;
- BinaryOpStub::decode_types_from_minor_key(
- minor_key, &left_typeinfo, &right_typeinfo, &result_typeinfo);
- *left = TypeInfoToType(left_typeinfo, isolate);
- *right = TypeInfoToType(right_typeinfo, isolate);
- *result = TypeInfoToType(result_typeinfo, isolate);
-}
-
-
-static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
- Token::Value op) {
- v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
- if (type.IsSmi()) return BinaryOpIC::SMI;
- if (type.IsInteger32()) {
- if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- if (type.IsNumber()) return BinaryOpIC::NUMBER;
- if (type.IsString()) return BinaryOpIC::STRING;
- if (value->IsUndefined()) {
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- return BinaryOpIC::ODDBALL;
+ if (new_kind == INT32 && SmiValuesAre32Bits()) {
+ new_kind = NUMBER;
+ }
+ if (kind != NONE &&
+ ((new_kind <= NUMBER && kind > NUMBER) ||
+ (new_kind > NUMBER && kind <= NUMBER))) {
+ new_kind = GENERIC;
}
- return BinaryOpIC::GENERIC;
+ return Max(kind, new_kind);
}
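UpdateKind() effectively joins the old and new kinds over the lattice NONE < SMI < INT32 < NUMBER < STRING < GENERIC, except that number-like and string-like feedback never mix: observing one after the other degrades the site to GENERIC. A standalone sketch of that join:

    #include <cassert>

    enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

    static Kind Join(Kind old_kind, Kind new_kind) {
      if (old_kind != NONE &&
          ((new_kind <= NUMBER && old_kind > NUMBER) ||
           (new_kind > NUMBER && old_kind <= NUMBER))) {
        new_kind = GENERIC;
      }
      return old_kind > new_kind ? old_kind : new_kind;
    }

    int main() {
      assert(Join(SMI, INT32) == INT32);     // widening within numbers
      assert(Join(STRING, SMI) == GENERIC);  // string site sees a number
      assert(Join(NONE, STRING) == STRING);  // uninitialized site
      return 0;
    }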
-static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
- Handle<Object> value,
- Token::Value op) {
- BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
- if (old_type == BinaryOpIC::STRING) {
- if (new_type == BinaryOpIC::STRING) return new_type;
- return BinaryOpIC::GENERIC;
+// static
+const char* BinaryOpIC::State::KindToString(Kind kind) {
+ switch (kind) {
+ case NONE: return "None";
+ case SMI: return "Smi";
+ case INT32: return "Int32";
+ case NUMBER: return "Number";
+ case STRING: return "String";
+ case GENERIC: return "Generic";
}
- return Max(old_type, new_type);
+ UNREACHABLE();
+ return NULL;
}
-#ifdef DEBUG
-static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
- BinaryOpIC::TypeInfo right,
- Maybe<int32_t> fixed_right_arg,
- BinaryOpIC::TypeInfo result) {
- PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
- if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value);
- PrintF("->%s", BinaryOpIC::GetName(result));
+// static
+Handle<Type> BinaryOpIC::State::KindToType(Kind kind, Isolate* isolate) {
+ Type* type = NULL;
+ switch (kind) {
+ case NONE: type = Type::None(); break;
+ case SMI: type = Type::Smi(); break;
+ case INT32: type = Type::Signed32(); break;
+ case NUMBER: type = Type::Number(); break;
+ case STRING: type = Type::String(); break;
+ case GENERIC: type = Type::Any(); break;
+ }
+ return handle(type, isolate);
}
-#endif
-RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 3);
+MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
+ State state(target()->extended_extra_ic_state());
- HandleScope scope(isolate);
- Handle<Object> left = args.at<Object>(0);
- Handle<Object> right = args.at<Object>(1);
- int key = args.smi_at(2);
- Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
-
- BinaryOpIC::TypeInfo previous_left, previous_right, previous_result;
- BinaryOpStub::decode_types_from_minor_key(
- key, &previous_left, &previous_right, &previous_result);
-
- BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
- BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
-
- // STRING is only used for ADD operations.
- if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
- op != Token::ADD) {
- new_left = new_right = BinaryOpIC::GENERIC;
- }
-
- BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
- BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
-
- Maybe<int> previous_fixed_right_arg =
- BinaryOpStub::decode_fixed_right_arg_from_minor_key(key);
-
- int32_t value;
- bool new_has_fixed_right_arg =
- op == Token::MOD &&
- right->ToInt32(&value) &&
- BinaryOpStub::can_encode_arg_value(value) &&
- (previous_overall == BinaryOpIC::UNINITIALIZED ||
- (previous_fixed_right_arg.has_value &&
- previous_fixed_right_arg.value == value));
- Maybe<int32_t> new_fixed_right_arg(
- new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1);
-
- if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) {
- if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
- if (op == Token::DIV ||
- op == Token::MUL ||
- op == Token::SHR ||
- SmiValuesAre32Bits()) {
- // Arithmetic on two Smi inputs has yielded a heap number.
- // That is the only way to get here from the Smi stub.
- // With 32-bit Smis, all overflows give heap numbers, but with
- // 31-bit Smis, most operations overflow to int32 results.
- result_type = BinaryOpIC::NUMBER;
- } else {
- // Other operations on SMIs that overflow yield int32s.
- result_type = BinaryOpIC::INT32;
- }
- }
- if (new_overall == BinaryOpIC::INT32 &&
- previous_overall == BinaryOpIC::INT32) {
- if (new_left == previous_left && new_right == previous_right) {
- result_type = BinaryOpIC::NUMBER;
- }
- }
- }
+ // Compute the actual result using the builtin for the binary operation.
+ Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
+ TokenToJSBuiltin(state.op()));
+ Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(
+ isolate(), function, left, 1, &right, &caught_exception);
+ if (caught_exception) return Failure::Exception();
- BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg);
- Handle<Code> code = stub.GetCode(isolate);
- if (!code.is_null()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC in ");
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" ");
- TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg,
- previous_result);
- PrintF(" => ");
- TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type);
- PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
- }
-#endif
- BinaryOpIC ic(isolate);
- ic.patch(*code);
+ // Compute the new state.
+ State old_state = state;
+ state.Update(left, right, result);
- // Activate inlined smi code.
- if (previous_overall == BinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
- }
- }
+ // Install the new stub.
+ BinaryOpICStub stub(state);
+ set_target(*stub.GetCode(isolate()));
- Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
- break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
- break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
- break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
- break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
- break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
- break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
- break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
- break;
- default:
- UNREACHABLE();
+ if (FLAG_trace_ic) {
+ char buffer[150];
+ NoAllocationStringAllocator allocator(
+ buffer, static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[BinaryOpIC");
+ old_state.Print(&stream);
+ stream.Add(" => ");
+ state.Print(&stream);
+ stream.Add(" @ %p <- ", static_cast<void*>(*target()));
+ stream.OutputToStdOut();
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ PrintF("]\n");
}
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
-
- bool caught_exception;
- Handle<Object> builtin_args[] = { right };
- Handle<Object> result = Execution::Call(isolate,
- builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
+ // Patch the inlined smi code as necessary.
+ if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
+ PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
}
+
return *result;
}
+RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
+ HandleScope scope(isolate);
+ Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
+ Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
+ BinaryOpIC ic(isolate);
+ return ic.Transition(left, right);
+}
+
+
Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
Code* code = NULL;
@@ -2991,7 +2957,7 @@ CompareIC::State CompareIC::TargetState(State old_state,
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope(isolate());
State previous_left, previous_right, previous_state;
ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
@@ -3005,9 +2971,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
stub.set_known_map(
Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
}
- set_target(*stub.GetCode(isolate()));
+ Handle<Code> new_target = stub.GetCode(isolate());
+ set_target(*new_target);
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[CompareIC in ");
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
@@ -3021,28 +2987,28 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Token::Name(op_),
static_cast<void*>(*stub.GetCode(isolate())));
}
-#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
}
+
+ return *new_target;
}
// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
- ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
- return ic.target();
+ return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
}
void CompareNilIC::Clear(Address address, Code* target) {
- if (target->ic_state() == UNINITIALIZED) return;
- Code::ExtraICState state = target->extended_extra_ic_state();
+ if (IsCleared(target)) return;
+ ExtraICState state = target->extended_extra_ic_state();
CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED);
stub.ClearState();
@@ -3064,7 +3030,7 @@ MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) {
- Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ ExtraICState extra_ic_state = target()->extended_extra_ic_state();
CompareNilICStub stub(extra_ic_state);
@@ -3106,9 +3072,49 @@ RUNTIME_FUNCTION(MaybeObject*, Unreachable) {
}
-MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
- Code::ExtraICState extra_ic_state) {
- ToBooleanStub stub(extra_ic_state);
+Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
+  switch (op) {
+    case Token::ADD: return Builtins::ADD;
+    case Token::SUB: return Builtins::SUB;
+    case Token::MUL: return Builtins::MUL;
+    case Token::DIV: return Builtins::DIV;
+    case Token::MOD: return Builtins::MOD;
+    case Token::BIT_OR: return Builtins::BIT_OR;
+    case Token::BIT_AND: return Builtins::BIT_AND;
+    case Token::BIT_XOR: return Builtins::BIT_XOR;
+    case Token::SAR: return Builtins::SAR;
+    case Token::SHR: return Builtins::SHR;
+    case Token::SHL: return Builtins::SHL;
+    default:
+      UNREACHABLE();
+      return Builtins::ADD;
+  }
+}
+
+
+MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object) {
+ ToBooleanStub stub(target()->extended_extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
@@ -3121,8 +3127,7 @@ RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) {
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
ToBooleanIC ic(isolate);
- Code::ExtraICState ic_state = ic.target()->extended_extra_ic_state();
- return ic.ToBoolean(object, ic_state);
+ return ic.ToBoolean(object);
}
diff --git a/chromium/v8/src/ic.h b/chromium/v8/src/ic.h
index 8f09e1d0a2c..fa7ed6dbc13 100644
--- a/chromium/v8/src/ic.h
+++ b/chromium/v8/src/ic.h
@@ -40,7 +40,6 @@ namespace internal {
#define IC_UTIL_LIST(ICU) \
ICU(LoadIC_Miss) \
ICU(KeyedLoadIC_Miss) \
- ICU(KeyedLoadIC_MissForceGeneric) \
ICU(CallIC_Miss) \
ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
@@ -48,7 +47,6 @@ namespace internal {
ICU(StoreIC_Slow) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
- ICU(KeyedStoreIC_MissForceGeneric) \
ICU(KeyedStoreIC_Slow) \
/* Utilities for IC stubs. */ \
ICU(StoreCallbackProperty) \
@@ -57,8 +55,8 @@ namespace internal {
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
- ICU(BinaryOp_Patch) \
ICU(CompareIC_Miss) \
+ ICU(BinaryOpIC_Miss) \
ICU(CompareNilIC_Miss) \
ICU(Unreachable) \
ICU(ToBooleanIC_Miss)
@@ -94,12 +92,14 @@ class IC {
IC(FrameDepth depth, Isolate* isolate);
virtual ~IC() {}
- // Get the call-site target; used for determining the state.
- Code* target() const { return GetTargetAtAddress(address()); }
+ State state() const { return state_; }
inline Address address() const;
// Compute the current IC state based on the target stub, receiver and name.
- static State StateFrom(Code* target, Object* receiver, Object* name);
+ void UpdateState(Handle<Object> receiver, Handle<Object> name);
+ void MarkMonomorphicPrototypeFailure() {
+ state_ = MONOMORPHIC_PROTOTYPE_FAILURE;
+ }
// Clear the inline cache to initial state.
static void Clear(Isolate* isolate, Address address);
@@ -124,17 +124,53 @@ class IC {
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
+#ifdef DEBUG
+ bool IsLoadStub() {
+ return target()->is_load_stub() || target()->is_keyed_load_stub();
+ }
+
+ bool IsStoreStub() {
+ return target()->is_store_stub() || target()->is_keyed_store_stub();
+ }
+
+ bool IsCallStub() {
+ return target()->is_call_stub() || target()->is_keyed_call_stub();
+ }
+#endif
+
// Determines which map must be used for keeping the code stub.
// These methods should not be called with undefined or null.
- static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
- JSObject* holder);
- static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
- JSObject* holder);
- static inline JSObject* GetCodeCacheHolder(Isolate* isolate,
- Object* object,
- InlineCacheHolderFlag holder);
+ static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object);
+  // TODO(verwaest): This currently returns a HeapObject rather than JSObject*
+  // since the ICs for loading the length from strings are stored on the
+  // string map directly, rather than on the JSObject-typed prototype.
+ static inline HeapObject* GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
+ InlineCacheHolderFlag holder);
+
+ static inline InlineCacheHolderFlag GetCodeCacheFlag(Type* type);
+ static inline Handle<Map> GetCodeCacheHolder(InlineCacheHolderFlag flag,
+ Type* type,
+ Isolate* isolate);
+
+ static bool IsCleared(Code* code) {
+ InlineCacheState state = code->ic_state();
+ return state == UNINITIALIZED || state == PREMONOMORPHIC;
+ }
+
+ // Utility functions to convert maps to types and back. There are two special
+ // cases:
+ // - The heap_number_map is used as a marker which includes heap numbers as
+ // well as smis.
+ // - The oddball map is only used for booleans.
+ static Handle<Map> TypeToMap(Type* type, Isolate* isolate);
+ static Type* MapToType(Handle<Map> type);
+ static Handle<Type> CurrentTypeOf(Handle<Object> object, Isolate* isolate);
protected:
+ // Get the call-site target; used for determining the state.
+ Handle<Code> target() const { return target_; }
+
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
Isolate* isolate() const { return isolate_; }
@@ -146,15 +182,17 @@ class IC {
#endif
// Set the call-site target.
- void set_target(Code* code) { SetTargetAtAddress(address(), code); }
+ void set_target(Code* code) {
+ SetTargetAtAddress(address(), code);
+ target_set_ = true;
+ }
+
+ bool is_target_set() { return target_set_; }
#ifdef DEBUG
char TransitionMarkFromState(IC::State state);
- void TraceIC(const char* type,
- Handle<Object> name,
- State old_state,
- Code* new_target);
+ void TraceIC(const char* type, Handle<Object> name);
#endif
Failure* TypeError(const char* type,
@@ -167,53 +205,61 @@ class IC {
static inline void SetTargetAtAddress(Address address, Code* target);
static void PostPatching(Address address, Code* target, Code* old_target);
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode) {
- set_target(*handler);
- }
- bool UpdatePolymorphicIC(State state,
- Handle<HeapObject> receiver,
- Handle<String> name,
- Handle<Code> code,
- StrictModeFlag strict_mode);
-
- virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
+ // Compute the handler either by compiling or by retrieving a cached version.
+ Handle<Code> ComputeHandler(LookupResult* lookup,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value = Handle<Code>::null());
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value,
+ InlineCacheHolderFlag cache_holder) {
UNREACHABLE();
return Handle<Code>::null();
- };
+ }
+
+ void UpdateMonomorphicIC(Handle<Type> type,
+ Handle<Code> handler,
+ Handle<String> name);
+
+ bool UpdatePolymorphicIC(Handle<Type> type,
+ Handle<String> name,
+ Handle<Code> code);
+
+ virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code);
void CopyICToMegamorphicCache(Handle<String> name);
- bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
- void PatchCache(State state,
- StrictModeFlag strict_mode,
- Handle<HeapObject> receiver,
+ bool IsTransitionOfMonomorphicTarget(Type* type);
+ void PatchCache(Handle<Type> type,
Handle<String> name,
Handle<Code> code);
- virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
- virtual Handle<Code> megamorphic_stub() {
+ virtual Code::Kind kind() const {
UNREACHABLE();
- return Handle<Code>::null();
+ return Code::STUB;
}
- virtual Handle<Code> megamorphic_stub_strict() {
+ virtual Handle<Code> slow_stub() const {
UNREACHABLE();
return Handle<Code>::null();
}
- virtual Handle<Code> generic_stub() const {
+ virtual Handle<Code> megamorphic_stub() {
UNREACHABLE();
return Handle<Code>::null();
}
- virtual Handle<Code> generic_stub_strict() const {
+ virtual Handle<Code> generic_stub() const {
UNREACHABLE();
return Handle<Code>::null();
}
+ bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name);
+ void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name);
+
+ virtual ExtraICState extra_ic_state() { return kNoExtraICState; }
+
private:
+ Code* raw_target() const { return GetTargetAtAddress(address()); }
+
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
@@ -225,6 +271,11 @@ class IC {
Isolate* isolate_;
+ // The original code target that missed.
+ Handle<Code> target_;
+ State state_;
+ bool target_set_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
};
@@ -247,35 +298,30 @@ class IC_Utility {
class CallICBase: public IC {
public:
- class Contextual: public BitField<bool, 0, 1> {};
+ // ExtraICState bits
+ class Contextual: public BitField<ContextualMode, 0, 1> {};
class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
+ static ExtraICState ComputeExtraICState(ContextualMode mode,
+ StringStubFeedback feedback) {
+ return Contextual::encode(mode) | StringStubState::encode(feedback);
+ }
// Returns a JSFunction or a Failure.
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Code::ExtraICState extra_ic_state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
Handle<String> name);
protected:
CallICBase(Code::Kind kind, Isolate* isolate)
: IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
- bool TryUpdateExtraICState(LookupResult* lookup,
- Handle<Object> object,
- Code::ExtraICState* extra_ic_state);
-
// Compute a monomorphic stub if possible, otherwise return a null handle.
Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_state,
Handle<Object> object,
Handle<String> name);
// Update the inline cache and the global stub cache based on the lookup
// result.
void UpdateCaches(LookupResult* lookup,
- State state,
- Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
@@ -293,14 +339,17 @@ class CallICBase: public IC {
static void GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
- Code::ExtraICState extra_state);
+ ExtraICState extra_state);
static void GenerateNormal(MacroAssembler* masm, int argc);
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state);
+ ExtraICState extra_state);
+
+ virtual Handle<Code> megamorphic_stub();
+ virtual Handle<Code> pre_monomorphic_stub();
Code::Kind kind_;
@@ -310,31 +359,40 @@ class CallICBase: public IC {
class CallIC: public CallICBase {
public:
- explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) {
+ explicit CallIC(Isolate* isolate)
+ : CallICBase(Code::CALL_IC, isolate),
+ extra_ic_state_(target()->extra_ic_state()) {
ASSERT(target()->is_call_stub());
}
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
GenerateMiss(masm, argc, extra_state);
}
static void GenerateMiss(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
}
static void GenerateMegamorphic(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_ic_state);
+ ExtraICState extra_ic_state);
static void GenerateNormal(MacroAssembler* masm, int argc) {
CallICBase::GenerateNormal(masm, argc);
- GenerateMiss(masm, argc, Code::kNoExtraICState);
+ GenerateMiss(masm, argc, kNoExtraICState);
}
+ bool TryUpdateExtraICState(LookupResult* lookup, Handle<Object> object);
+
+ protected:
+ virtual ExtraICState extra_ic_state() { return extra_ic_state_; }
+
+ private:
+ ExtraICState extra_ic_state_;
};
@@ -345,8 +403,7 @@ class KeyedCallIC: public CallICBase {
ASSERT(target()->is_keyed_call_stub());
}
- MUST_USE_RESULT MaybeObject* LoadFunction(State state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object,
Handle<Object> key);
// Code generator routines.
@@ -356,7 +413,7 @@ class KeyedCallIC: public CallICBase {
static void GenerateMiss(MacroAssembler* masm, int argc) {
CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
- Code::kNoExtraICState);
+ kNoExtraICState);
}
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
@@ -368,7 +425,7 @@ class KeyedCallIC: public CallICBase {
class LoadIC: public IC {
public:
explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
- ASSERT(target()->is_load_stub() || target()->is_keyed_load_stub());
+ ASSERT(IsLoadStub());
}
// Code generator routines.
@@ -381,8 +438,7 @@ class LoadIC: public IC {
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
Handle<String> name);
protected:
@@ -399,46 +455,40 @@ class LoadIC: public IC {
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
- State state,
Handle<Object> object,
Handle<String> name);
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
-
- virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode);
-
- virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name);
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> unused,
+ InlineCacheHolderFlag cache_holder);
private:
// Stub accessors.
static Handle<Code> initialize_stub(Isolate* isolate) {
return isolate->builtins()->LoadIC_Initialize();
}
+
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+ return isolate->builtins()->LoadIC_PreMonomorphic();
+ }
+
virtual Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->LoadIC_PreMonomorphic();
+ return pre_monomorphic_stub(isolate());
}
+ Handle<Code> SimpleFieldLoad(int offset,
+ bool inobject = true,
+ Representation representation =
+ Representation::Tagged());
+
static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
-enum ICMissMode {
- MISS_FORCE_GENERIC,
- MISS
-};
-
-
class KeyedLoadIC: public LoadIC {
public:
explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
@@ -446,19 +496,15 @@ class KeyedLoadIC: public LoadIC {
ASSERT(target()->is_keyed_load_stub());
}
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
- Handle<Object> key,
- ICMissMode force_generic);
+ MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
+ Handle<Object> key);
// Code generator routines.
- static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
+ static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
- }
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
@@ -487,23 +533,18 @@ class KeyedLoadIC: public LoadIC {
return isolate()->builtins()->KeyedLoadIC_Slow();
}
- // Update the inline cache.
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
- virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
- Handle<JSObject> receiver,
- Handle<String> name);
- virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code) { }
private:
// Stub accessors.
static Handle<Code> initialize_stub(Isolate* isolate) {
return isolate->builtins()->KeyedLoadIC_Initialize();
}
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
+ }
virtual Handle<Code> pre_monomorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
+ return pre_monomorphic_stub(isolate());
}
Handle<Code> indexed_interceptor_stub() {
return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
@@ -523,10 +564,29 @@ class KeyedLoadIC: public LoadIC {
class StoreIC: public IC {
public:
- StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
- ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub());
+ // ExtraICState bits
+ class StrictModeState: public BitField<StrictModeFlag, 0, 1> {};
+ static ExtraICState ComputeExtraICState(StrictModeFlag flag) {
+ return StrictModeState::encode(flag);
+ }
+
+ static StrictModeFlag GetStrictMode(ExtraICState state) {
+ return StrictModeState::decode(state);
}
+ // For convenience, a statically declared encoding of strict mode extra
+ // IC state.
+ static const ExtraICState kStrictModeState =
+ 1 << StrictModeState::kShift;
+
+ StoreIC(FrameDepth depth, Isolate* isolate)
+ : IC(depth, isolate),
+ strict_mode_(GetStrictMode(target()->extra_ic_state())) {
+ ASSERT(IsStoreStub());
+ }
+
+ StrictModeFlag strict_mode() const { return strict_mode_; }
+
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
@@ -535,14 +595,12 @@ class StoreIC: public IC {
}
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode);
+ ExtraICState extra_ic_state);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* Store(
- State state,
- StrictModeFlag strict_mode,
Handle<Object> object,
Handle<String> name,
Handle<Object> value,
@@ -552,75 +610,75 @@ class StoreIC: public IC {
protected:
virtual Code::Kind kind() const { return Code::STORE_IC; }
virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->StoreIC_Megamorphic();
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Megamorphic_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Megamorphic();
+ }
}
// Stub accessors.
- virtual Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->StoreIC_Megamorphic_Strict();
- }
virtual Handle<Code> generic_stub() const {
- return isolate()->builtins()->StoreIC_Generic();
- }
- virtual Handle<Code> generic_stub_strict() const {
- return isolate()->builtins()->StoreIC_Generic_Strict();
- }
- virtual Handle<Code> pre_monomorphic_stub() const {
- return isolate()->builtins()->StoreIC_PreMonomorphic();
- }
- virtual Handle<Code> pre_monomorphic_stub_strict() const {
- return isolate()->builtins()->StoreIC_PreMonomorphic_Strict();
- }
- virtual Handle<Code> global_proxy_stub() {
- return isolate()->builtins()->StoreIC_GlobalProxy();
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->StoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->StoreIC_Generic();
+ }
}
- virtual Handle<Code> global_proxy_stub_strict() {
- return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->StoreIC_Slow();
}
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate(), strict_mode());
+ }
- virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode);
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->StoreIC_PreMonomorphic_Strict();
+ } else {
+ return isolate->builtins()->StoreIC_PreMonomorphic();
+ }
+ }
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
- virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
+ virtual Handle<Code> CompileHandler(LookupResult* lookup,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value,
+ InlineCacheHolderFlag cache_holder);
+
+ virtual ExtraICState extra_ic_state() {
+ return ComputeExtraICState(strict_mode());
+ }
private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
- Code::GetStrictMode(target()->extra_ic_state()));
+ ASSERT(GetStrictMode(code->extra_ic_state()) ==
+ GetStrictMode(target()->extra_ic_state()));
IC::set_target(code);
}
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->StoreIC_Initialize();
- }
- static Handle<Code> initialize_stub_strict(Isolate* isolate) {
- return isolate->builtins()->StoreIC_Initialize_Strict();
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->StoreIC_Initialize_Strict();
+ } else {
+ return isolate->builtins()->StoreIC_Initialize();
+ }
}
+
static void Clear(Isolate* isolate, Address address, Code* target);
+ StrictModeFlag strict_mode_;
+
friend class IC;
};
@@ -639,26 +697,37 @@ enum KeyedStoreIncrementLength {
class KeyedStoreIC: public StoreIC {
public:
+  // ExtraICState bits (building on those of StoreIC)
+ class ExtraICStateKeyedAccessStoreMode:
+ public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
+
+ static ExtraICState ComputeExtraICState(StrictModeFlag flag,
+ KeyedAccessStoreMode mode) {
+ return StrictModeState::encode(flag) |
+ ExtraICStateKeyedAccessStoreMode::encode(mode);
+ }
+
+ static KeyedAccessStoreMode GetKeyedAccessStoreMode(
+ ExtraICState extra_state) {
+ return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
+ }
+
KeyedStoreIC(FrameDepth depth, Isolate* isolate)
: StoreIC(depth, isolate) {
ASSERT(target()->is_keyed_store_stub());
}
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
+ MUST_USE_RESULT MaybeObject* Store(Handle<Object> object,
Handle<Object> name,
- Handle<Object> value,
- ICMissMode force_generic);
+ Handle<Object> value);
// Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
- }
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
- static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
+ static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
@@ -668,56 +737,62 @@ class KeyedStoreIC: public StoreIC {
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
- virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code) { }
- virtual Handle<Code> pre_monomorphic_stub() const {
- return isolate()->builtins()->KeyedStoreIC_PreMonomorphic();
+ virtual ExtraICState extra_ic_state() {
+ return ComputeExtraICState(strict_mode(), STANDARD_STORE);
}
- virtual Handle<Code> pre_monomorphic_stub_strict() const {
- return isolate()->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+
+ virtual Handle<Code> pre_monomorphic_stub() {
+ return pre_monomorphic_stub(isolate(), strict_mode());
}
- virtual Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedStoreIC_Generic();
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+ } else {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
+ }
}
- virtual Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->KeyedStoreIC_Slow();
+ }
+ virtual Handle<Code> megamorphic_stub() {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
}
Handle<Code> StoreElementStub(Handle<JSObject> receiver,
- KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode);
-
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<String> name,
- StrictModeFlag strict_mode);
+ KeyedAccessStoreMode store_mode);
private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
- Code::GetStrictMode(target()->extra_ic_state()));
+ ASSERT(GetStrictMode(code->extra_ic_state()) == strict_mode());
IC::set_target(code);
}
// Stub accessors.
- static Handle<Code> initialize_stub(Isolate* isolate) {
- return isolate->builtins()->KeyedStoreIC_Initialize();
- }
- static Handle<Code> initialize_stub_strict(Isolate* isolate) {
- return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
- }
- Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedStoreIC_Generic();
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ StrictModeFlag strict_mode) {
+ if (strict_mode == kStrictMode) {
+ return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
+ } else {
+ return isolate->builtins()->KeyedStoreIC_Initialize();
+ }
}
- Handle<Code> generic_stub_strict() const {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+
+ virtual Handle<Code> generic_stub() const {
+ if (strict_mode() == kStrictMode) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
}
+
Handle<Code> non_strict_arguments_stub() {
return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
}
@@ -735,35 +810,116 @@ class KeyedStoreIC: public StoreIC {
};
+// Modes describing which operand value of a BinaryExpression, if any, may
+// be overwritten by the result.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
class BinaryOpIC: public IC {
public:
- enum TypeInfo {
- UNINITIALIZED,
- SMI,
- INT32,
- NUMBER,
- ODDBALL,
- STRING, // Only used for addition operation.
- GENERIC
- };
+ class State V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit State(ExtraICState extra_ic_state);
+
+ State(Token::Value op, OverwriteMode mode)
+ : op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE),
+ result_kind_(NONE) {
+ ASSERT_LE(FIRST_TOKEN, op);
+ ASSERT_LE(op, LAST_TOKEN);
+ }
- static void StubInfoToType(int minor_key,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
- Isolate* isolate);
+ InlineCacheState GetICState() const {
+ if (Max(left_kind_, right_kind_) == NONE) {
+ return ::v8::internal::UNINITIALIZED;
+ }
+ if (Max(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::MEGAMORPHIC;
+ }
+ if (Min(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::GENERIC;
+ }
+ return ::v8::internal::MONOMORPHIC;
+ }
- explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+ ExtraICState GetExtraICState() const;
- void patch(Code* code);
+ static void GenerateAheadOfTime(
+ Isolate*, void (*Generate)(Isolate*, const State&));
- static const char* GetName(TypeInfo type_info);
+ bool CanReuseDoubleBox() const {
+ return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
+ ((mode_ == OVERWRITE_LEFT &&
+ left_kind_ > SMI && left_kind_ <= NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT &&
+ right_kind_ > SMI && right_kind_ <= NUMBER));
+ }
- static State ToState(TypeInfo type_info);
+ bool HasSideEffects() const {
+ return Max(left_kind_, right_kind_) == GENERIC;
+ }
- private:
- static Handle<Type> TypeInfoToType(TypeInfo binary_type, Isolate* isolate);
+ bool UseInlinedSmiCode() const {
+ return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
+ }
+
+ static const int FIRST_TOKEN = Token::BIT_OR;
+ static const int LAST_TOKEN = Token::MOD;
+
+ Token::Value op() const { return op_; }
+ OverwriteMode mode() const { return mode_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+
+ Handle<Type> GetLeftType(Isolate* isolate) const {
+ return KindToType(left_kind_, isolate);
+ }
+ Handle<Type> GetRightType(Isolate* isolate) const {
+ return KindToType(right_kind_, isolate);
+ }
+ Handle<Type> GetResultType(Isolate* isolate) const;
+
+ void Print(StringStream* stream) const;
+
+ void Update(Handle<Object> left,
+ Handle<Object> right,
+ Handle<Object> result);
+
+ private:
+ enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+ Kind UpdateKind(Handle<Object> object, Kind kind) const;
+
+ static const char* KindToString(Kind kind);
+ static Handle<Type> KindToType(Kind kind, Isolate* isolate);
+ static bool KindMaybeSmi(Kind kind) {
+ return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
+ }
+
+ // We truncate the last bit of the token.
+ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
+ class OpField: public BitField<int, 0, 4> {};
+ class OverwriteModeField: public BitField<OverwriteMode, 4, 2> {};
+ class SSE2Field: public BitField<bool, 6, 1> {};
+ class ResultKindField: public BitField<Kind, 7, 3> {};
+ class LeftKindField: public BitField<Kind, 10, 3> {};
+ // When fixed right arg is set, we don't need to store the right kind.
+ // Thus the two fields can overlap.
+ class HasFixedRightArgField: public BitField<bool, 13, 1> {};
+ class FixedRightArgValueField: public BitField<int, 14, 4> {};
+ class RightKindField: public BitField<Kind, 14, 3> {};
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ Kind left_kind_;
+ Kind right_kind_;
+ Kind result_kind_;
+ Maybe<int> fixed_right_arg_;
+ };
+
+ explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
+
+ static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
+
+ MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
+ Handle<Object> right);
};
@@ -804,7 +960,7 @@ class CompareIC: public IC {
: IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
// Update the inline cache for the given operands.
- void UpdateCaches(Handle<Object> x, Handle<Object> y);
+ Code* UpdateCaches(Handle<Object> x, Handle<Object> y);
// Factory method for getting an uninitialized compare stub.
@@ -857,7 +1013,7 @@ class ToBooleanIC: public IC {
public:
explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- MaybeObject* ToBoolean(Handle<Object> object, Code::ExtraICState state);
+ MaybeObject* ToBoolean(Handle<Object> object);
};
@@ -869,7 +1025,9 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
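
The new BinaryOpIC::State above packs the operation, overwrite mode, and operand kinds into one ExtraICState word with BitField templates, and lets FixedRightArgValueField overlap RightKindField since only one of the two is live at a time (selected by HasFixedRightArgField). A minimal sketch of the same encode/decode pattern, using a simplified stand-in for V8's BitField rather than the real header:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for v8::internal::BitField<T, shift, size>.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    assert(static_cast<uint32_t>(value) < (1u << size));
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

// Illustrative layout only; the real field offsets are in the class above.
typedef BitField<int, 0, 4>  OpField;
typedef BitField<Kind, 4, 3> LeftKindField;
typedef BitField<Kind, 7, 3> RightKindField;

int main() {
  uint32_t state = OpField::encode(3) |
                   LeftKindField::encode(SMI) |
                   RightKindField::encode(NUMBER);
  assert(OpField::decode(state) == 3);
  assert(LeftKindField::decode(state) == SMI);
  assert(RightKindField::decode(state) == NUMBER);
  printf("packed state: 0x%x\n", state);
  return 0;
}

Decoding is just mask-and-shift, which is what lets the State(ExtraICState) constructor rebuild the full record from the integer stored with the code object.
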
diff --git a/chromium/v8/src/incremental-marking.cc b/chromium/v8/src/incremental-marking.cc
index df0f14a74ce..4223dde211e 100644
--- a/chromium/v8/src/incremental-marking.cc
+++ b/chromium/v8/src/incremental-marking.cc
@@ -648,6 +648,8 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+ heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
+
// Ready to start incremental marking.
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Running\n");
@@ -726,7 +728,7 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
+#if ENABLE_SLOW_ASSERTS
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
diff --git a/chromium/v8/src/isolate-inl.h b/chromium/v8/src/isolate-inl.h
index 45076f56578..764bcb8bf38 100644
--- a/chromium/v8/src/isolate-inl.h
+++ b/chromium/v8/src/isolate-inl.h
@@ -48,6 +48,11 @@ SaveContext::SaveContext(Isolate* isolate)
}
+bool Isolate::IsCodePreAgingActive() {
+ return FLAG_optimize_for_size && FLAG_age_code && !IsDebuggerActive();
+}
+
+
bool Isolate::IsDebuggerActive() {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!NoBarrier_Load(&debugger_initialized_)) return false;
diff --git a/chromium/v8/src/isolate.cc b/chromium/v8/src/isolate.cc
index 6fa496a9026..25bc54685f4 100644
--- a/chromium/v8/src/isolate.cc
+++ b/chromium/v8/src/isolate.cc
@@ -29,7 +29,6 @@
#include "v8.h"
-#include "allocation-inl.h"
#include "ast.h"
#include "bootstrapper.h"
#include "codegen.h"
@@ -42,7 +41,6 @@
#include "isolate-inl.h"
#include "lithium-allocator.h"
#include "log.h"
-#include "marking-thread.h"
#include "messages.h"
#include "platform.h"
#include "regexp-stack.h"
@@ -121,11 +119,7 @@ void ThreadLocalTop::InitializeInternal() {
void ThreadLocalTop::Initialize() {
InitializeInternal();
#ifdef USE_SIMULATOR
-#if V8_TARGET_ARCH_ARM
simulator_ = Simulator::current(isolate_);
-#elif V8_TARGET_ARCH_MIPS
- simulator_ = Simulator::current(isolate_);
-#endif
#endif
thread_id_ = ThreadId::Current();
}
@@ -136,207 +130,6 @@ v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
}
-int SystemThreadManager::NumberOfParallelSystemThreads(
- ParallelSystemComponent type) {
- int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
- ASSERT(number_of_threads > 0);
- if (number_of_threads == 1) {
- return 0;
- }
- if (type == PARALLEL_SWEEPING) {
- return number_of_threads;
- } else if (type == CONCURRENT_SWEEPING) {
- return number_of_threads - 1;
- } else if (type == PARALLEL_MARKING) {
- return number_of_threads;
- }
- return 1;
-}
-
-
-// Create a dummy thread that will wait forever on a semaphore. The only
-// purpose for this thread is to have some stack area to save essential data
-// into for use by a stacks only core dump (aka minidump).
-class PreallocatedMemoryThread: public Thread {
- public:
- char* data() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return data_;
- }
-
- unsigned length() {
- if (data_ready_semaphore_ != NULL) {
- // Initial access is guarded until the data has been published.
- data_ready_semaphore_->Wait();
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
- return length_;
- }
-
- // Stop the PreallocatedMemoryThread and release its resources.
- void StopThread() {
- keep_running_ = false;
- wait_for_ever_semaphore_->Signal();
-
- // Wait for the thread to terminate.
- Join();
-
- if (data_ready_semaphore_ != NULL) {
- delete data_ready_semaphore_;
- data_ready_semaphore_ = NULL;
- }
-
- delete wait_for_ever_semaphore_;
- wait_for_ever_semaphore_ = NULL;
- }
-
- protected:
- // When the thread starts running it will allocate a fixed number of bytes
- // on the stack and publish the location of this memory for others to use.
- void Run() {
- EmbeddedVector<char, 15 * 1024> local_buffer;
-
- // Initialize the buffer with a known good value.
- OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
- local_buffer.length());
-
- // Publish the local buffer and signal its availability.
- data_ = local_buffer.start();
- length_ = local_buffer.length();
- data_ready_semaphore_->Signal();
-
- while (keep_running_) {
- // This thread will wait here until the end of time.
- wait_for_ever_semaphore_->Wait();
- }
-
- // Make sure we access the buffer after the wait to remove all possibility
- // of it being optimized away.
- OS::StrNCpy(local_buffer, "PreallocatedMemoryThread shutting down.\n",
- local_buffer.length());
- }
-
-
- private:
- PreallocatedMemoryThread()
- : Thread("v8:PreallocMem"),
- keep_running_(true),
- wait_for_ever_semaphore_(new Semaphore(0)),
- data_ready_semaphore_(new Semaphore(0)),
- data_(NULL),
- length_(0) {
- }
-
- // Used to make sure that the thread keeps looping even for spurious wakeups.
- bool keep_running_;
-
- // This semaphore is used by the PreallocatedMemoryThread to wait for ever.
- Semaphore* wait_for_ever_semaphore_;
- // Semaphore to signal that the data has been initialized.
- Semaphore* data_ready_semaphore_;
-
- // Location and size of the preallocated memory block.
- char* data_;
- unsigned length_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(PreallocatedMemoryThread);
-};
-
-
-void Isolate::PreallocatedMemoryThreadStart() {
- if (preallocated_memory_thread_ != NULL) return;
- preallocated_memory_thread_ = new PreallocatedMemoryThread();
- preallocated_memory_thread_->Start();
-}
-
-
-void Isolate::PreallocatedMemoryThreadStop() {
- if (preallocated_memory_thread_ == NULL) return;
- preallocated_memory_thread_->StopThread();
- // Done with the thread entirely.
- delete preallocated_memory_thread_;
- preallocated_memory_thread_ = NULL;
-}
-
-
-void Isolate::PreallocatedStorageInit(size_t size) {
- ASSERT(free_list_.next_ == &free_list_);
- ASSERT(free_list_.previous_ == &free_list_);
- PreallocatedStorage* free_chunk =
- reinterpret_cast<PreallocatedStorage*>(new char[size]);
- free_list_.next_ = free_list_.previous_ = free_chunk;
- free_chunk->next_ = free_chunk->previous_ = &free_list_;
- free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_storage_preallocated_ = true;
-}
-
-
-void* Isolate::PreallocatedStorageNew(size_t size) {
- if (!preallocated_storage_preallocated_) {
- return FreeStoreAllocationPolicy().New(size);
- }
- ASSERT(free_list_.next_ != &free_list_);
- ASSERT(free_list_.previous_ != &free_list_);
-
- size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
- // Search for exact fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ == size) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Search for first fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- PreallocatedStorage* left_over =
- reinterpret_cast<PreallocatedStorage*>(
- reinterpret_cast<char*>(storage + 1) + size);
- left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
- ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
- storage->size_);
- storage->size_ = size;
- left_over->LinkTo(&free_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Allocation failure.
- ASSERT(false);
- return NULL;
-}
-
-
-// We don't attempt to coalesce.
-void Isolate::PreallocatedStorageDelete(void* p) {
- if (p == NULL) {
- return;
- }
- if (!preallocated_storage_preallocated_) {
- FreeStoreAllocationPolicy::Delete(p);
- return;
- }
- PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
- ASSERT(storage->next_->previous_ == storage);
- ASSERT(storage->previous_->next_ == storage);
- storage->Unlink();
- storage->LinkTo(&free_list_);
-}
-
Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
@@ -345,6 +138,14 @@ Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
Mutex Isolate::process_wide_mutex_;
+// TODO(dcarney): Remove with default isolate.
+enum DefaultIsolateStatus {
+ kDefaultIsolateUninitialized,
+ kDefaultIsolateInitialized,
+ kDefaultIsolateCrashIfInitialized
+};
+static DefaultIsolateStatus default_isolate_status_
+ = kDefaultIsolateUninitialized;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
@@ -382,8 +183,16 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
}
+void Isolate::SetCrashIfDefaultIsolateInitialized() {
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateInitialized);
+ default_isolate_status_ = kDefaultIsolateCrashIfInitialized;
+}
+
+
void Isolate::EnsureDefaultIsolate() {
LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
@@ -844,24 +653,12 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
}
-void Isolate::PrintStack() {
- PrintStack(stdout);
-}
-
-
void Isolate::PrintStack(FILE* out) {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
-
- StringAllocator* allocator;
- if (preallocated_message_space_ == NULL) {
- allocator = new HeapStringAllocator();
- } else {
- allocator = preallocated_message_space_;
- }
-
StringStream::ClearMentionedObjectCache(this);
- StringStream accumulator(allocator);
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
accumulator.OutputToFile(out);
@@ -869,10 +666,6 @@ void Isolate::PrintStack(FILE* out) {
accumulator.Log(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
- if (preallocated_message_space_ == NULL) {
- // Remove the HeapStringAllocator created above.
- delete allocator;
- }
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
OS::PrintError(
@@ -1087,7 +880,7 @@ Failure* Isolate::StackOverflow() {
Handle<String> key = factory()->stack_overflow_string();
Handle<JSObject> boilerplate =
Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
- Handle<JSObject> exception = Copy(boilerplate);
+ Handle<JSObject> exception = JSObject::Copy(boilerplate);
DoThrow(*exception, NULL);
// Get stack trace limit.
@@ -1530,11 +1323,6 @@ MessageLocation Isolate::GetMessageLocation() {
}
-void Isolate::TraceException(bool flag) {
- FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
-}
-
-
bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
@@ -1657,11 +1445,7 @@ char* Isolate::RestoreThread(char* from) {
// This might be just paranoia, but it seems to be needed in case a
// thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
-#if V8_TARGET_ARCH_ARM
thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
- thread_local_top()->simulator_ = Simulator::current(this);
-#endif
#endif
ASSERT(context() == NULL || context()->IsContext());
return from + sizeof(ThreadLocalTop);
@@ -1730,13 +1514,11 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
Isolate::Isolate()
- : state_(UNINITIALIZED),
- embedder_data_(NULL),
+ : embedder_data_(),
+ state_(UNINITIALIZED),
entry_stack_(NULL),
stack_trace_nesting_level_(0),
incomplete_message_(NULL),
- preallocated_memory_thread_(NULL),
- preallocated_message_space_(NULL),
bootstrapper_(NULL),
runtime_profiler_(NULL),
compilation_cache_(NULL),
@@ -1758,14 +1540,10 @@ Isolate::Isolate()
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
runtime_zone_(this),
- in_use_list_(0),
- free_list_(0),
- preallocated_storage_preallocated_(false),
inner_pointer_to_code_cache_(NULL),
write_iterator_(NULL),
global_handles_(NULL),
eternal_handles_(NULL),
- context_switcher_(NULL),
thread_manager_(NULL),
fp_stubs_generated_(false),
has_installed_extensions_(false),
@@ -1776,7 +1554,6 @@ Isolate::Isolate()
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
- is_memory_constrained_(false),
has_fatal_error_(false),
use_crankshaft_(true),
initialized_from_snapshot_(false),
@@ -1784,9 +1561,10 @@ Isolate::Isolate()
heap_profiler_(NULL),
function_entry_hook_(NULL),
deferred_handles_head_(NULL),
- optimizing_compiler_thread_(this),
- marking_thread_(NULL),
+ optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
+ num_sweeper_threads_(0),
+ max_available_threads_(0),
stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1879,23 +1657,20 @@ void Isolate::Deinit() {
debugger()->UnloadDebugger();
#endif
- if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Stop();
-
- if (FLAG_sweeper_threads > 0) {
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i]->Stop();
- delete sweeper_thread_[i];
- }
- delete[] sweeper_thread_;
+ if (concurrent_recompilation_enabled()) {
+ optimizing_compiler_thread_->Stop();
+ delete optimizing_compiler_thread_;
+ optimizing_compiler_thread_ = NULL;
}
- if (FLAG_marking_threads > 0) {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- marking_thread_[i]->Stop();
- delete marking_thread_[i];
- }
- delete[] marking_thread_;
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i]->Stop();
+ delete sweeper_thread_[i];
+ sweeper_thread_[i] = NULL;
}
+ delete[] sweeper_thread_;
+ sweeper_thread_ = NULL;
+
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -1909,18 +1684,9 @@ void Isolate::Deinit() {
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
- if (FLAG_preemption) {
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StopPreemption();
- }
builtins_.TearDown();
bootstrapper_->TearDown();
- // Remove the external reference to the preallocated stack memory.
- delete preallocated_message_space_;
- preallocated_message_space_ = NULL;
- PreallocatedMemoryThreadStop();
-
if (runtime_profiler_ != NULL) {
runtime_profiler_->TearDown();
delete runtime_profiler_;
@@ -2029,8 +1795,6 @@ Isolate::~Isolate() {
delete write_iterator_;
write_iterator_ = NULL;
- delete context_switcher_;
- context_switcher_ = NULL;
delete thread_manager_;
thread_manager_ = NULL;
@@ -2235,20 +1999,31 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
- // Only preallocate on the first initialization.
- if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
- // Start the thread which will set aside some memory.
- PreallocatedMemoryThreadStart();
- preallocated_message_space_ =
- new NoAllocationStringAllocator(
- preallocated_memory_thread_->data(),
- preallocated_memory_thread_->length());
- PreallocatedStorageInit(preallocated_memory_thread_->length() / 4);
+ if (create_heap_objects) heap_.CreateStubsRequiringBuiltins();
+
+ // Set default value if not yet set.
+ // TODO(yangguo): move this to ResourceConstraints::ConfigureDefaults
+ // once ResourceConstraints becomes an argument to the Isolate constructor.
+ if (max_available_threads_ < 1) {
+ // Choose the default between 1 and 4.
+ max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
}
- if (FLAG_preemption) {
- v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
- v8::Locker::StartPreemption(100);
+ num_sweeper_threads_ = SweeperThread::NumberOfThreads(max_available_threads_);
+
+ if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
+ PrintF("Concurrent recompilation has been disabled for tracing.\n");
+ } else if (OptimizingCompilerThread::Enabled(max_available_threads_)) {
+ optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+ optimizing_compiler_thread_->Start();
+ }
+
+ if (num_sweeper_threads_ > 0) {
+ sweeper_thread_ = new SweeperThread*[num_sweeper_threads_];
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i] = new SweeperThread(this);
+ sweeper_thread_[i]->Start();
+ }
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2279,12 +2054,22 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
if (!create_heap_objects &&
- (FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
+ (FLAG_log_code ||
+ FLAG_ll_prof ||
+ FLAG_perf_jit_prof ||
+ FLAG_perf_basic_prof ||
+ logger_->is_logging_code_events())) {
HandleScope scope(this);
LOG(this, LogCodeObjects());
LOG(this, LogCompiledFunctions());
}
+ // If we are profiling with the Linux perf tool, we need to disable
+ // code relocation.
+ if (FLAG_perf_jit_prof || FLAG_perf_basic_prof) {
+ FLAG_compact_code_space = false;
+ }
+
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
@@ -2311,6 +2096,7 @@ bool Isolate::Init(Deserializer* des) {
CodeStub::GenerateFPStubs(this);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
StubFailureTrampolineStub::GenerateAheadOfTime(this);
+ StubFailureTailCallTrampolineStub::GenerateAheadOfTime(this);
// TODO(mstarzinger): The following is an ugly hack to make sure the
// interface descriptor is initialized even when stubs have been
// deserialized out of the snapshot without the graph builder.
@@ -2318,29 +2104,14 @@ bool Isolate::Init(Deserializer* des) {
DONT_TRACK_ALLOCATION_SITE, 0);
stub.InitializeInterfaceDescriptor(
this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
+ BinaryOpICStub::InstallDescriptors(this);
CompareNilICStub::InitializeForIsolate(this);
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
FastNewClosureStub::InstallDescriptors(this);
- }
-
- if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Start();
-
- if (FLAG_marking_threads > 0) {
- marking_thread_ = new MarkingThread*[FLAG_marking_threads];
- for (int i = 0; i < FLAG_marking_threads; i++) {
- marking_thread_[i] = new MarkingThread(this);
- marking_thread_[i]->Start();
- }
- }
-
- if (FLAG_sweeper_threads > 0) {
- sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i] = new SweeperThread(this);
- sweeper_thread_[i]->Start();
- }
+ NumberToStringStub::InstallDescriptors(this);
+ NewStringAddStub::InstallDescriptors(this);
}
initialized_from_snapshot_ = (des != NULL);
@@ -2470,6 +2241,12 @@ HTracer* Isolate::GetHTracer() {
}
+CodeTracer* Isolate::GetCodeTracer() {
+ if (code_tracer() == NULL) set_code_tracer(new CodeTracer(id()));
+ return code_tracer();
+}
+
+
Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
Context* native_context = context()->native_context();
Object* maybe_map_array = native_context->js_array_maps();
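
The Init() hunk above replaces the per-feature thread flags with a single pool size: max_available_threads_ defaults to the online CPU count clamped to [1, 4], and the sweeper and optimizing-compiler threads are then sized from it. A standalone sketch of that clamp, using std::thread in place of V8's CPU::NumberOfProcessorsOnline():

#include <algorithm>
#include <cstdio>
#include <thread>

// Rough mirror of the default in Isolate::Init above:
// Max(Min(NumberOfProcessorsOnline(), 4), 1).
int DefaultMaxAvailableThreads() {
  int online = static_cast<int>(std::thread::hardware_concurrency());
  if (online <= 0) online = 1;  // hardware_concurrency() may report 0
  return std::max(std::min(online, 4), 1);
}

int main() {
  printf("max available threads: %d\n", DefaultMaxAvailableThreads());
  return 0;
}
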
diff --git a/chromium/v8/src/isolate.h b/chromium/v8/src/isolate.h
index b826ec596ab..7ba30883c75 100644
--- a/chromium/v8/src/isolate.h
+++ b/chromium/v8/src/isolate.h
@@ -55,9 +55,9 @@ class Bootstrapper;
class CodeGenerator;
class CodeRange;
struct CodeStubInterfaceDescriptor;
+class CodeTracer;
class CompilationCache;
class ContextSlotCache;
-class ContextSwitcher;
class Counters;
class CpuFeatures;
class CpuProfiler;
@@ -75,8 +75,6 @@ class HTracer;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
-class MarkingThread;
-class PreallocatedMemoryThread;
class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
@@ -274,10 +272,8 @@ class ThreadLocalTop BASE_EMBEDDED {
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
Simulator* simulator_;
#endif
-#endif // USE_SIMULATOR
Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
// the external callback we're currently in
@@ -303,21 +299,6 @@ class ThreadLocalTop BASE_EMBEDDED {
};
-class SystemThreadManager {
- public:
- enum ParallelSystemComponent {
- PARALLEL_SWEEPING,
- CONCURRENT_SWEEPING,
- PARALLEL_MARKING,
- PARALLEL_RECOMPILATION
- };
-
- static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
-
- static const int kMaxThreads = 4;
-};
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
@@ -348,7 +329,7 @@ class SystemThreadManager {
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
+typedef List<HeapObject*> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
@@ -378,9 +359,10 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
- V(bool, observer_delivery_pending, false) \
+ V(bool, microtask_pending, false) \
V(HStatistics*, hstatistics, NULL) \
V(HTracer*, htracer, NULL) \
+ V(CodeTracer*, code_tracer, NULL) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
@@ -497,6 +479,7 @@ class Isolate {
bool IsDefaultIsolate() const { return this == default_isolate_; }
+ static void SetCrashIfDefaultIsolateInitialized();
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
// example if you are using V8 from within the body of a static initializer.
@@ -732,10 +715,8 @@ class Isolate {
}
void PrintCurrentStackTrace(FILE* out);
- void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
void PrintStack(FILE* out);
- void PrintStack();
Handle<String> StackTraceString();
NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
Object* object,
@@ -753,6 +734,19 @@ class Isolate {
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
+
+ // TODO(yangguo): temporary wrappers
+ bool MayNamedAccessWrapper(Handle<JSObject> receiver,
+ Handle<Object> key,
+ v8::AccessType type) {
+ return MayNamedAccess(*receiver, *key, type);
+ }
+ bool MayIndexedAccessWrapper(Handle<JSObject> receiver,
+ uint32_t index,
+ v8::AccessType type) {
+ return MayIndexedAccess(*receiver, index, type);
+ }
+
bool MayNamedAccess(JSObject* receiver,
Object* key,
v8::AccessType type);
@@ -791,9 +785,6 @@ class Isolate {
// result in the target out parameter.
void ComputeLocation(MessageLocation* target);
- // Override command line flag.
- void TraceException(bool flag);
-
// Out of resource exception helpers.
Failure* StackOverflow();
Failure* TerminateExecution();
@@ -924,12 +915,6 @@ class Isolate {
ThreadManager* thread_manager() { return thread_manager_; }
- ContextSwitcher* context_switcher() { return context_switcher_; }
-
- void set_context_switcher(ContextSwitcher* switcher) {
- context_switcher_ = switcher;
- }
-
StringTracker* string_tracker() { return string_tracker_; }
unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
@@ -980,9 +965,7 @@ class Isolate {
return &interp_canonicalize_mapping_;
}
- void* PreallocatedStorageNew(size_t size);
- void PreallocatedStorageDelete(void* p);
- void PreallocatedStorageInit(size_t size);
+ inline bool IsCodePreAgingActive();
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* debugger() {
@@ -1050,8 +1033,14 @@ class Isolate {
thread_local_top_.current_vm_state_ = state;
}
- void SetData(void* data) { embedder_data_ = data; }
- void* GetData() { return embedder_data_; }
+ void SetData(uint32_t slot, void* data) {
+ ASSERT(slot < Internals::kNumIsolateDataSlots);
+ embedder_data_[slot] = data;
+ }
+ void* GetData(uint32_t slot) {
+ ASSERT(slot < Internals::kNumIsolateDataSlots);
+ return embedder_data_[slot];
+ }
LookupResult* top_lookup_result() {
return thread_local_top_.top_lookup_result_;
@@ -1097,27 +1086,46 @@ class Isolate {
bool IsDeferredHandle(Object** location);
#endif // DEBUG
- OptimizingCompilerThread* optimizing_compiler_thread() {
- return &optimizing_compiler_thread_;
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
}
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- // TODO(svenpanne) This method is on death row...
- static v8::Isolate* GetDefaultIsolateForLocking();
+ bool concurrent_recompilation_enabled() {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL;
+ }
+
+ bool concurrent_osr_enabled() const {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr;
+ }
- MarkingThread** marking_threads() {
- return marking_thread_;
+ OptimizingCompilerThread* optimizing_compiler_thread() {
+ return optimizing_compiler_thread_;
+ }
+
+ int num_sweeper_threads() const {
+ return num_sweeper_threads_;
}
SweeperThread** sweeper_threads() {
return sweeper_thread_;
}
+ // PreInits and returns a default isolate. Needed when a new thread tries
+ // to create a Locker for the first time (the lock itself is in the isolate).
+ // TODO(svenpanne) This method is on death row...
+ static v8::Isolate* GetDefaultIsolateForLocking();
+
int id() const { return static_cast<int>(id_); }
HStatistics* GetHStatistics();
HTracer* GetHTracer();
+ CodeTracer* GetCodeTracer();
FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
@@ -1131,13 +1139,6 @@ class Isolate {
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
- bool is_memory_constrained() const {
- return is_memory_constrained_;
- }
- void set_is_memory_constrained(bool value) {
- is_memory_constrained_ = value;
- }
-
private:
Isolate();
@@ -1152,9 +1153,9 @@ class Isolate {
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
// verified in Isolate::Init() using runtime checks.
- State state_; // Will be padded to kApiPointerSize.
- void* embedder_data_;
+ void* embedder_data_[Internals::kNumIsolateDataSlots];
Heap heap_;
+ State state_; // Will be padded to kApiPointerSize.
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
@@ -1230,11 +1231,8 @@ class Isolate {
// at the same time, this should be prevented using external locking.
void Exit();
- void PreallocatedMemoryThreadStart();
- void PreallocatedMemoryThreadStop();
void InitializeThreadLocal();
- void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
void MarkCompactPrologue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void MarkCompactEpilogue(bool is_compacting,
@@ -1254,10 +1252,7 @@ class Isolate {
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
- // The preallocated memory thread singleton.
- PreallocatedMemoryThread* preallocated_memory_thread_;
Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
- NoAllocationStringAllocator* preallocated_message_space_;
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
@@ -1284,14 +1279,10 @@ class Isolate {
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
Zone runtime_zone_;
- PreallocatedStorage in_use_list_;
- PreallocatedStorage free_list_;
- bool preallocated_storage_preallocated_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
ConsStringIteratorOp* write_iterator_;
GlobalHandles* global_handles_;
EternalHandles* eternal_handles_;
- ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
@@ -1310,7 +1301,6 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
RandomNumberGenerator* random_number_generator_;
- bool is_memory_constrained_;
// True if fatal error has been signaled for this isolate.
bool has_fatal_error_;
@@ -1368,9 +1358,13 @@ class Isolate {
#endif
DeferredHandles* deferred_handles_head_;
- OptimizingCompilerThread optimizing_compiler_thread_;
- MarkingThread** marking_thread_;
+ OptimizingCompilerThread* optimizing_compiler_thread_;
SweeperThread** sweeper_thread_;
+ int num_sweeper_threads_;
+
+ // TODO(yangguo): This will become obsolete once ResourceConstraints
+ // becomes an argument to Isolate constructor.
+ int max_available_threads_;
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
@@ -1378,7 +1372,6 @@ class Isolate {
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
- friend class MarkingThread;
friend class OptimizingCompilerThread;
friend class SweeperThread;
friend class ThreadManager;
@@ -1426,9 +1419,9 @@ class SaveContext BASE_EMBEDDED {
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- AssertNoContextChange()
- : isolate_(Isolate::Current()),
- context_(isolate_->context()) { }
+ explicit AssertNoContextChange(Isolate* isolate)
+ : isolate_(isolate),
+ context_(isolate->context(), isolate) { }
~AssertNoContextChange() {
ASSERT(isolate_->context() == *context_);
}
@@ -1438,32 +1431,7 @@ class AssertNoContextChange BASE_EMBEDDED {
Handle<Context> context_;
#else
public:
- AssertNoContextChange() { }
-#endif
-};
-
-
-// TODO(mstarzinger): Depracate as soon as everything is handlified.
-class AssertNoContextChangeWithHandleScope BASE_EMBEDDED {
-#ifdef DEBUG
- public:
- AssertNoContextChangeWithHandleScope() :
- isolate_(Isolate::Current()),
- scope_(isolate_),
- context_(isolate_->context(), isolate_) {
- }
-
- ~AssertNoContextChangeWithHandleScope() {
- ASSERT(isolate_->context() == *context_);
- }
-
- private:
- Isolate* isolate_;
- HandleScope scope_;
- Handle<Context> context_;
-#else
- public:
- AssertNoContextChangeWithHandleScope() { }
+ explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};
@@ -1534,6 +1502,73 @@ inline void Context::mark_out_of_memory() {
native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
}
+class CodeTracer V8_FINAL : public Malloced {
+ public:
+ explicit CodeTracer(int isolate_id)
+ : file_(NULL),
+ scope_depth_(0) {
+ if (!ShouldRedirect()) {
+ file_ = stdout;
+ return;
+ }
+
+ if (FLAG_redirect_code_traces_to == NULL) {
+ OS::SNPrintF(filename_,
+ "code-%d-%d.asm",
+ OS::GetCurrentProcessId(),
+ isolate_id);
+ } else {
+ OS::StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
+ }
+
+ WriteChars(filename_.start(), "", 0, false);
+ }
+
+ class Scope {
+ public:
+ explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
+ ~Scope() { tracer_->CloseFile(); }
+
+ FILE* file() const { return tracer_->file(); }
+
+ private:
+ CodeTracer* tracer_;
+ };
+
+ void OpenFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (file_ == NULL) {
+ file_ = OS::FOpen(filename_.start(), "a");
+ }
+
+ scope_depth_++;
+ }
+
+ void CloseFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (--scope_depth_ == 0) {
+ fclose(file_);
+ file_ = NULL;
+ }
+ }
+
+ FILE* file() const { return file_; }
+
+ private:
+ static bool ShouldRedirect() {
+ return FLAG_redirect_code_traces;
+ }
+
+ EmbeddedVector<char, 128> filename_;
+ FILE* file_;
+ int scope_depth_;
+};
} } // namespace v8::internal
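
Among the isolate.h changes above, SetData()/GetData() trade the single embedder_data_ pointer for an indexed slot array, so several embedding layers can attach data to one isolate without clobbering each other. A sketch of the slot-checked accessor pattern; the slot count of 4 is an assumption standing in for Internals::kNumIsolateDataSlots:

#include <cassert>
#include <cstdio>

class IsolateData {
 public:
  static const unsigned kNumSlots = 4;  // assumed kNumIsolateDataSlots
  IsolateData() {
    for (unsigned i = 0; i < kNumSlots; i++) slots_[i] = 0;
  }
  void SetData(unsigned slot, void* data) {
    assert(slot < kNumSlots);  // mirrors the ASSERT in Isolate::SetData
    slots_[slot] = data;
  }
  void* GetData(unsigned slot) {
    assert(slot < kNumSlots);
    return slots_[slot];
  }
 private:
  void* slots_[kNumSlots];
};

int main() {
  IsolateData isolate;
  int my_state = 42;
  isolate.SetData(0, &my_state);  // each embedder claims its own slot
  printf("%d\n", *static_cast<int*>(isolate.GetData(0)));
  return 0;
}
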
diff --git a/chromium/v8/src/json-stringifier.h b/chromium/v8/src/json-stringifier.h
index 0d17b356abb..4510c4b45b6 100644
--- a/chromium/v8/src/json-stringifier.h
+++ b/chromium/v8/src/json-stringifier.h
@@ -360,6 +360,7 @@ Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
PropertyAttributes attr;
Handle<Object> fun =
Object::GetProperty(object, object, &lookup, tojson_string_, &attr);
+ if (fun.is_null()) return Handle<Object>::null();
if (!fun->IsJSFunction()) return object;
// Call toJSON function.
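
The one-line fix above follows V8's empty-handle convention: GetProperty() returns a null handle when an exception is pending, so the stringifier must bail out rather than dereference it. Schematically, with a toy Handle standing in for V8's real type:

// Schematic of the propagate-on-null idiom; not V8's actual Handle.
template <typename T>
struct Handle {
  T* location_;
  bool is_null() const { return location_ == 0; }
  static Handle<T> null() { Handle<T> h = { 0 }; return h; }
};

template <typename T>
Handle<T> Step(Handle<T> fun) {
  if (fun.is_null()) return Handle<T>::null();  // exception pending: unwind
  // ... only past this point is *fun.location_ safe to touch ...
  return fun;
}
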
diff --git a/chromium/v8/src/json.js b/chromium/v8/src/json.js
index b0e14e1965d..c21e6351d45 100644
--- a/chromium/v8/src/json.js
+++ b/chromium/v8/src/json.js
@@ -181,7 +181,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
}
}
// Undefined or a callable object.
- return void 0;
+ return UNDEFINED;
}
@@ -236,5 +236,5 @@ function JSONSerializeAdapter(key, object) {
var holder = {};
holder[key] = object;
// No need to pass the actual holder since there is no replacer function.
- return JSONSerialize(key, holder, void 0, new InternalArray(), "", "");
+ return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", "");
}
diff --git a/chromium/v8/src/jsregexp.cc b/chromium/v8/src/jsregexp.cc
index 3a3d91599c3..1f3f2a172ab 100644
--- a/chromium/v8/src/jsregexp.cc
+++ b/chromium/v8/src/jsregexp.cc
@@ -1150,7 +1150,9 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
work_list_ = NULL;
#ifdef DEBUG
if (FLAG_print_code) {
- Handle<Code>::cast(code)->Disassemble(*pattern->ToCString());
+ CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
+ Handle<Code>::cast(code)->Disassemble(*pattern->ToCString(),
+ trace_scope.file());
}
if (FLAG_trace_regexp_assembler) {
delete macro_assembler_;
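
This call site shows the intended use of the new CodeTracer: a stack-allocated Scope opens the redirect target and hands out the FILE*. A compilable mirror of that depth-counted open/close discipline, with a hard-coded filename where V8 builds one from the process and isolate ids:

#include <cstdio>

class Tracer {
 public:
  Tracer() : file_(NULL), depth_(0) {}
  void OpenFile()  { if (depth_++ == 0) file_ = fopen("code.asm", "a"); }
  void CloseFile() {
    if (--depth_ == 0 && file_ != NULL) { fclose(file_); file_ = NULL; }
  }
  FILE* file() const { return file_; }

  class Scope {
   public:
    explicit Scope(Tracer* tracer) : tracer_(tracer) { tracer_->OpenFile(); }
    ~Scope() { tracer_->CloseFile(); }
    FILE* file() const { return tracer_->file(); }
   private:
    Tracer* tracer_;
  };

 private:
  FILE* file_;
  int depth_;
};

int main() {
  Tracer tracer;
  {
    Tracer::Scope scope(&tracer);   // first scope opens the file
    Tracer::Scope nested(&tracer);  // nested scope reuses it
    if (scope.file() != NULL) fprintf(scope.file(), "disassembly...\n");
  }                                 // depth falls to 0, file is closed
  return 0;
}
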
diff --git a/chromium/v8/src/list.h b/chromium/v8/src/list.h
index 0e4e35bb41b..ea67b8b0c6c 100644
--- a/chromium/v8/src/list.h
+++ b/chromium/v8/src/list.h
@@ -84,7 +84,7 @@ class List {
// backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
- ASSERT(i < length_);
+ SLOW_ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
@@ -197,11 +197,13 @@ class List {
};
class Map;
+class Type;
class Code;
template<typename T> class Handle;
typedef List<Map*> MapList;
typedef List<Code*> CodeList;
typedef List<Handle<Map> > MapHandleList;
+typedef List<Handle<Type> > TypeHandleList;
typedef List<Handle<Code> > CodeHandleList;
// Perform binary search for an element in an already sorted
diff --git a/chromium/v8/src/lithium-allocator-inl.h b/chromium/v8/src/lithium-allocator-inl.h
index 8cca19b2efa..deee98877d6 100644
--- a/chromium/v8/src/lithium-allocator-inl.h
+++ b/chromium/v8/src/lithium-allocator-inl.h
@@ -145,16 +145,14 @@ void UseIterator::Advance() {
}
-void LAllocator::SetLiveRangeAssignedRegister(
- LiveRange* range,
- int reg,
- RegisterKind register_kind) {
- if (register_kind == DOUBLE_REGISTERS) {
+void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
+ if (range->Kind() == DOUBLE_REGISTERS) {
assigned_double_registers_->Add(reg);
} else {
+ ASSERT(range->Kind() == GENERAL_REGISTERS);
assigned_registers_->Add(reg);
}
- range->set_assigned_register(reg, register_kind, chunk()->zone());
+ range->set_assigned_register(reg, chunk()->zone());
}
diff --git a/chromium/v8/src/lithium-allocator.cc b/chromium/v8/src/lithium-allocator.cc
index 3c5abd19846..29c31942e44 100644
--- a/chromium/v8/src/lithium-allocator.cc
+++ b/chromium/v8/src/lithium-allocator.cc
@@ -131,7 +131,7 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
LiveRange::LiveRange(int id, Zone* zone)
: id_(id),
spilled_(false),
- is_double_(false),
+ kind_(UNALLOCATED_REGISTERS),
assigned_register_(kInvalidAssignment),
last_interval_(NULL),
first_interval_(NULL),
@@ -145,12 +145,9 @@ LiveRange::LiveRange(int id, Zone* zone)
spill_start_index_(kMaxInt) { }
-void LiveRange::set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone) {
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
- is_double_ = (register_kind == DOUBLE_REGISTERS);
ConvertOperands(zone);
}
@@ -234,10 +231,15 @@ LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
- if (IsDouble()) {
- op = LDoubleRegister::Create(assigned_register(), zone);
- } else {
- op = LRegister::Create(assigned_register(), zone);
+ switch (Kind()) {
+ case GENERAL_REGISTERS:
+ op = LRegister::Create(assigned_register(), zone);
+ break;
+ case DOUBLE_REGISTERS:
+ op = LDoubleRegister::Create(assigned_register(), zone);
+ break;
+ default:
+ UNREACHABLE();
}
} else if (IsSpilled()) {
ASSERT(!HasRegisterAssigned());
@@ -352,6 +354,7 @@ void LiveRange::SplitAt(LifetimePosition position,
// Link the new live range in the chain before any of the other
// ranges linked from the range before the split.
result->parent_ = (parent_ == NULL) ? this : parent_;
+ result->kind_ = result->parent_->kind_;
result->next_ = next_;
next_ = result;
@@ -553,7 +556,7 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
reusable_slots_(8, zone()),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
- mode_(GENERAL_REGISTERS),
+ mode_(UNALLOCATED_REGISTERS),
num_registers_(-1),
graph_(graph),
has_osr_entry_(false),
@@ -653,7 +656,8 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
if (result == NULL) {
result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS);
+ result->kind_ = GENERAL_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
fixed_live_ranges_[index] = result;
}
return result;
@@ -667,7 +671,8 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
chunk()->zone());
ASSERT(result->IsFixed());
- SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS);
+ result->kind_ = DOUBLE_REGISTERS;
+ SetLiveRangeAssignedRegister(result, index);
fixed_double_live_ranges_[index] = result;
}
return result;
@@ -1375,6 +1380,12 @@ void LAllocator::BuildLiveRanges() {
}
#endif
}
+
+ for (int i = 0; i < live_ranges_.length(); ++i) {
+ if (live_ranges_[i] != NULL) {
+ live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+ }
+ }
}
@@ -1481,6 +1492,7 @@ void LAllocator::PopulatePointerMaps() {
void LAllocator::AllocateGeneralRegisters() {
LAllocatorPhase phase("L_Allocate general registers", this);
num_registers_ = Register::NumAllocatableRegisters();
+ mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
@@ -1498,7 +1510,7 @@ void LAllocator::AllocateRegisters() {
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
- if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
+ if (live_ranges_[i]->Kind() == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
}
}
@@ -1518,6 +1530,7 @@ void LAllocator::AllocateRegisters() {
}
}
} else {
+ ASSERT(mode_ == GENERAL_REGISTERS);
for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
LiveRange* current = fixed_live_ranges_.at(i);
if (current != NULL) {
@@ -1812,7 +1825,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning preferred reg %s to live range %d\n",
RegisterName(register_index),
current->id());
- SetLiveRangeAssignedRegister(current, register_index, mode_);
+ SetLiveRangeAssignedRegister(current, register_index);
return true;
}
}
@@ -1847,7 +1860,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_);
+ SetLiveRangeAssignedRegister(current, reg);
return true;
}
@@ -1932,7 +1945,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
- SetLiveRangeAssignedRegister(current, reg, mode_);
+ SetLiveRangeAssignedRegister(current, reg);
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
@@ -2149,7 +2162,7 @@ void LAllocator::Spill(LiveRange* range) {
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
+ if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
first->SetSpillOperand(op);
}
range->MakeSpilled(chunk()->zone());
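
The allocator changes above replace the is_double_ flag with a three-valued RegisterKind stored on every LiveRange, so operand creation and spill-slot selection can dispatch on the range itself instead of on the allocator's current mode_. A reduced sketch of that dispatch:

#include <cassert>
#include <cstdio>

enum RegisterKind {
  UNALLOCATED_REGISTERS, GENERAL_REGISTERS, DOUBLE_REGISTERS
};

struct LiveRange {
  RegisterKind kind;
  LiveRange() : kind(UNALLOCATED_REGISTERS) {}  // resolved later, as in
};                                              // BuildLiveRanges above

const char* AssignedOperandClass(const LiveRange& range) {
  switch (range.kind) {
    case GENERAL_REGISTERS: return "LRegister";
    case DOUBLE_REGISTERS:  return "LDoubleRegister";
    default:
      assert(false && "kind must be resolved before assignment");
      return "";
  }
}

int main() {
  LiveRange range;
  range.kind = DOUBLE_REGISTERS;
  printf("%s\n", AssignedOperandClass(range));  // LDoubleRegister
  return 0;
}
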
diff --git a/chromium/v8/src/lithium-allocator.h b/chromium/v8/src/lithium-allocator.h
index e5edd3cf039..9908ea823d3 100644
--- a/chromium/v8/src/lithium-allocator.h
+++ b/chromium/v8/src/lithium-allocator.h
@@ -146,6 +146,7 @@ class LifetimePosition {
enum RegisterKind {
+ UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
@@ -290,9 +291,7 @@ class LiveRange: public ZoneObject {
LOperand* CreateAssignedOperand(Zone* zone);
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg,
- RegisterKind register_kind,
- Zone* zone);
+ void set_assigned_register(int reg, Zone* zone);
void MakeSpilled(Zone* zone);
// Returns use position in this live range that follows both start
@@ -323,7 +322,7 @@ class LiveRange: public ZoneObject {
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
- bool IsDouble() const { return is_double_; }
+ RegisterKind Kind() const { return kind_; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
@@ -392,7 +391,7 @@ class LiveRange: public ZoneObject {
int id_;
bool spilled_;
- bool is_double_;
+ RegisterKind kind_;
int assigned_register_;
UseInterval* last_interval_;
UseInterval* first_interval_;
@@ -406,6 +405,8 @@ class LiveRange: public ZoneObject {
LOperand* current_hint_operand_;
LOperand* spill_operand_;
int spill_start_index_;
+
+ friend class LAllocator; // Assigns to kind_.
};
@@ -568,9 +569,7 @@ class LAllocator BASE_EMBEDDED {
HBasicBlock* block,
HBasicBlock* pred);
- inline void SetLiveRangeAssignedRegister(LiveRange* range,
- int reg,
- RegisterKind register_kind);
+ inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
// Return parallel move that should be used to connect ranges split at the
// given position.
diff --git a/chromium/v8/src/lithium-codegen.cc b/chromium/v8/src/lithium-codegen.cc
new file mode 100644
index 00000000000..19ebe7e516b
--- /dev/null
+++ b/chromium/v8/src/lithium-codegen.cc
@@ -0,0 +1,150 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "lithium-codegen.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
+#include "mips/lithium-codegen-mips.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+HGraph* LCodeGenBase::graph() const {
+ return chunk()->graph();
+}
+
+
+LCodeGenBase::LCodeGenBase(LChunk* chunk,
+ MacroAssembler* assembler,
+ CompilationInfo* info)
+ : chunk_(static_cast<LPlatformChunk*>(chunk)),
+ masm_(assembler),
+ info_(info),
+ zone_(info->zone()),
+ status_(UNUSED),
+ current_block_(-1),
+ current_instruction_(-1),
+ instructions_(chunk->instructions()),
+ last_lazy_deopt_pc_(0) {
+}
+
+
+bool LCodeGenBase::GenerateBody() {
+ ASSERT(is_generating());
+ bool emit_instructions = true;
+ LCodeGen* codegen = static_cast<LCodeGen*>(this);
+ for (current_instruction_ = 0;
+ !is_aborted() && current_instruction_ < instructions_->length();
+ current_instruction_++) {
+ LInstruction* instr = instructions_->at(current_instruction_);
+
+ // Don't emit code for basic blocks with a replacement.
+ if (instr->IsLabel()) {
+ emit_instructions = !LLabel::cast(instr)->HasReplacement() &&
+ (!FLAG_unreachable_code_elimination ||
+ instr->hydrogen_value()->block()->IsReachable());
+ if (FLAG_code_comments && !emit_instructions) {
+ Comment(
+ ";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) "
+ "--------------------",
+ current_instruction_,
+ instr->hydrogen_value()->id(),
+ instr->hydrogen_value()->block()->block_id());
+ }
+ }
+ if (!emit_instructions) continue;
+
+ if (FLAG_code_comments && instr->HasInterestingComment(codegen)) {
+ Comment(";;; <@%d,#%d> %s",
+ current_instruction_,
+ instr->hydrogen_value()->id(),
+ instr->Mnemonic());
+ }
+
+ GenerateBodyInstructionPre(instr);
+
+ HValue* value = instr->hydrogen_value();
+ if (value->position() != RelocInfo::kNoPosition) {
+ ASSERT(!graph()->info()->IsOptimizing() ||
+ !FLAG_emit_opt_code_positions ||
+ value->position() != RelocInfo::kNoPosition);
+ RecordAndWritePosition(value->position());
+ }
+
+ instr->CompileToNative(codegen);
+
+ GenerateBodyInstructionPost(instr);
+ }
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
+ return !is_aborted();
+}
+
+
+void LCodeGenBase::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+ // issues when the stack allocated buffer goes out of scope.
+ size_t length = builder.position();
+ Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1);
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
+ masm()->RecordComment(copy.start());
+}
+
+
+int LCodeGenBase::GetNextEmittedBlock() const {
+ for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+ if (!chunk_->GetLabel(i)->HasReplacement()) return i;
+ }
+ return -1;
+}
+
+
+} } // namespace v8::internal
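
One detail in LCodeGenBase::Comment() above is worth spelling out: the text is formatted into a stack buffer, then copied before RecordComment() sees it, because the assembler keeps the pointer it is given. A standalone mirror of that copy-before-record step (new[] stands in for V8's zone allocation, so this sketch deliberately never frees):

#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <vector>

static std::vector<char*> recorded;  // stand-in for RecordComment, which
                                     // retains the pointer it is handed

void Comment(const char* format, ...) {
  char buffer[4096];                 // stack storage dies with this frame
  va_list args;
  va_start(args, format);
  vsnprintf(buffer, sizeof(buffer), format, args);
  va_end(args);
  size_t length = strlen(buffer) + 1;
  char* copy = new char[length];     // heap copy outlives the call
  memcpy(copy, buffer, length);
  recorded.push_back(copy);
}

int main() {
  Comment(";;; <@%d,#%d> %s", 7, 42, "add-d");
  printf("%s\n", recorded[0]);
  return 0;
}
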
diff --git a/chromium/v8/src/lithium-codegen.h b/chromium/v8/src/lithium-codegen.h
new file mode 100644
index 00000000000..9caab8127db
--- /dev/null
+++ b/chromium/v8/src/lithium-codegen.h
@@ -0,0 +1,96 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_CODEGEN_H_
+#define V8_LITHIUM_CODEGEN_H_
+
+#include "v8.h"
+
+#include "compiler.h"
+
+namespace v8 {
+namespace internal {
+
+class LInstruction;
+class LPlatformChunk;
+
+class LCodeGenBase BASE_EMBEDDED {
+ public:
+ LCodeGenBase(LChunk* chunk,
+ MacroAssembler* assembler,
+ CompilationInfo* info);
+ virtual ~LCodeGenBase() {}
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info_->isolate(); }
+ Factory* factory() const { return isolate()->factory(); }
+ Heap* heap() const { return isolate()->heap(); }
+ Zone* zone() const { return zone_; }
+ LPlatformChunk* chunk() const { return chunk_; }
+ HGraph* graph() const;
+
+ void FPRINTF_CHECKING Comment(const char* format, ...);
+
+ bool GenerateBody();
+ virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
+ virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
+
+ virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
+ virtual void RecordAndWritePosition(int position) = 0;
+
+ int GetNextEmittedBlock() const;
+
+ protected:
+ enum Status {
+ UNUSED,
+ GENERATING,
+ DONE,
+ ABORTED
+ };
+
+ LPlatformChunk* const chunk_;
+ MacroAssembler* const masm_;
+ CompilationInfo* const info_;
+ Zone* zone_;
+ Status status_;
+ int current_block_;
+ int current_instruction_;
+ const ZoneList<LInstruction*>* instructions_;
+ int last_lazy_deopt_pc_;
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_generating() const { return status_ == GENERATING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_LITHIUM_CODEGEN_H_
diff --git a/chromium/v8/src/lithium.cc b/chromium/v8/src/lithium.cc
index 95310bffd3f..414d5f4edeb 100644
--- a/chromium/v8/src/lithium.cc
+++ b/chromium/v8/src/lithium.cc
@@ -229,7 +229,7 @@ void LPointerMap::PrintTo(StringStream* stream) {
if (i != 0) stream->Add(";");
pointer_operands_[i]->PrintTo(stream);
}
- stream->Add("} @%d", position());
+ stream->Add("}");
}
@@ -237,7 +237,8 @@ int StackSlotOffset(int index) {
if (index >= 0) {
// Local or spill slot. Skip the frame pointer, function, and
// context in the fixed part of the frame.
- return -(index + 3) * kPointerSize;
+ return -(index + 1) * kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp;
} else {
// Incoming parameter. Skip the return address.
return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
@@ -342,7 +343,8 @@ int LChunk::GetParameterStackSlot(int index) const {
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
- int result = index - info()->scope()->num_parameters() - 1;
+ int result = index - info()->num_parameters() - 1;
+
ASSERT(result < 0);
return result;
}
@@ -461,6 +463,14 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
}
+LInstruction* LChunkBuilder::CheckElideControlInstruction(
+ HControlInstruction* instr) {
+ HBasicBlock* successor;
+ if (!instr->KnownSuccessorBlock(&successor)) return NULL;
+ return new(zone()) LGoto(successor);
+}
+
+
LPhase::~LPhase() {
if (ShouldProduceTraceOutput()) {
isolate()->GetHTracer()->TraceLithium(name(), chunk_);
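
Earlier in this lithium.cc diff, StackSlotOffset() stops hard-coding three skipped slots and instead subtracts the named frame constant, so the formula survives changes to the fixed frame layout. A worked check that the two forms agree when the fixed part below fp is two pointers (the constants here are assumptions for illustration; the real values live in frames.h):

#include <cassert>
#include <cstdio>

const int kPointerSize = 8;                          // assumed 64-bit target
const int kFixedFrameSizeFromFp = 2 * kPointerSize;  // context + function

int OldStackSlotOffset(int index) { return -(index + 3) * kPointerSize; }

int NewStackSlotOffset(int index) {
  return -(index + 1) * kPointerSize - kFixedFrameSizeFromFp;
}

int main() {
  for (int index = 0; index < 4; index++) {
    assert(OldStackSlotOffset(index) == NewStackSlotOffset(index));
  }
  printf("slot 0 -> %d, slot 1 -> %d\n",
         NewStackSlotOffset(0), NewStackSlotOffset(1));  // -24, -32
  return 0;
}
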
diff --git a/chromium/v8/src/lithium.h b/chromium/v8/src/lithium.h
index d6aa205640f..d4395f2d7ee 100644
--- a/chromium/v8/src/lithium.h
+++ b/chromium/v8/src/lithium.h
@@ -476,10 +476,9 @@ class LParallelMove V8_FINAL : public ZoneObject {
class LPointerMap V8_FINAL : public ZoneObject {
public:
- explicit LPointerMap(int position, Zone* zone)
+ explicit LPointerMap(Zone* zone)
: pointer_operands_(8, zone),
untagged_operands_(0, zone),
- position_(position),
lithium_position_(-1) { }
const ZoneList<LOperand*>* GetNormalizedOperands() {
@@ -489,7 +488,6 @@ class LPointerMap V8_FINAL : public ZoneObject {
untagged_operands_.Clear();
return &pointer_operands_;
}
- int position() const { return position_; }
int lithium_position() const { return lithium_position_; }
void set_lithium_position(int pos) {
@@ -505,7 +503,6 @@ class LPointerMap V8_FINAL : public ZoneObject {
private:
ZoneList<LOperand*> pointer_operands_;
ZoneList<LOperand*> untagged_operands_;
- int position_;
int lithium_position_;
};
diff --git a/chromium/v8/src/liveedit-debugger.js b/chromium/v8/src/liveedit-debugger.js
index 451b146bde7..4618eda3666 100644
--- a/chromium/v8/src/liveedit-debugger.js
+++ b/chromium/v8/src/liveedit-debugger.js
@@ -186,7 +186,7 @@ Debug.LiveEdit = new function() {
// to old version.
if (link_to_old_script_list.length == 0) {
%LiveEditReplaceScript(script, new_source, null);
- old_script = void 0;
+ old_script = UNDEFINED;
} else {
var old_script_name = CreateNameForOldScript(script);
@@ -221,7 +221,7 @@ Debug.LiveEdit = new function() {
change_log.push( {position_patched: position_patch_report} );
for (var i = 0; i < update_positions_list.length; i++) {
- // TODO(LiveEdit): take into account wether it's source_changed or
+ // TODO(LiveEdit): take into account whether it's source_changed or
// unchanged and whether positions changed at all.
PatchPositions(update_positions_list[i], diff_array,
position_patch_report);
@@ -266,7 +266,7 @@ Debug.LiveEdit = new function() {
// LiveEdit itself believe that any function in heap that points to a
// particular script is a regular function.
// For some functions we will restore this link later.
- %LiveEditFunctionSetScript(info.shared_function_info, void 0);
+ %LiveEditFunctionSetScript(info.shared_function_info, UNDEFINED);
compile_info.push(info);
old_index_map.push(i);
}
@@ -288,7 +288,7 @@ Debug.LiveEdit = new function() {
}
}
- // After sorting update outer_inder field using old_index_map. Also
+ // After sorting update outer_index field using old_index_map. Also
// set next_sibling_index field.
var current_index = 0;
@@ -542,16 +542,16 @@ Debug.LiveEdit = new function() {
this.children = children;
// an index in array of compile_info
this.array_index = array_index;
- this.parent = void 0;
+ this.parent = UNDEFINED;
this.status = FunctionStatus.UNCHANGED;
// Status explanation is used for debugging purposes and will be shown
// in user UI if some explanations are needed.
- this.status_explanation = void 0;
- this.new_start_pos = void 0;
- this.new_end_pos = void 0;
- this.corresponding_node = void 0;
- this.unmatched_new_nodes = void 0;
+ this.status_explanation = UNDEFINED;
+ this.new_start_pos = UNDEFINED;
+ this.new_end_pos = UNDEFINED;
+ this.corresponding_node = UNDEFINED;
+ this.unmatched_new_nodes = UNDEFINED;
// 'Textual' correspondence/matching is weaker than 'pure'
// correspondence/matching. We need 'textual' level for visual presentation
@@ -559,10 +559,10 @@ Debug.LiveEdit = new function() {
// Sometimes only function body is changed (functions in old and new script
// textually correspond), but we cannot patch the code, so we see them
// as an old function deleted and new function created.
- this.textual_corresponding_node = void 0;
- this.textually_unmatched_new_nodes = void 0;
+ this.textual_corresponding_node = UNDEFINED;
+ this.textually_unmatched_new_nodes = UNDEFINED;
- this.live_shared_function_infos = void 0;
+ this.live_shared_function_infos = UNDEFINED;
}
// From array of function infos that is implicitly a tree creates
@@ -692,10 +692,10 @@ Debug.LiveEdit = new function() {
ProcessInternals(code_info_tree);
}
- // For ecah old function (if it is not damaged) tries to find a corresponding
+ // For each old function (if it is not damaged) tries to find a corresponding
// function in new script. Typically it should succeed (non-damaged functions
// by definition may only have changes inside their bodies). However there are
- // reasons for corresponence not to be found; function with unmodified text
+ // reasons for correspondence not to be found; function with unmodified text
// in new script may become enclosed into other function; the innocent change
// inside function body may in fact be something like "} function B() {" that
// splits a function into 2 functions.
@@ -703,7 +703,13 @@ Debug.LiveEdit = new function() {
// A recursive function that tries to find a correspondence for all
// child functions and for their inner functions.
- function ProcessChildren(old_node, new_node) {
+ function ProcessNode(old_node, new_node) {
+ var scope_change_description =
+ IsFunctionContextLocalsChanged(old_node.info, new_node.info);
+ if (scope_change_description) {
+ old_node.status = FunctionStatus.CHANGED;
+ }
+
var old_children = old_node.children;
var new_children = new_node.children;
@@ -729,13 +735,20 @@ Debug.LiveEdit = new function() {
new_children[new_index];
old_children[old_index].textual_corresponding_node =
new_children[new_index];
- if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
- ProcessChildren(old_children[old_index],
+ if (scope_change_description) {
+ old_children[old_index].status = FunctionStatus.DAMAGED;
+ old_children[old_index].status_explanation =
+ "Enclosing function is now incompatible. " +
+ scope_change_description;
+ old_children[old_index].corresponding_node = UNDEFINED;
+ } else if (old_children[old_index].status !=
+ FunctionStatus.UNCHANGED) {
+ ProcessNode(old_children[old_index],
new_children[new_index]);
if (old_children[old_index].status == FunctionStatus.DAMAGED) {
unmatched_new_nodes_list.push(
old_children[old_index].corresponding_node);
- old_children[old_index].corresponding_node = void 0;
+ old_children[old_index].corresponding_node = UNDEFINED;
old_node.status = FunctionStatus.CHANGED;
}
}
@@ -772,11 +785,10 @@ Debug.LiveEdit = new function() {
}
if (old_node.status == FunctionStatus.CHANGED) {
- var why_wrong_expectations =
- WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
- if (why_wrong_expectations) {
+ if (old_node.info.param_num != new_node.info.param_num) {
old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = why_wrong_expectations;
+ old_node.status_explanation = "Changed parameter number: " +
+ old_node.info.param_num + " and " + new_node.info.param_num;
}
}
old_node.unmatched_new_nodes = unmatched_new_nodes_list;
@@ -784,7 +796,7 @@ Debug.LiveEdit = new function() {
textually_unmatched_new_nodes_list;
}
- ProcessChildren(old_code_tree, new_code_tree);
+ ProcessNode(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
old_code_tree.textual_corresponding_node = new_code_tree;
@@ -856,7 +868,7 @@ Debug.LiveEdit = new function() {
this.raw_array = raw_array;
}
- // Changes positions (including all statments) in function.
+ // Changes positions (including all statements) in function.
function PatchPositions(old_info_node, diff_array, report_array) {
if (old_info_node.live_shared_function_infos) {
old_info_node.live_shared_function_infos.forEach(function (info) {
@@ -878,15 +890,9 @@ Debug.LiveEdit = new function() {
return script.name + " (old)";
}
- // Compares a function interface old and new version, whether it
+ // Compares a function scope heap structure, old and new version, whether it
// changed or not. Returns explanation if they differ.
- function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
- // Check that function has the same number of parameters (there may exist
- // an adapter, that won't survive function parameter number change).
- if (function_info1.param_num != function_info2.param_num) {
- return "Changed parameter number: " + function_info1.param_num +
- " and " + function_info2.param_num;
- }
+ function IsFunctionContextLocalsChanged(function_info1, function_info2) {
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
@@ -905,8 +911,8 @@ Debug.LiveEdit = new function() {
}
if (scope_info1_text != scope_info2_text) {
- return "Incompatible variable maps: [" + scope_info1_text +
- "] and [" + scope_info2_text + "]";
+ return "Variable map changed: [" + scope_info1_text +
+ "] => [" + scope_info2_text + "]";
}
// No differences. Return undefined.
return;
diff --git a/chromium/v8/src/liveedit.cc b/chromium/v8/src/liveedit.cc
index feaafd471e1..3d459d4ffb7 100644
--- a/chromium/v8/src/liveedit.cc
+++ b/chromium/v8/src/liveedit.cc
@@ -731,8 +731,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
}
- void SetOuterScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kOuterScopeInfoOffset_, scope_info_array);
+ void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
+ this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
Handle<JSValue> info_holder = WrapInJSValue(info);
@@ -771,7 +771,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kParamNumOffset_ = 3;
static const int kCodeOffset_ = 4;
static const int kCodeScopeInfoOffset_ = 5;
- static const int kOuterScopeInfoOffset_ = 6;
+ static const int kFunctionScopeInfoOffset_ = 6;
static const int kParentIndexOffset_ = 7;
static const int kSharedFunctionInfoOffset_ = 8;
static const int kLiteralNumOffset_ = 9;
@@ -880,7 +880,7 @@ class FunctionInfoListener {
Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
isolate());
- info.SetOuterScopeInfo(scope_info_list);
+ info.SetFunctionScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() { return result_; }
@@ -897,14 +897,12 @@ class FunctionInfoListener {
// Saves some description of scope. It stores name and indexes of
// variables in the whole scope chain. Null-named slots delimit
// scopes of this chain.
- Scope* outer_scope = scope->outer_scope();
- if (outer_scope == NULL) {
- return isolate()->heap()->undefined_value();
- }
- do {
- ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone);
- outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+ Scope* current_scope = scope;
+ while (current_scope != NULL) {
+ ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_list(
+ current_scope->ContextLocalCount(), zone);
+ current_scope->CollectStackAndContextLocals(&stack_list, &context_list);
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
@@ -924,8 +922,8 @@ class FunctionInfoListener {
isolate()));
scope_info_length++;
- outer_scope = outer_scope->outer_scope();
- } while (outer_scope != NULL);
+ current_scope = current_scope->outer_scope();
+ }
return *scope_info_list;
}
diff --git a/chromium/v8/src/log-utils.h b/chromium/v8/src/log-utils.h
index ec8415e4b6e..f1a21e2cc15 100644
--- a/chromium/v8/src/log-utils.h
+++ b/chromium/v8/src/log-utils.h
@@ -47,7 +47,8 @@ class Log {
static bool InitLogAtStart() {
return FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_ll_prof || FLAG_log_internal_timer_events;
+ || FLAG_log_regexp || FLAG_ll_prof || FLAG_perf_basic_prof
+ || FLAG_perf_jit_prof || FLAG_log_internal_timer_events;
}
// Frees all resources acquired in Initialize and Open... functions.
diff --git a/chromium/v8/src/log.cc b/chromium/v8/src/log.cc
index 0f0ad40398f..a508e8739ea 100644
--- a/chromium/v8/src/log.cc
+++ b/chromium/v8/src/log.cc
@@ -212,7 +212,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line) {
+ Name* source, int line, int column) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(code));
name_buffer_->AppendString(shared->DebugName());
@@ -246,6 +246,231 @@ void CodeEventLogger::RegExpCodeCreateEvent(Code* code, String* source) {
}
+// Linux perf tool logging support
+class PerfBasicLogger : public CodeEventLogger {
+ public:
+ PerfBasicLogger();
+ virtual ~PerfBasicLogger();
+
+ virtual void CodeMoveEvent(Address from, Address to) { }
+ virtual void CodeDeleteEvent(Address from) { }
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length);
+
+  // Format string for the perf map file name; %d expands to the process id.
+ static const char kFilenameFormatString[];
+ static const int kFilenameBufferPadding;
+
+ // File buffer size of the low-level log. We don't use the default to
+ // minimize the associated overhead.
+ static const int kLogBufferSize = 2 * MB;
+
+ FILE* perf_output_handle_;
+};
+
+const char PerfBasicLogger::kFilenameFormatString[] = "/tmp/perf-%d.map";
+// Extra space for the PID in the filename
+const int PerfBasicLogger::kFilenameBufferPadding = 16;
+
+PerfBasicLogger::PerfBasicLogger()
+ : perf_output_handle_(NULL) {
+ // Open the perf JIT dump file.
+ int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
+ ScopedVector<char> perf_dump_name(bufferSize);
+ int size = OS::SNPrintF(
+ perf_dump_name,
+ kFilenameFormatString,
+ OS::GetCurrentProcessId());
+ CHECK_NE(size, -1);
+ perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
+ CHECK_NE(perf_output_handle_, NULL);
+ setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+}
+
+
+PerfBasicLogger::~PerfBasicLogger() {
+ fclose(perf_output_handle_);
+ perf_output_handle_ = NULL;
+}
+
+
+void PerfBasicLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ const char* name,
+ int length) {
+ ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize);
+
+ OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
+ reinterpret_cast<uint64_t>(code->instruction_start()),
+ code->instruction_size(),
+ length, name);
+}
+
+
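
For context: each entry written by PerfBasicLogger is one text line in the /tmp/perf-<pid>.map format that the Linux perf tool uses to symbolize JIT code, namely start address and size in hex followed by the name. A standalone sketch producing one such line; the address, size, and name are invented for illustration:

    #include <cstdint>
    #include <cstdio>
    int main() {
      std::uint64_t start = 0x7f2b4c0a1020;  // hypothetical code start
      std::uint32_t size = 0x1a4;            // hypothetical code size
      const char* name = "LazyCompile:add file.js:1";
      std::printf("%llx %x %s\n",
                  static_cast<unsigned long long>(start),
                  static_cast<unsigned>(size), name);
    }
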
+// Linux perf tool logging support
+class PerfJitLogger : public CodeEventLogger {
+ public:
+ PerfJitLogger();
+ virtual ~PerfJitLogger();
+
+ virtual void CodeMoveEvent(Address from, Address to) { }
+ virtual void CodeDeleteEvent(Address from) { }
+
+ private:
+ virtual void LogRecordedBuffer(Code* code,
+ SharedFunctionInfo* shared,
+ const char* name,
+ int length);
+
+  // Format string for the perf JIT dump file name; %d expands to the pid.
+ static const char kFilenameFormatString[];
+ static const int kFilenameBufferPadding;
+
+ // File buffer size of the low-level log. We don't use the default to
+ // minimize the associated overhead.
+ static const int kLogBufferSize = 2 * MB;
+
+ void LogWriteBytes(const char* bytes, int size);
+ void LogWriteHeader();
+
+ static const uint32_t kJitHeaderMagic = 0x4F74496A;
+ static const uint32_t kJitHeaderVersion = 0x2;
+ static const uint32_t kElfMachIA32 = 3;
+ static const uint32_t kElfMachX64 = 62;
+ static const uint32_t kElfMachARM = 40;
+ static const uint32_t kElfMachMIPS = 10;
+
+ struct jitheader {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t total_size;
+ uint32_t elf_mach;
+ uint32_t pad1;
+ uint32_t pid;
+ uint64_t timestamp;
+ };
+
+ enum jit_record_type {
+ JIT_CODE_LOAD = 0
+ // JIT_CODE_UNLOAD = 1,
+ // JIT_CODE_CLOSE = 2,
+ // JIT_CODE_DEBUG_INFO = 3,
+ // JIT_CODE_PAGE_MAP = 4,
+ // JIT_CODE_MAX = 5
+ };
+
+ struct jr_code_load {
+ uint32_t id;
+ uint32_t total_size;
+ uint64_t timestamp;
+ uint64_t vma;
+ uint64_t code_addr;
+ uint32_t code_size;
+ uint32_t align;
+ };
+
+ uint32_t GetElfMach() {
+#if V8_TARGET_ARCH_IA32
+ return kElfMachIA32;
+#elif V8_TARGET_ARCH_X64
+ return kElfMachX64;
+#elif V8_TARGET_ARCH_ARM
+ return kElfMachARM;
+#elif V8_TARGET_ARCH_MIPS
+ return kElfMachMIPS;
+#else
+ UNIMPLEMENTED();
+ return 0;
+#endif
+ }
+
+ FILE* perf_output_handle_;
+};
+
+const char PerfJitLogger::kFilenameFormatString[] = "/tmp/jit-%d.dump";
+
+// Extra padding for the PID in the filename
+const int PerfJitLogger::kFilenameBufferPadding = 16;
+
+PerfJitLogger::PerfJitLogger()
+ : perf_output_handle_(NULL) {
+ // Open the perf JIT dump file.
+ int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
+ ScopedVector<char> perf_dump_name(bufferSize);
+ int size = OS::SNPrintF(
+ perf_dump_name,
+ kFilenameFormatString,
+ OS::GetCurrentProcessId());
+ CHECK_NE(size, -1);
+ perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
+ CHECK_NE(perf_output_handle_, NULL);
+ setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+
+ LogWriteHeader();
+}
+
+
+PerfJitLogger::~PerfJitLogger() {
+ fclose(perf_output_handle_);
+ perf_output_handle_ = NULL;
+}
+
+
+void PerfJitLogger::LogRecordedBuffer(Code* code,
+ SharedFunctionInfo*,
+ const char* name,
+ int length) {
+ ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize);
+ ASSERT(perf_output_handle_ != NULL);
+
+ const char* code_name = name;
+ uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
+ uint32_t code_size = code->instruction_size();
+
+ static const char string_terminator[] = "\0";
+
+ jr_code_load code_load;
+ code_load.id = JIT_CODE_LOAD;
+ code_load.total_size = sizeof(code_load) + length + 1 + code_size;
+ code_load.timestamp =
+ static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
+ code_load.vma = 0x0; // Our addresses are absolute.
+ code_load.code_addr = reinterpret_cast<uint64_t>(code->instruction_start());
+ code_load.code_size = code_size;
+ code_load.align = 0;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
+ LogWriteBytes(code_name, length);
+ LogWriteBytes(string_terminator, 1);
+ LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
+}
+
+
+void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
+ size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
+ ASSERT(static_cast<size_t>(size) == rv);
+ USE(rv);
+}
+
+
+void PerfJitLogger::LogWriteHeader() {
+ ASSERT(perf_output_handle_ != NULL);
+ jitheader header;
+ header.magic = kJitHeaderMagic;
+ header.version = kJitHeaderVersion;
+ header.total_size = sizeof(jitheader);
+ header.pad1 = 0xdeadbeef;
+ header.elf_mach = GetElfMach();
+ header.pid = OS::GetCurrentProcessId();
+ header.timestamp = static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
+ LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
+}
+
+
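
A JIT_CODE_LOAD record, as written by LogRecordedBuffer above, is the fixed jr_code_load struct followed by the NUL-terminated name and then the raw code bytes; that is exactly what the total_size computation encodes. A hedged sketch of the arithmetic, with the struct layout mirrored locally rather than taken from a perf header:

    #include <cstdint>
    #include <cstring>
    struct JrCodeLoadSketch {  // mirrors jr_code_load above
      std::uint32_t id, total_size;
      std::uint64_t timestamp, vma, code_addr;
      std::uint32_t code_size, align;
    };
    std::uint32_t TotalSize(const char* name, std::uint32_t code_size) {
      return static_cast<std::uint32_t>(sizeof(JrCodeLoadSketch))
             + static_cast<std::uint32_t>(std::strlen(name)) + 1  // '\0'
             + code_size;  // raw machine code bytes follow the name
    }
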
// Low-level logging support.
#define LL_LOG(Call) if (ll_logger_) ll_logger_->Call;
@@ -711,6 +936,8 @@ Logger::Logger(Isolate* isolate)
log_events_(NULL),
is_logging_(false),
log_(new Log(this)),
+ perf_basic_logger_(NULL),
+ perf_jit_logger_(NULL),
ll_logger_(NULL),
jit_logger_(NULL),
listeners_(5),
@@ -1232,10 +1459,11 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source, int line, int column) {
- PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
+ PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column));
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line));
+ CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line,
+ column));
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1610,7 +1838,7 @@ void Logger::LogCodeObject(Object* object) {
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC: // fall through
+ case Code::BINARY_OP_IC:
case Code::COMPARE_IC: // fall through
case Code::COMPARE_NIL_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
@@ -1629,6 +1857,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A builtin from the snapshot";
tag = Logger::BUILTIN_TAG;
break;
+ case Code::HANDLER:
+ description = "An IC handler from the snapshot";
+ tag = Logger::HANDLER_TAG;
+ break;
case Code::KEYED_LOAD_IC:
description = "A keyed load IC from the snapshot";
tag = Logger::KEYED_LOAD_IC_TAG;
@@ -1765,15 +1997,14 @@ void Logger::LogAccessorCallbacks() {
static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
- if (isolate->IsDefaultIsolate()) return;
+ if (isolate->IsDefaultIsolate() || !FLAG_logfile_per_isolate) return;
stream->Add("isolate-%p-", isolate);
}
static SmartArrayPointer<const char> PrepareLogFileName(
Isolate* isolate, const char* file_name) {
- if (strchr(file_name, '%') != NULL ||
- !isolate->IsDefaultIsolate()) {
+ if (strchr(file_name, '%') != NULL || !isolate->IsDefaultIsolate()) {
// If there's a '%' in the log file name we have to expand
// placeholders.
HeapStringAllocator allocator;
@@ -1835,6 +2066,17 @@ bool Logger::SetUp(Isolate* isolate) {
PrepareLogFileName(isolate, FLAG_logfile);
log_->Initialize(*log_file_name);
+
+ if (FLAG_perf_basic_prof) {
+ perf_basic_logger_ = new PerfBasicLogger();
+ addCodeEventListener(perf_basic_logger_);
+ }
+
+ if (FLAG_perf_jit_prof) {
+ perf_jit_logger_ = new PerfJitLogger();
+ addCodeEventListener(perf_jit_logger_);
+ }
+
if (FLAG_ll_prof) {
ll_logger_ = new LowLevelLogger(*log_file_name);
addCodeEventListener(ll_logger_);
@@ -1897,6 +2139,18 @@ FILE* Logger::TearDown() {
delete ticker_;
ticker_ = NULL;
+ if (perf_basic_logger_) {
+ removeCodeEventListener(perf_basic_logger_);
+ delete perf_basic_logger_;
+ perf_basic_logger_ = NULL;
+ }
+
+ if (perf_jit_logger_) {
+ removeCodeEventListener(perf_jit_logger_);
+ delete perf_jit_logger_;
+ perf_jit_logger_ = NULL;
+ }
+
if (ll_logger_) {
removeCodeEventListener(ll_logger_);
delete ll_logger_;
diff --git a/chromium/v8/src/log.h b/chromium/v8/src/log.h
index 81d45e507b4..e53551d8f45 100644
--- a/chromium/v8/src/log.h
+++ b/chromium/v8/src/log.h
@@ -131,6 +131,7 @@ struct TickSample;
V(CALLBACK_TAG, "Callback") \
V(EVAL_TAG, "Eval") \
V(FUNCTION_TAG, "Function") \
+ V(HANDLER_TAG, "Handler") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
@@ -153,7 +154,9 @@ struct TickSample;
class JitLogger;
+class PerfBasicLogger;
class LowLevelLogger;
+class PerfJitLogger;
class Sampler;
class Logger {
@@ -436,6 +439,8 @@ class Logger {
bool is_logging_;
Log* log_;
+ PerfBasicLogger* perf_basic_logger_;
+ PerfJitLogger* perf_jit_logger_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
List<CodeEventListener*> listeners_;
@@ -470,7 +475,7 @@ class CodeEventListener {
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source,
- int line) = 0;
+ int line, int column) = 0;
virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
int args_count) = 0;
@@ -509,7 +514,7 @@ class CodeEventLogger : public CodeEventListener {
SharedFunctionInfo* shared,
CompilationInfo* info,
Name* source,
- int line);
+ int line, int column);
virtual void RegExpCodeCreateEvent(Code* code, String* source);
virtual void CallbackEvent(Name* name, Address entry_point) { }
diff --git a/chromium/v8/src/macros.py b/chromium/v8/src/macros.py
index d699c146211..7bad23bd425 100644
--- a/chromium/v8/src/macros.py
+++ b/chromium/v8/src/macros.py
@@ -157,6 +157,18 @@ macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
+# Private names.
+macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
+macro HAS_PRIVATE(obj, sym) = (sym in obj);
+macro GET_PRIVATE(obj, sym) = (obj[sym]);
+macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
+macro DELETE_PRIVATE(obj, sym) = (delete obj[sym]);
+
+# Constants. The compiler constant folds them.
+const NAN = $NaN;
+const INFINITY = (1/0);
+const UNDEFINED = (void 0);
+
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
diff --git a/chromium/v8/src/mark-compact.cc b/chromium/v8/src/mark-compact.cc
index 263de4878fc..07bcb7632c4 100644
--- a/chromium/v8/src/mark-compact.cc
+++ b/chromium/v8/src/mark-compact.cc
@@ -38,7 +38,6 @@
#include "ic-inl.h"
#include "incremental-marking.h"
#include "mark-compact.h"
-#include "marking-thread.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
@@ -92,11 +91,10 @@ class VerifyMarkingVisitor: public ObjectVisitor {
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !rinfo->target_object()->IsMap() ||
- !Map::cast(rinfo->target_object())->CanTransition()) {
- VisitPointer(rinfo->target_object_address());
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
+ rinfo->target_object())) {
+ Object* p = rinfo->target_object();
+ VisitPointer(&p);
}
}
@@ -432,9 +430,8 @@ void MarkCompactCollector::CollectGarbage() {
#endif
#ifdef VERIFY_HEAP
- if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code &&
- heap()->weak_embedded_maps_verification_enabled()) {
- VerifyWeakEmbeddedMapsInOptimizedCode();
+ if (heap()->weak_embedded_objects_verification_enabled()) {
+ VerifyWeakEmbeddedObjectsInOptimizedCode();
}
if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
@@ -495,7 +492,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
}
-void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
+void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
HeapObjectIterator code_iterator(heap()->code_space());
for (HeapObject* obj = code_iterator.Next();
obj != NULL;
@@ -503,7 +500,7 @@ void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
Code* code = Code::cast(obj);
if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
if (WillBeDeoptimized(code)) continue;
- code->VerifyEmbeddedMapsDependency();
+ code->VerifyEmbeddedObjectsDependency();
}
}
@@ -560,7 +557,7 @@ void MarkCompactCollector::ClearMarkbits() {
void MarkCompactCollector::StartSweeperThreads() {
sweeping_pending_ = true;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
}
@@ -568,7 +565,7 @@ void MarkCompactCollector::StartSweeperThreads() {
void MarkCompactCollector::WaitUntilSweepingCompleted() {
ASSERT(sweeping_pending_ == true);
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
sweeping_pending_ = false;
@@ -582,7 +579,7 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
intptr_t MarkCompactCollector::
StealMemoryFromSweeperThreads(PagedSpace* space) {
intptr_t freed_bytes = 0;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
}
space->AddToAccountingStats(freed_bytes);
@@ -601,20 +598,6 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
}
-void MarkCompactCollector::MarkInParallel() {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- isolate()->marking_threads()[i]->StartMarking();
- }
-}
-
-
-void MarkCompactCollector::WaitUntilMarkingCompleted() {
- for (int i = 0; i < FLAG_marking_threads; i++) {
- isolate()->marking_threads()[i]->WaitForMarkingThread();
- }
-}
-
-
bool Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -1481,7 +1464,7 @@ class MarkCompactMarkingVisitor
// Mark the backing hash table without pushing it on the marking stack.
Object* table_object = weak_collection->table();
if (!table_object->IsHashTable()) return;
- ObjectHashTable* table = ObjectHashTable::cast(table_object);
+ WeakHashTable* table = WeakHashTable::cast(table_object);
Object** table_slot =
HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
MarkBit table_mark = Marking::MarkBitFrom(table);
@@ -1581,13 +1564,11 @@ void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
fixed_array->map() != heap->fixed_double_array_map() &&
fixed_array != heap->empty_fixed_array()) {
if (fixed_array->IsDictionary()) {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- dictionary_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(dictionary_type,
+ fixed_array->Size());
} else {
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- fast_type,
- fixed_array->Size());
+ heap->RecordFixedArraySubTypeStats(fast_type,
+ fixed_array->Size());
}
}
}
@@ -1597,7 +1578,7 @@ void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
int object_size = obj->Size();
- heap->RecordObjectStats(map->instance_type(), -1, object_size);
+ heap->RecordObjectStats(map->instance_type(), object_size);
non_count_table_.GetVisitorById(id)(map, obj);
if (obj->IsJSObject()) {
JSObject* object = JSObject::cast(obj);
@@ -1630,25 +1611,20 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
if (map_obj->owns_descriptors() &&
array != heap->empty_descriptor_array()) {
int fixed_array_size = array->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- DESCRIPTOR_ARRAY_SUB_TYPE,
- fixed_array_size);
+ heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
+ fixed_array_size);
}
if (map_obj->HasTransitionArray()) {
int fixed_array_size = map_obj->transitions()->Size();
- heap->RecordObjectStats(FIXED_ARRAY_TYPE,
- TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
+ heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
+ fixed_array_size);
}
if (map_obj->has_code_cache()) {
CodeCache* cache = CodeCache::cast(map_obj->code_cache());
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
- MAP_CODE_CACHE_SUB_TYPE,
- cache->default_cache()->Size());
+ heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
+ cache->default_cache()->Size());
if (!cache->normal_type_cache()->IsUndefined()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
+ heap->RecordFixedArraySubTypeStats(
MAP_CODE_CACHE_SUB_TYPE,
FixedArray::cast(cache->normal_type_cache())->Size());
}
@@ -1666,7 +1642,9 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
int object_size = obj->Size();
ASSERT(map->instance_type() == CODE_TYPE);
- heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
+ Code* code_obj = Code::cast(obj);
+ heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
+ object_size);
ObjectStatsVisitBase(kVisitCode, map, obj);
}
};
@@ -1680,8 +1658,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->scope_info() != heap->empty_fixed_array()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
+ heap->RecordFixedArraySubTypeStats(
SCOPE_INFO_SUB_TYPE,
FixedArray::cast(sfi->scope_info())->Size());
}
@@ -1698,8 +1675,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
Heap* heap = map->GetHeap();
FixedArray* fixed_array = FixedArray::cast(obj);
if (fixed_array == heap->string_table()) {
- heap->RecordObjectStats(
- FIXED_ARRAY_TYPE,
+ heap->RecordFixedArraySubTypeStats(
STRING_TABLE_SUB_TYPE,
fixed_array->Size());
}
@@ -1906,6 +1882,14 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
virtual Object* RetainAs(Object* object) {
if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
return object;
+ } else if (object->IsAllocationSite() &&
+ !(AllocationSite::cast(object)->IsZombie())) {
+ // "dead" AllocationSites need to live long enough for a traversal of new
+ // space. These sites get a one-time reprieve.
+ AllocationSite* site = AllocationSite::cast(object);
+ site->MarkZombie();
+ site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+ return object;
} else {
return NULL;
}
@@ -2017,6 +2001,8 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
int size = object->Size();
survivors_size += size;
+ Heap::UpdateAllocationSiteFeedback(object);
+
offset++;
current_cell >>= 1;
// Aggressively promote young survivors to the old space.
@@ -2108,6 +2094,12 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
}
+void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
+ MarkBit mark_bit = Marking::MarkBitFrom(site);
+ SetMark(site, mark_bit);
+}
+
+
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
@@ -2116,6 +2108,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Handle the string table specially.
MarkStringTable(visitor);
+ MarkWeakObjectToCodeTable();
+
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
RefillMarkingDeque();
@@ -2156,6 +2150,16 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
}
+void MarkCompactCollector::MarkWeakObjectToCodeTable() {
+ HeapObject* weak_object_to_code_table =
+ HeapObject::cast(heap()->weak_object_to_code_table());
+ if (!IsMarked(weak_object_to_code_table)) {
+ MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
+ SetMark(weak_object_to_code_table, mark);
+ }
+}
+
+
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
@@ -2523,7 +2527,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (map_mark.Get()) {
ClearNonLiveDependentCode(map->dependent_code());
} else {
- ClearAndDeoptimizeDependentCode(map);
+ ClearAndDeoptimizeDependentCode(map->dependent_code());
+ map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
}
@@ -2537,6 +2542,42 @@ void MarkCompactCollector::ClearNonLiveReferences() {
ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
}
}
+
+ // Iterate over allocation sites, removing dependent code that is not
+ // otherwise kept alive by strong references.
+ Object* undefined = heap()->undefined_value();
+ for (Object* site = heap()->allocation_sites_list();
+ site != undefined;
+ site = AllocationSite::cast(site)->weak_next()) {
+ if (IsMarked(site)) {
+ ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
+ }
+ }
+
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ uint32_t capacity = table->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key_index = table->EntryToIndex(i);
+ Object* key = table->get(key_index);
+ if (!table->IsKey(key)) continue;
+ uint32_t value_index = table->EntryToValueIndex(i);
+ Object* value = table->get(value_index);
+ if (IsMarked(key)) {
+ if (!IsMarked(value)) {
+ HeapObject* obj = HeapObject::cast(value);
+ MarkBit mark = Marking::MarkBitFrom(obj);
+ SetMark(obj, mark);
+ }
+ ClearNonLiveDependentCode(DependentCode::cast(value));
+ } else {
+ ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
+ table->set(key_index, heap_->the_hole_value());
+ table->set(value_index, heap_->the_hole_value());
+ }
+ }
+ }
}
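
The new weak_object_to_code_table pass follows the usual weak-map shape: a marked (live) key keeps its dependent-code value alive and merely pruned, while an unmarked (dead) key has its code deoptimized and both slots cleared. A sketch of that shape, using a standard container in place of V8's WeakHashTable:

    #include <unordered_map>
    struct Obj { bool marked; };
    struct Code { bool marked_for_deoptimization = false; };
    void SweepSketch(std::unordered_map<Obj*, Code*>* table) {
      for (auto it = table->begin(); it != table->end();) {
        if (it->first->marked) {
          ++it;  // live key: dependent code stays registered
        } else {
          it->second->marked_for_deoptimization = true;  // dead key
          it = table->erase(it);  // clear the entry's slots
        }
      }
    }
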
@@ -2553,6 +2594,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
Object* prototype = prototype_transitions->get(proto_offset + i * step);
Object* cached_map = prototype_transitions->get(map_offset + i * step);
if (IsMarked(prototype) && IsMarked(cached_map)) {
+ ASSERT(!prototype->IsUndefined());
int proto_index = proto_offset + new_number_of_transitions * step;
int map_index = map_offset + new_number_of_transitions * step;
if (new_number_of_transitions != i) {
@@ -2602,9 +2644,9 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
}
-void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
+void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
+ DependentCode* entries) {
DisallowHeapAllocation no_allocation;
- DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
@@ -2616,11 +2658,11 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
if (IsMarked(code) && !code->marked_for_deoptimization()) {
code->set_marked_for_deoptimization(true);
+ code->InvalidateEmbeddedObjects();
have_code_to_deoptimize_ = true;
}
entries->clear_at(i);
}
- map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
@@ -2726,10 +2768,12 @@ void MarkCompactCollector::MigrateObject(Address dst,
Address src,
int size,
AllocationSpace dest) {
- HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- // TODO(hpayer): Replace these checks with asserts.
- CHECK(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
- CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
+ if (heap_profiler->is_tracking_object_moves()) {
+ heap_profiler->ObjectMoveEvent(src, dst, size);
+ }
+ ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
+ ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
Address src_slot = src;
Address dst_slot = dst;
@@ -3459,6 +3503,13 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
heap_->string_table()->Iterate(&updating_visitor);
+ updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ table->Iterate(&updating_visitor);
+ table->Rehash(heap_->undefined_value());
+ }
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
@@ -4065,8 +4116,10 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ if (isolate()->num_sweeper_threads() > 0) {
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ }
if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
diff --git a/chromium/v8/src/mark-compact.h b/chromium/v8/src/mark-compact.h
index df2f7821139..2a1d97dc2ae 100644
--- a/chromium/v8/src/mark-compact.h
+++ b/chromium/v8/src/mark-compact.h
@@ -637,7 +637,7 @@ class MarkCompactCollector {
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedMapsInOptimizedCode();
+ void VerifyWeakEmbeddedObjectsInOptimizedCode();
void VerifyOmittedMapChecks();
#endif
@@ -735,10 +735,13 @@ class MarkCompactCollector {
return sequential_sweeping_;
}
- // Parallel marking support.
- void MarkInParallel();
+ // Mark the global table which maps weak objects to dependent code without
+ // marking its contents.
+ void MarkWeakObjectToCodeTable();
- void WaitUntilMarkingCompleted();
+ // Special case for processing weak references in a full collection. We need
+  // to artificially keep AllocationSites alive for a time.
+ void MarkAllocationSite(AllocationSite* site);
private:
MarkCompactCollector();
@@ -889,7 +892,7 @@ class MarkCompactCollector {
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearAndDeoptimizeDependentCode(Map* map);
+ void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
void ClearNonLiveDependentCode(DependentCode* dependent_code);
// Marking detaches initial maps from SharedFunctionInfo objects
diff --git a/chromium/v8/src/math.js b/chromium/v8/src/math.js
index 9ba1934b851..5cbe94a35c7 100644
--- a/chromium/v8/src/math.js
+++ b/chromium/v8/src/math.js
@@ -45,59 +45,52 @@ var $Math = new MathConstructor();
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
if (%_IsSmi(x)) return x >= 0 ? x : -x;
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ x = TO_NUMBER_INLINE(x);
if (x === 0) return 0; // To handle -0.
return x > 0 ? x : -x;
}
// ECMA 262 - 15.8.2.2
function MathAcos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_acos(x);
+ return %Math_acos(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.3
function MathAsin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_asin(x);
+ return %Math_asin(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.4
function MathAtan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan(x);
+ return %Math_atan(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.5
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
function MathAtan2(y, x) {
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_atan2(y, x);
+ return %Math_atan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_ceil(x);
+ return -MathFloor(-x);
}
// ECMA 262 - 15.8.2.7
function MathCos(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathCos(x);
+ x = MathAbs(x); // Convert to number and get rid of -0.
+ return TrigonometricInterpolation(x, 1);
}
// ECMA 262 - 15.8.2.8
function MathExp(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %Math_exp(x);
+ return %Math_exp(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.9
function MathFloor(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+ x = TO_NUMBER_INLINE(x);
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
if (x < 0x80000000 && x > 0) {
@@ -113,34 +106,30 @@ function MathFloor(x) {
// ECMA 262 - 15.8.2.10
function MathLog(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathLog(x);
+ return %_MathLog(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ arg1 = TO_NUMBER_INLINE(arg1);
+ arg2 = TO_NUMBER_INLINE(arg2);
if (arg2 > arg1) return arg2;
if (arg1 > arg2) return arg1;
if (arg1 == arg2) {
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
- // a Smi or a heap number.
- return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
+ // Make sure -0 is considered less than +0.
+ return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg2 : arg1;
}
// All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
+ return NAN;
}
- var r = -1/0; // Compiler constant-folds this to -Infinity.
+ var r = -INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
- // a Smi or heap number.
- if (NUMBER_IS_NAN(n) || n > r ||
- (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) {
+ // Make sure +0 is considered greater than -0.
+ if (NUMBER_IS_NAN(n) || n > r || (r === 0 && n === 0 && %_IsMinusZero(r))) {
r = n;
}
}
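
The tie-break exists because IEEE-754 comparison treats -0 and +0 as equal, so max has to separate them by sign bit; %_IsMinusZero is the V8 intrinsic for that test. The same logic in portable C++, with std::signbit standing in for the intrinsic:

    #include <cmath>
    double MaxSketch(double a, double b) {
      if (a != a) return a;  // NaN propagates, as in MathMax above
      if (b != b) return b;
      if (a > b) return a;
      if (b > a) return b;
      return std::signbit(a) ? b : a;  // tie: prefer +0 over -0
    }
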
@@ -151,26 +140,23 @@ function MathMax(arg1, arg2) { // length == 2
function MathMin(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
- if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ arg1 = TO_NUMBER_INLINE(arg1);
+ arg2 = TO_NUMBER_INLINE(arg2);
if (arg2 > arg1) return arg1;
if (arg1 > arg2) return arg2;
if (arg1 == arg2) {
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
- // a Smi or a heap number.
- return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
+ // Make sure -0 is considered less than +0.
+ return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg1 : arg2;
}
// All comparisons failed, one of the arguments must be NaN.
- return 0/0; // Compiler constant-folds this to NaN.
+ return NAN;
}
- var r = 1/0; // Compiler constant-folds this to Infinity.
+ var r = INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
- // Smi or a heap number.
- if (NUMBER_IS_NAN(n) || n < r ||
- (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) {
+ // Make sure -0 is considered less than +0.
+ if (NUMBER_IS_NAN(n) || n < r || (r === 0 && n === 0 && %_IsMinusZero(n))) {
r = n;
}
}
@@ -179,48 +165,116 @@ function MathMin(arg1, arg2) { // length == 2
// ECMA 262 - 15.8.2.13
function MathPow(x, y) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- return %_MathPow(x, y);
+ return %_MathPow(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
}
// ECMA 262 - 15.8.2.14
+var rngstate; // Initialized to a Uint32Array during genesis.
function MathRandom() {
- return %_RandomHeapNumber();
+ var r0 = (MathImul(18273, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
+ rngstate[0] = r0;
+ var r1 = (MathImul(36969, rngstate[1] & 0xFFFF) + (rngstate[1] >>> 16)) | 0;
+ rngstate[1] = r1;
+ var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
+ // Division by 0x100000000 through multiplication by reciprocal.
+ return (x < 0 ? (x + 0x100000000) : x) * 2.3283064365386962890625e-10;
}
// ECMA 262 - 15.8.2.15
function MathRound(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %RoundNumber(x);
+ return %RoundNumber(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.16
function MathSin(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSin(x);
+ x = x * 1; // Convert to number and deal with -0.
+ if (%_IsMinusZero(x)) return x;
+ return TrigonometricInterpolation(x, 0);
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathSqrt(x);
+ return %_MathSqrt(TO_NUMBER_INLINE(x));
}
// ECMA 262 - 15.8.2.18
function MathTan(x) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- return %_MathTan(x);
+ return MathSin(x) / MathCos(x);
}
// Non-standard extension.
function MathImul(x, y) {
- if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
- if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
- return %NumberImul(x, y);
+ return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
}
+var kInversePiHalf = 0.636619772367581343; // 2 / pi
+var kInversePiHalfS26 = 9.48637384723993156e-9; // 2 / pi / (2^26)
+var kS26 = 1 << 26;
+var kTwoStepThreshold = 1 << 27;
+// pi / 2 rounded up
+var kPiHalf = 1.570796326794896780; // 0x192d4454fb21f93f
+// We use two parts for pi/2 to emulate a higher precision.
+// pi_half_1 only has 26 significant bits for mantissa.
+// Note that pi_half > pi_half_1 + pi_half_2
+var kPiHalf1 = 1.570796325802803040; // 0x00000054fb21f93f
+var kPiHalf2 = 9.920935796805404252e-10; // 0x3326a611460b113e
+
+var kSamples; // Initialized to a number during genesis.
+var kIndexConvert; // Initialized to kSamples / (pi/2) during genesis.
+var kSinTable; // Initialized to a Float64Array during genesis.
+var kCosXIntervalTable; // Initialized to a Float64Array during genesis.
+
+// This implements sine using the following algorithm.
+// 1) Multiplication takes care of to-number conversion.
+// 2) Reduce x to the first quadrant [0, pi/2].
+// Conveniently enough, in case of +/-Infinity, we get NaN.
+// Note that we try to use only 26 instead of 52 significant bits for
+// mantissa to avoid rounding errors when multiplying. For very large
+// input we therefore have additional steps.
+// 3) Replace x by (pi/2-x) if x was in the 2nd or 4th quadrant.
+// 4) Do a table lookup for the closest samples to the left and right of x.
+// 5) Find the derivatives at those sampling points by table lookup:
+// dsin(x)/dx = cos(x) = sin(pi/2-x) for x in [0, pi/2].
+// 6) Use cubic spline interpolation to approximate sin(x).
+// 7) Negate the result if x was in the 3rd or 4th quadrant.
+// 8) Get rid of -0 by adding 0.
+function TrigonometricInterpolation(x, phase) {
+ if (x < 0 || x > kPiHalf) {
+ var multiple;
+ while (x < -kTwoStepThreshold || x > kTwoStepThreshold) {
+ // Let's assume this loop does not terminate.
+      // All numbers x in each loop form a set S.
+ // (1) abs(x) > 2^27 for all x in S.
+ // (2) abs(multiple) != 0 since (2^27 * inverse_pi_half_s26) > 1
+ // (3) multiple is rounded down in 2^26 steps, so the rounding error is
+ // at most max(ulp, 2^26).
+ // (4) so for x > 2^27, we subtract at most (1+pi/4)x and at least
+ // (1-pi/4)x
+ // (5) The subtraction results in x' so that abs(x') <= abs(x)*pi/4.
+ // Note that this difference cannot be simply rounded off.
+ // Set S cannot exist since (5) violates (1). Loop must terminate.
+ multiple = MathFloor(x * kInversePiHalfS26) * kS26;
+ x = x - multiple * kPiHalf1 - multiple * kPiHalf2;
+ }
+ multiple = MathFloor(x * kInversePiHalf);
+ x = x - multiple * kPiHalf1 - multiple * kPiHalf2;
+ phase += multiple;
+ }
+ var double_index = x * kIndexConvert;
+ if (phase & 1) double_index = kSamples - double_index;
+ var index = double_index | 0;
+ var t1 = double_index - index;
+ var t2 = 1 - t1;
+ var y1 = kSinTable[index];
+ var y2 = kSinTable[index + 1];
+ var dy = y2 - y1;
+ return (t2 * y1 + t1 * y2 +
+ t1 * t2 * ((kCosXIntervalTable[index] - dy) * t2 +
+ (dy - kCosXIntervalTable[index + 1]) * t1))
+ * (1 - (phase & 2)) + 0;
+}
+
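
Splitting pi/2 into kPiHalf1 and kPiHalf2 is a head/tail split in the style of Dekker: the head carries only 26 mantissa bits, so multiple * kPiHalf1 loses little precision, and the tail contributes the remaining bits. A minimal sketch of one reduction step under that reading (it omits the pre-reduction loop the code above applies to very large inputs):

    #include <cmath>
    const double kPiHalf1 = 1.570796325802803040;      // 26-bit head of pi/2
    const double kPiHalf2 = 9.920935796805404252e-10;  // tail of pi/2
    double ReduceSketch(double x) {  // assumes |x| below the two-step threshold
      double multiple = std::floor(x / (kPiHalf1 + kPiHalf2));
      return x - multiple * kPiHalf1 - multiple * kPiHalf2;
    }
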
// -------------------------------------------------------------------
function SetUpMath() {
@@ -293,6 +347,13 @@ function SetUpMath() {
"min", MathMin,
"imul", MathImul
));
+
+ %SetInlineBuiltinFlag(MathCeil);
+ %SetInlineBuiltinFlag(MathRandom);
+ %SetInlineBuiltinFlag(MathSin);
+ %SetInlineBuiltinFlag(MathCos);
+ %SetInlineBuiltinFlag(MathTan);
+ %SetInlineBuiltinFlag(TrigonometricInterpolation);
}
SetUpMath();
diff --git a/chromium/v8/src/messages.js b/chromium/v8/src/messages.js
index 2debbf86540..c7096724ace 100644
--- a/chromium/v8/src/messages.js
+++ b/chromium/v8/src/messages.js
@@ -109,6 +109,10 @@ var kMessages = {
invalid_argument: ["invalid_argument"],
data_view_not_array_buffer: ["First argument to DataView constructor must be an ArrayBuffer"],
constructor_not_function: ["Constructor ", "%0", " requires 'new'"],
+  not_a_promise: ["%0", " is not a promise"],
+  promise_cyclic: ["Chaining cycle detected for promise ", "%0"],
+ array_functions_on_frozen: ["Cannot modify frozen array elements"],
+ array_functions_change_sealed: ["Cannot add/remove sealed array elements"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
@@ -196,6 +200,10 @@ function FormatString(format, args) {
// str is one of %0, %1, %2 or %3.
try {
str = NoSideEffectToString(args[arg_num]);
+ if (str.length > 256) {
+ str = %SubString(str, 0, 239) + "...<omitted>..." +
+ %SubString(str, str.length - 2, str.length);
+ }
} catch (e) {
if (%IsJSModule(args[arg_num]))
str = "module";
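
The added check caps formatted message arguments at 256 characters, keeping the first 239 characters and the last 2 around an "...<omitted>..." marker. The same rule as a standalone sketch:

    #include <string>
    std::string TruncateSketch(const std::string& s) {
      if (s.size() <= 256) return s;
      return s.substr(0, 239) + "...<omitted>..." + s.substr(s.size() - 2);
    }
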
@@ -783,64 +791,67 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-var CallSiteReceiverKey = %CreateSymbol("receiver");
-var CallSiteFunctionKey = %CreateSymbol("function");
-var CallSitePositionKey = %CreateSymbol("position");
-var CallSiteStrictModeKey = %CreateSymbol("strict mode");
+// TODO(rossberg)
+var CallSiteReceiverKey = NEW_PRIVATE("receiver");
+var CallSiteFunctionKey = NEW_PRIVATE("function");
+var CallSitePositionKey = NEW_PRIVATE("position");
+var CallSiteStrictModeKey = NEW_PRIVATE("strict mode");
function CallSite(receiver, fun, pos, strict_mode) {
- this[CallSiteReceiverKey] = receiver;
- this[CallSiteFunctionKey] = fun;
- this[CallSitePositionKey] = pos;
- this[CallSiteStrictModeKey] = strict_mode;
+ SET_PRIVATE(this, CallSiteReceiverKey, receiver);
+ SET_PRIVATE(this, CallSiteFunctionKey, fun);
+ SET_PRIVATE(this, CallSitePositionKey, pos);
+ SET_PRIVATE(this, CallSiteStrictModeKey, strict_mode);
}
function CallSiteGetThis() {
- return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteReceiverKey];
+ return GET_PRIVATE(this, CallSiteStrictModeKey)
+ ? UNDEFINED : GET_PRIVATE(this, CallSiteReceiverKey);
}
function CallSiteGetTypeName() {
- return GetTypeName(this[CallSiteReceiverKey], false);
+ return GetTypeName(GET_PRIVATE(this, CallSiteReceiverKey), false);
}
function CallSiteIsToplevel() {
- if (this[CallSiteReceiverKey] == null) {
+ if (GET_PRIVATE(this, CallSiteReceiverKey) == null) {
return true;
}
- return IS_GLOBAL(this[CallSiteReceiverKey]);
+ return IS_GLOBAL(GET_PRIVATE(this, CallSiteReceiverKey));
}
function CallSiteIsEval() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script && script.compilation_type == COMPILATION_TYPE_EVAL;
}
function CallSiteGetEvalOrigin() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return FormatEvalOrigin(script);
}
function CallSiteGetScriptNameOrSourceURL() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script ? script.nameOrSourceURL() : null;
}
function CallSiteGetFunction() {
- return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteFunctionKey];
+ return GET_PRIVATE(this, CallSiteStrictModeKey)
+ ? UNDEFINED : GET_PRIVATE(this, CallSiteFunctionKey);
}
function CallSiteGetFunctionName() {
// See if the function knows its own name
- var name = this[CallSiteFunctionKey].name;
+ var name = GET_PRIVATE(this, CallSiteFunctionKey).name;
if (name) {
return name;
}
- name = %FunctionGetInferredName(this[CallSiteFunctionKey]);
+ name = %FunctionGetInferredName(GET_PRIVATE(this, CallSiteFunctionKey));
if (name) {
return name;
}
// Maybe this is an evaluation?
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
return "eval";
}
@@ -850,8 +861,8 @@ function CallSiteGetFunctionName() {
function CallSiteGetMethodName() {
// See if we can find a unique property on the receiver that holds
// this function.
- var receiver = this[CallSiteReceiverKey];
- var fun = this[CallSiteFunctionKey];
+ var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
+ var fun = GET_PRIVATE(this, CallSiteFunctionKey);
var ownName = fun.name;
if (ownName && receiver &&
(%_CallFunction(receiver, ownName, ObjectLookupGetter) === fun ||
@@ -880,49 +891,51 @@ function CallSiteGetMethodName() {
}
function CallSiteGetFileName() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script ? script.name : null;
}
function CallSiteGetLineNumber() {
- if (this[CallSitePositionKey] == -1) {
+ if (GET_PRIVATE(this, CallSitePositionKey) == -1) {
return null;
}
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
var location = null;
if (script) {
- location = script.locationFromPosition(this[CallSitePositionKey], true);
+ location = script.locationFromPosition(
+ GET_PRIVATE(this, CallSitePositionKey), true);
}
return location ? location.line + 1 : null;
}
function CallSiteGetColumnNumber() {
- if (this[CallSitePositionKey] == -1) {
+ if (GET_PRIVATE(this, CallSitePositionKey) == -1) {
return null;
}
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
var location = null;
if (script) {
- location = script.locationFromPosition(this[CallSitePositionKey], true);
+ location = script.locationFromPosition(
+ GET_PRIVATE(this, CallSitePositionKey), true);
}
return location ? location.column + 1: null;
}
function CallSiteIsNative() {
- var script = %FunctionGetScript(this[CallSiteFunctionKey]);
+ var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
return script ? (script.type == TYPE_NATIVE) : false;
}
function CallSiteGetPosition() {
- return this[CallSitePositionKey];
+ return GET_PRIVATE(this, CallSitePositionKey);
}
function CallSiteIsConstructor() {
- var receiver = this[CallSiteReceiverKey];
+ var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
var constructor = (receiver != null && IS_OBJECT(receiver))
? %GetDataProperty(receiver, "constructor") : null;
if (!constructor) return false;
- return this[CallSiteFunctionKey] === constructor;
+ return GET_PRIVATE(this, CallSiteFunctionKey) === constructor;
}
function CallSiteToString() {
@@ -965,7 +978,7 @@ function CallSiteToString() {
var isConstructor = this.isConstructor();
var isMethodCall = !(this.isToplevel() || isConstructor);
if (isMethodCall) {
- var typeName = GetTypeName(this[CallSiteReceiverKey], true);
+ var typeName = GetTypeName(GET_PRIVATE(this, CallSiteReceiverKey), true);
var methodName = this.getMethodName();
if (functionName) {
if (typeName &&
@@ -1092,7 +1105,7 @@ function FormatStackTrace(obj, error_string, frames) {
var array = [];
%MoveArrayContents(frames, array);
formatting_custom_stack_trace = true;
- var stack_trace = void 0;
+ var stack_trace = UNDEFINED;
try {
stack_trace = $Error.prepareStackTrace(obj, array);
} catch (e) {
@@ -1160,7 +1173,7 @@ function captureStackTrace(obj, cons_opt) {
// Turn this accessor into a data property.
%DefineOrRedefineDataProperty(obj, 'stack', result, NONE);
// Release context values.
- stack = error_string = void 0;
+ stack = error_string = UNDEFINED;
return result;
};
@@ -1171,7 +1184,7 @@ function captureStackTrace(obj, cons_opt) {
%DefineOrRedefineDataProperty(this, 'stack', v, NONE);
if (this === obj) {
// Release context values if holder is the same as the receiver.
- stack = error_string = void 0;
+ stack = error_string = UNDEFINED;
}
};
@@ -1213,7 +1226,7 @@ function SetUpError() {
// Define all the expected properties directly on the error
// object. This avoids going through getters and setters defined
// on prototype objects.
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this, 'stack', UNDEFINED, DONT_ENUM);
if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
@@ -1247,24 +1260,25 @@ var visited_errors = new InternalArray();
var cyclic_error_marker = new $Object();
function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
+ var current = error;
// Climb the prototype chain until we find the holder.
- while (error && !%HasLocalProperty(error, name)) {
- error = %GetPrototype(error);
+ while (current && !%HasLocalProperty(current, name)) {
+ current = %GetPrototype(current);
}
- if (error === null) return void 0;
- if (!IS_OBJECT(error)) return error[name];
+ if (IS_NULL(current)) return UNDEFINED;
+ if (!IS_OBJECT(current)) return error[name];
// If the property is an accessor on one of the predefined errors that can be
// generated statically by the compiler, don't touch it. This is to address
// http://code.google.com/p/chromium/issues/detail?id=69187
- var desc = %GetOwnProperty(error, name);
+ var desc = %GetOwnProperty(current, name);
if (desc && desc[IS_ACCESSOR_INDEX]) {
var isName = name === "name";
- if (error === $ReferenceError.prototype)
- return isName ? "ReferenceError" : void 0;
- if (error === $SyntaxError.prototype)
- return isName ? "SyntaxError" : void 0;
- if (error === $TypeError.prototype)
- return isName ? "TypeError" : void 0;
+ if (current === $ReferenceError.prototype)
+ return isName ? "ReferenceError" : UNDEFINED;
+ if (current === $SyntaxError.prototype)
+ return isName ? "SyntaxError" : UNDEFINED;
+ if (current === $TypeError.prototype)
+ return isName ? "TypeError" : UNDEFINED;
}
// Otherwise, read normally.
return error[name];
diff --git a/chromium/v8/src/mips/assembler-mips-inl.h b/chromium/v8/src/mips/assembler-mips-inl.h
index 2fa6804d198..514b3aaa4f0 100644
--- a/chromium/v8/src/mips/assembler-mips-inl.h
+++ b/chromium/v8/src/mips/assembler-mips-inl.h
@@ -190,16 +190,6 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
- return &reconstructed_obj_ptr_;
-}
-
-
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
@@ -213,10 +203,9 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
}
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
+ return Assembler::target_address_at(pc_);
}
@@ -261,19 +250,24 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
static const int kNoCodeAgeSequenceLength = 7;
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ UNREACHABLE(); // This should never be reached on MIPS.
+ return Handle<Object>();
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
+ Assembler::target_address_at(pc_ + Assembler::kInstrSize));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
+ Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+ stub->instruction_start());
}
@@ -319,6 +313,15 @@ void RelocInfo::set_call_object(Object* target) {
}
+void RelocInfo::WipeOut() {
+ ASSERT(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, NULL);
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
Instr instr0 = Assembler::instr_at(pc_);
Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
diff --git a/chromium/v8/src/mips/assembler-mips.cc b/chromium/v8/src/mips/assembler-mips.cc
index 345b642454b..9aed3bd4aaa 100644
--- a/chromium/v8/src/mips/assembler-mips.cc
+++ b/chromium/v8/src/mips/assembler-mips.cc
@@ -48,6 +48,7 @@ bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
+unsigned CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
@@ -325,6 +326,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
}
@@ -2030,6 +2032,14 @@ void Assembler::dd(uint32_t data) {
}
+void Assembler::emit_code_stub_address(Code* stub) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) =
+ reinterpret_cast<uint32_t>(stub->instruction_start());
+ pc_ += sizeof(uint32_t);
+}
+
+
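
The new emit_code_stub_address above writes a raw 32-bit code address straight into the instruction stream and advances pc_. A minimal stand-alone sketch of the same emission pattern, with a plain byte buffer and cursor as hypothetical stand-ins for the assembler's buffer_ and pc_:

    #include <cstdint>
    #include <cstring>

    // Sketch only: append a 32-bit address to a code buffer, as
    // Assembler::emit_code_stub_address does at pc_.
    void EmitAddress(uint8_t* buffer, int* offset, const void* target) {
      uint32_t bits =
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(target));
      std::memcpy(buffer + *offset, &bits, sizeof bits);  // unaligned-safe
      *offset += sizeof bits;
    }
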
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
diff --git a/chromium/v8/src/mips/assembler-mips.h b/chromium/v8/src/mips/assembler-mips.h
index cb0896a8ded..d9ef46cd014 100644
--- a/chromium/v8/src/mips/assembler-mips.h
+++ b/chromium/v8/src/mips/assembler-mips.h
@@ -72,18 +72,25 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp.
static const int kSizeInBytes = 4;
+ static const int kCpRegister = 23; // cp (s7) is the 23rd register.
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
- return reg.code() - 2; // zero_reg and 'at' are skipped.
+ ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
+ reg.is(from_code(kCpRegister)));
+ return reg.is(from_code(kCpRegister)) ?
+ kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
+ reg.code() - 2; // zero_reg and 'at' are skipped.
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index + 2); // zero_reg and 'at' are skipped.
+ return index == kMaxNumAllocatableRegisters - 1 ?
+ from_code(kCpRegister) : // Last index is always the 'cp' register.
+ from_code(index + 2); // zero_reg and 'at' are skipped.
}
static const char* AllocationIndexToString(int index) {
@@ -102,7 +109,7 @@ struct Register {
"t4",
"t5",
"t6",
- "t7",
+ "s7",
};
return names[index];
}
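
The new mapping folds cp (s7) into the allocatable set: codes 2..14 (v0 through t6) keep index code - 2, while the last allocation index is reserved for cp. A hedged stand-alone sketch of the bijection, using plain ints for register codes:

    // Sketch of the allocation-index mapping above. Codes 2..14 (v0..t6)
    // map to indices 0..12; index 13 is reserved for cp (s7, code 23).
    const int kMaxAllocatable = 14;  // mirrors kMaxNumAllocatableRegisters
    const int kCpCode = 23;          // mirrors kCpRegister

    int ToAllocationIndex(int code) {
      return code == kCpCode ? kMaxAllocatable - 1  // cp -> last index
                             : code - 2;  // zero_reg and 'at' are skipped
    }

    int FromAllocationIndex(int index) {
      return index == kMaxAllocatable - 1 ? kCpCode : index + 2;
    }
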
@@ -404,28 +411,49 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- return (supported_ & (1u << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ unsigned mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, unsigned set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static unsigned flag2set(CpuFeature f) {
+ return 1u << f;
+ }
+
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_only_;
+ static unsigned cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
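
All of the rewritten predicates reduce to one-bit tests against unsigned masks, and the new cross_compile_ mask lets a cross-compiling host pin the feature set. A minimal sketch of the same bitset logic, with a hypothetical feature enum standing in for V8's CpuFeature:

    // Sketch of the CpuFeatures bit tests above. 'Feature' is a
    // hypothetical stand-in for V8's CpuFeature enum.
    enum Feature { kFPU = 0, kMips32r2 = 1 };

    unsigned flag2set(Feature f) { return 1u << f; }

    bool Check(Feature f, unsigned set) { return (set & flag2set(f)) != 0; }

    // VerifyCrossCompiling(f): either no cross-compile target is
    // configured, or the feature is part of the configured mask.
    bool VerifyCrossCompiling(Feature f, unsigned cross_compile) {
      unsigned mask = flag2set(f);
      return cross_compile == 0 || (cross_compile & mask) == mask;
    }
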
@@ -509,13 +537,6 @@ class Assembler : public AssemblerBase {
target);
}
- // This sets the branch destination.
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -868,6 +889,9 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ // Emits the address of the code stub's first instruction.
+ void emit_code_stub_address(Code* stub);
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
diff --git a/chromium/v8/src/mips/builtins-mips.cc b/chromium/v8/src/mips/builtins-mips.cc
index 3aabd97b972..19f3cdf4ff8 100644
--- a/chromium/v8/src/mips/builtins-mips.cc
+++ b/chromium/v8/src/mips/builtins-mips.cc
@@ -122,7 +122,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ SmiTst(a2, t0);
__ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
@@ -152,7 +152,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ SmiTst(a2, t0);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
@@ -201,14 +201,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register argument = a2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- a0, // Input.
- argument, // Result.
- a3, // Scratch.
- t0, // Scratch.
- t1, // Scratch.
- &not_cached);
+ __ LookupNumberStringCache(a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ t1, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
__ bind(&argument_is_string);
@@ -303,17 +301,12 @@ static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
- // Function is also the parameter to the runtime call.
- __ push(a1);
+ // Push call kind information and function as parameter to the runtime call.
+ __ Push(a1, t1, a1);
__ CallRuntime(function_id, 1);
- // Restore call kind information.
- __ pop(t1);
- // Restore receiver.
- __ pop(a1);
+ // Restore call kind information and receiver.
+ __ Pop(a1, t1);
}
@@ -423,14 +416,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sb(t0, constructor_count);
__ Branch(&allocate, ne, t0, Operand(zero_reg));
- __ Push(a1, a2);
-
- __ push(a1); // Constructor.
+ __ Push(a1, a2, a1); // a1 = Constructor.
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ pop(a2);
- __ pop(a1);
+ __ Pop(a1, a2);
__ bind(&allocate);
}
@@ -823,24 +813,22 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
- __ mov(a0, ra);
- // Adjust a0 to point to the head of the PlatformCodeAge sequence
+ // Set a0 to point to the head of the PlatformCodeAge sequence.
__ Subu(a0, a0,
Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
- // Restore the original return address of the function
- __ mov(ra, at);
// The following registers must be saved and restored when calling through to
// the runtime:
// a0 - contains return address (beginning of patch sequence)
- // a1 - function object
+ // a1 - isolate
RegList saved_regs =
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a1);
+ __ PrepareCallCFunction(1, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(saved_regs);
__ Jump(a0);
}
@@ -858,7 +846,48 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection, which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+
+ // Set a0 to point to the head of the PlatformCodeAge sequence.
+ __ Subu(a0, a0,
+ Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - isolate
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(1, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ __ MultiPop(saved_regs);
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ Push(ra, fp, cp, a1);
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Jump to point after the code-age stub.
+ __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+ __ Jump(a0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -867,7 +896,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -876,6 +905,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -925,23 +964,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- RegList saved_regs =
- (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
- __ MultiPush(saved_regs);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ MultiPop(saved_regs);
- __ Ret();
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -984,6 +1006,23 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ Ret();
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
@@ -1174,11 +1213,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
@@ -1203,8 +1244,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Out of stack space.
__ lw(a1, MemOperand(fp, kFunctionOffset));
- __ push(a1);
- __ push(v0);
+ __ Push(a1, v0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
// End of stack check.
@@ -1285,8 +1325,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// a0: current argument index
__ bind(&loop);
__ lw(a1, MemOperand(fp, kArgsOffset));
- __ push(a1);
- __ push(a0);
+ __ Push(a1, a0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
@@ -1339,7 +1378,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Addu(fp, sp, Operand(3 * kPointerSize));
+ __ Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
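
These adaptor-frame hunks replace bare slot counts (-3, -4, 3 * kPointerSize) with expressions over StandardFrameConstants::kFixedFrameSizeFromFp, so the offsets track any future change to the fixed frame. A sketch of the arithmetic, assuming this snapshot's 32-bit layout where the fixed part below fp holds the context and function slots:

    // Sketch: derive the adaptor-frame offsets used above from named
    // constants. kPointerSize == 4 and a two-slot fixed frame below fp
    // (context, function) are assumptions matching this V8 snapshot.
    const int kPointerSize = 4;
    const int kFixedFrameSizeFromFp = 2 * kPointerSize;

    // Slot holding the saved argument count, formerly -3 * kPointerSize.
    const int kArgCountSlot = -(kFixedFrameSizeFromFp + kPointerSize);

    // fp is planted this far above sp after the five-register MultiPush.
    const int kFpFromSp = kFixedFrameSizeFromFp + kPointerSize;
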
@@ -1349,7 +1389,8 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+ __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize)));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
__ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
@@ -1447,7 +1488,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(a2, fp, Operand(t2));
- __ Addu(a2, a2, Operand(-4 * kPointerSize)); // Adjust for frame.
+ // Adjust for frame.
+ __ Subu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
diff --git a/chromium/v8/src/mips/code-stubs-mips.cc b/chromium/v8/src/mips/code-stubs-mips.cc
index 0589bf01624..4c3708ce7a5 100644
--- a/chromium/v8/src/mips/code-stubs-mips.cc
+++ b/chromium/v8/src/mips/code-stubs-mips.cc
@@ -60,6 +60,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
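
Each InitializeInterfaceDescriptor override added in this file repeats one recipe: a static list of parameter registers, their count, and a handler to invoke on deoptimization. A hedged sketch of the shape, with hypothetical types standing in for V8's CodeStubInterfaceDescriptor:

    // Sketch of the descriptor pattern used by the stubs in this file.
    // DescriptorSketch and the register names are illustrative stand-ins.
    struct DescriptorSketch {
      int register_param_count_;
      const char* const* register_params_;
      const void* deoptimization_handler_;
    };

    void InitializeExampleDescriptor(DescriptorSketch* descriptor) {
      static const char* const registers[] = { "a0" };  // param registers
      descriptor->register_param_count_ = 1;
      descriptor->register_params_ = registers;
      descriptor->deoptimization_handler_ = nullptr;  // runtime entry in V8
    }
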
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -67,7 +78,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -78,7 +89,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -103,6 +114,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -123,6 +145,19 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -167,14 +202,21 @@ static void InitializeArrayConstructorDescriptor(
// a0 -- number of arguments
// a1 -- function
// a2 -- type info cell with elements kind
- static Register registers[] = { a1, a2 };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
+ static Register registers_variable_args[] = { a1, a2, a0 };
+ static Register registers_no_args[] = { a1, a2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// Stack parameter count covers the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &a0;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = a0;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -188,15 +230,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// a0 -- number of arguments
// a1 -- constructor function
- static Register registers[] = { a1 };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { a1, a0 };
+ static Register registers_no_args[] = { a1 };
- if (constant_stack_parameter_count != 0) {
- // Stack param count needs (constructor pointer, and single argument).
- descriptor->stack_parameter_count_ = &a0;
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // Stack parameter count covers the constructor pointer and a single argument.
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = a0;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -280,6 +328,17 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void NewStringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -536,23 +595,27 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch3 =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
- DoubleRegister double_scratch = kLithiumScratchDouble.low();
- DoubleRegister double_input = f12;
+ DoubleRegister double_scratch = kLithiumScratchDouble;
__ Push(scratch, scratch2, scratch3);
- __ ldc1(double_input, MemOperand(input_reg, double_offset));
-
if (!skip_fastpath()) {
+ // Load double input.
+ __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+
// Clear cumulative exception flags and save the FCSR.
__ cfc1(scratch2, FCSR);
__ ctc1(zero_reg, FCSR);
+
// Try a conversion to a signed integer.
- __ trunc_w_d(double_scratch, double_input);
+ __ Trunc_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
__ mfc1(result_reg, double_scratch);
+
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
__ ctc1(scratch2, FCSR);
+
// Check for overflow and NaNs.
__ And(
scratch, scratch,
@@ -565,7 +628,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ Move(input_low, input_high, double_input);
+
+ __ lw(input_low, MemOperand(input_reg, double_offset));
+ __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
@@ -649,33 +714,12 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(a1) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a2) &&
- sign_.is(a3)) {
- return true;
- }
- if (the_int_.is(a2) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a3) &&
- sign_.is(a0)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
+ stub2.GetCode(isolate);
}
@@ -994,105 +1038,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ sra(mask, mask, kSmiTagSize + 1);
- __ Addu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ sra(scratch, object, 1); // Shift away the tag.
- __ And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch, scratch, kPointerSizeLog2 + 1);
- __ Addu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ lw(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ lw(a1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime);
- __ DropAndRet(1);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
-}
-
-
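
The deleted lookup lives on as MacroAssembler::LookupNumberStringCache (used in builtins-mips.cc above), with the hashing unchanged: a smi hashes to its own value, a heap number to the xor of its two raw words, masked to half the cache length since each entry is a (number, string) pair. A compact sketch of the index computation:

    #include <cstdint>
    #include <cstring>

    // Sketch of the cache index computed above. Each entry is a
    // (number, string) pair, hence mask = length / 2 - 1.
    uint32_t NumberStringCacheIndex(double value, bool is_smi,
                                    int cache_length) {
      uint32_t mask = static_cast<uint32_t>(cache_length / 2 - 1);
      uint32_t hash;
      if (is_smi) {
        hash = static_cast<uint32_t>(static_cast<int32_t>(value));
      } else {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits);
        hash = static_cast<uint32_t>(bits) ^
               static_cast<uint32_t>(bits >> 32);
      }
      return hash & mask;  // entry index; offset is index * 2 pointers
    }
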
static void ICCompareStub_CheckInputType(MacroAssembler* masm,
Register input,
Register scratch,
@@ -1316,958 +1261,18 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in v0.
-// Register heap_number_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// a0: Left value (least significant part of mantissa).
-// a1: Left value (sign, exponent, top of mantissa).
-// a2: Right value (least significant part of mantissa).
-// a3: Right value (sign, exponent, top of mantissa).
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = true; // FPU is a base requirement for V8.
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(a1, a0);
-
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ push(a2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = a1;
- Register right = a0;
-
- Register scratch1 = t0;
- Register scratch2 = t1;
-
- ASSERT(right.is(a0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::MUL: {
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(scratch1, right);
- // Do multiplication.
- // lo = lower 32 bits of scratch1 * left.
- // hi = higher 32 bits of scratch1 * left.
- __ Mult(left, scratch1);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
- __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
- // Go slow on zero result to handle -0.
- __ mflo(v0);
- __ Ret(ne, v0, Operand(zero_reg));
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ Addu(scratch2, right, left);
- Label skip;
- // ARM uses the 'pl' condition, which is 'ge'.
- // Negating it results in 'lt'.
- __ Branch(&skip, lt, scratch2, Operand(zero_reg));
- ASSERT(Smi::FromInt(0) == 0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
- __ bind(&skip);
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- }
- break;
- case Token::DIV: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by zero before getting the result.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the divisor (right) is
- // positive, otherwise it is a -0 case.
- // Quotient is in 'lo', remainder is in 'hi'.
- // Check for no remainder first.
- __ mfhi(scratch1);
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- __ mflo(scratch1);
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch1);
- }
- break;
- case Token::MOD: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by 0 before calling mfhi.
- // Check for zero on the right hand side.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividend (left) is
- // positive (or 0), otherwise it is a -0 case.
- // Remainder is in 'hi'.
- __ mfhi(scratch2);
- __ Branch(&done, ne, scratch2, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch1, scratch2, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch2);
- }
- break;
- case Token::BIT_OR:
- __ Ret(USE_DELAY_SLOT);
- __ or_(v0, left, right);
- break;
- case Token::BIT_AND:
- __ Ret(USE_DELAY_SLOT);
- __ and_(v0, left, right);
- break;
- case Token::BIT_XOR:
- __ Ret(USE_DELAY_SLOT);
- __ xor_(v0, left, right);
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(scratch1, left, scratch1);
- // Smi tag result.
- __ And(v0, scratch1, ~kSmiTagMask);
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(v0, scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ And(scratch1, v0, Operand(0xc0000000));
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- // Smi tag result.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0);
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT);
- __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
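
Two bit tricks recur in this deleted smi fast path: multiply overflow is caught by checking that hi equals lo sign-extended (the upper 33 bits of the 64-bit product must agree for an in-range 32-bit result), and "fits in a smi" is the classic add-0x40000000 sign test. A stand-alone sketch of both; the -0 handling the stub layers on top is omitted here:

    #include <cstdint>

    // Sketch of the deleted MUL path's overflow check: compute the 64-bit
    // product (Mult's hi:lo) and require the upper 33 bits to agree.
    bool MulNoOverflow(int32_t left, int32_t right, int32_t* out) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t lo = static_cast<int32_t>(product);        // mflo
      int32_t hi = static_cast<int32_t>(product >> 32);  // mfhi
      if ((lo >> 31) != hi) return false;                // overflowed
      *out = lo;
      return true;
    }

    // The add-0x40000000 trick: a value fits a 31-bit smi, i.e. lies in
    // [-2^30, 2^30), exactly when adding 2^30 does not flip it negative.
    bool FitsInSmi(int32_t value) {
      uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
      return static_cast<int32_t>(biased) >= 0;
    }
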
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Allocate new heap number for result.
- Register result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load left and right operands into f12 and f14.
- if (smi_operands) {
- __ SmiUntag(scratch1, a0);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ SmiUntag(scratch1, a1);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- } else {
- // Load right operand to f14.
- if (right_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
- }
- // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
- }
- }
-
- // Calculate the result.
- if (op != Token::MOD) {
- // Using FPU registers:
- // f12: Left value.
- // f14: Right value.
- switch (op) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
- } else {
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op, result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(a3, left);
- __ SmiUntag(a2, right);
- } else {
- // Convert operands to 32-bit integers. Right in a2 and left in a3.
- __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
- __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
- }
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int, so we fall back to the slow
- // case when this happens.
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi.
- __ Addu(a3, a2, Operand(0x40000000));
- __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = t1;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // a2: Answer as signed int32.
- // t1: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to v0, which is the
- // result.
- __ mov(v0, t1);
- // Convert the int32 in a2 to the heap number in v0. As
- // mentioned above SHR needs to always produce a positive result.
- __ mtc1(a2, f0);
- if (op == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis is successful, a return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated, the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ Branch(&right_arg_changed,
- ne,
- a0,
- Operand(Smi::FromInt(fixed_right_arg_value())));
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ GetObjectType(left, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- FPURegister double_scratch = f0;
- FPURegister single_scratch = f6;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the cause leading to the type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ Or(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers a0 and a1 (right
- // and left) are preserved for the runtime call.
-
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
-
- if (op_ != Token::MOD) {
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ <= BinaryOpIC::INT32) {
- Register except_flag = scratch2;
- const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
- kRoundToMinusInf : kRoundToZero;
- const CheckForInexactConversion kConversion = op_ == Token::DIV ?
- kCheckForInexactConversion : kDontCheckForInexactConversion;
- __ EmitFPUTruncate(kRoundingMode,
- scratch1,
- f10,
- at,
- f16,
- except_flag,
- kConversion);
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
- __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
- Label not_zero;
- ASSERT(kSmiTag == 0);
- __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch2, f11);
- __ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(&transition, ne, scratch2, Operand(zero_reg));
- __ bind(&not_zero);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, scratch1);
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- // We are using FPU registers so s0 is available.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sdc1(f10,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- if (encoded_right_arg_.has_value) {
- __ Move(f16, fixed_right_arg_value());
- __ BranchF(&transition, NULL, ne, f14, f16);
- }
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ Branch(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- // Convert operands to 32-bit integers. Right in a2 and left in a3. The
- // registers a0 and a1 (right and left) are preserved for the runtime
- // call.
- __ LoadNumberAsInt32(
- left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
- __ LoadNumberAsInt32(
- right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- __ And(a2, a2, Operand(0x1f));
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- __ And(a2, a2, Operand(0x1f));
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (a2) is 0.
- // This result cannot be represented as a signed 32-bit integer, try
- // to return a heap number if we can.
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- break;
- case Token::SHL:
- __ And(a2, a2, Operand(0x1f));
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch1, a2, Operand(0x40000000));
- // If not try to return a heap number. (We know the result is an int32.)
- __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
- // Tag the result and return.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- __ bind(&return_heap_number);
- heap_number_result = t1;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ sdc1(double_scratch,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&check, ne, a1, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, ne, a0, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = a1;
- Register right = a0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ GetObjectType(left, a2, a2);
- __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
-  // The code below will clobber result if allocation fails. To keep both
-  // arguments intact for the runtime call, result cannot be one of them.
- ASSERT(!result.is(a0) && !result.is(a1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ Branch(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, overwritable_operand);
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(a1, a0);
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
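+  // The binary op IC takes its left operand in a1 and its right operand in
+  // a0; the miss handler registered below expects the same two registers.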
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in f4, double result goes
// into f4.
@@ -2723,20 +1728,14 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
}
@@ -2755,16 +1754,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
store_buffer_overflow_code = *stub.GetCode(isolate);
}
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -2795,8 +1791,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+ __ PrepareCallCFunction(2, 0, a1);
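+    // The failure object is already in a0; the isolate is passed as the
+    // second C argument.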
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
}
ExternalReference scope_depth =
@@ -2875,7 +1872,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0, true);
+ __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
// Check if we should retry or throw exception.
Label retry;
@@ -3408,8 +2405,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = a0;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
@@ -3942,7 +2938,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the RegExp has been compiled (data contains a fixed array).
__ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ SmiTst(regexp_data, t0);
__ Check(nz,
kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
@@ -4156,7 +3152,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
DirectCEntryStub stub;
stub.GenerateCall(masm, t9);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
// v0: result
// subject: subject string (callee saved)
@@ -4424,6 +3420,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : cache cell for call target
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4444,9 +3441,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in a3.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
__ lw(t1, FieldMemOperand(a3, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, t1, Operand(at));
@@ -4485,6 +3479,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
1 << 5 | // a1
1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
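+  // Smi-tagging makes the raw argument count look like a valid heap value,
+  // so the GC can safely scan it while it is saved on the stack.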
__ MultiPush(kSavedRegs);
@@ -5803,33 +4798,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ Ret();
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ Ret();
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5863,13 +4836,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
__ bind(&done);
@@ -6206,8 +5173,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
- __ push(ra);
- __ Push(a1, a0);
+ __ Push(ra, a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
__ addiu(sp, sp, -kPointerSize);
__ CallExternalReference(miss, 3, USE_DELAY_SLOT);
@@ -6222,9 +5188,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // No need to pop or drop anything, LeaveExitFrame will restore the old
- // stack, thus dropping the allocated space for the return value.
- // The saved ra is after the reserved stack space for the 4 args.
+  // Make room for the arguments required by the C calling convention. Most
+  // callers of DirectCEntryStub::GenerateCall use EnterExitFrame and
+  // LeaveExitFrame, so they restore the stack themselves and we don't have
+  // to do it here. Any caller of DirectCEntryStub::GenerateCall must take
+  // care of dropping kCArgsSlotsSize stack space after the call.
+ __ Subu(sp, sp, Operand(kCArgsSlotsSize));
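+  // The MIPS o32 ABI requires the caller to reserve stack space for the
+  // four argument registers; kCArgsSlotsSize covers exactly those slots.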
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Call(t9); // Call the C++ function.
__ lw(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -6241,33 +5214,11 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- __ Move(t9, target);
- __ AssertStackIsAligned();
- // Allocate space for arg slots.
- __ Subu(sp, sp, kCArgsSlotsSize);
-
- // Block the trampoline pool through the whole function to make sure the
- // number of generated instructions is constant.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
- // We need to get the current 'pc' value, which is not available on MIPS.
- Label find_ra;
- masm->bal(&find_ra); // ra = pc + 8.
- masm->nop(); // Branch delay slot nop.
- masm->bind(&find_ra);
-
- const int kNumInstructionsToJump = 6;
- masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
- // Push return address (accessible to GC through exit frame pc).
- // This spot for ra was reserved in EnterExitFrame.
- masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
- // Call the function.
- masm->Jump(t9);
- // Make sure the stored 'ra' points to this position.
- ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+ __ Move(t9, target);
+ __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+ __ Call(ra);
}
@@ -6524,89 +5475,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
- // StringAddStub::Generate
- { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
- { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
// Hydrogen code stubs need stub2 at snapshot time.
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
+ stub2.GetCode(isolate);
}
@@ -6896,9 +5771,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ mov(a1, v0);
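+  // The deoptimization handler returns the function to call in v0;
+  // InvokeFunction below expects it in a1.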
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ lw(a0, MemOperand(fp, parameter_count_offset));
+  // The parameter count above includes the receiver of the arguments passed
+  // to the deoptimization handler. Subtract the receiver to get the
+  // parameter count for the call.
+ __ Subu(a0, a0, 1);
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ParameterCount argument_count(a0);
+ __ InvokeFunction(
+ a1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(ra);
__ CallStub(&stub);
@@ -7047,11 +5939,14 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
}
- // Save the resulting elements kind in type info
- __ SmiTag(a3);
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(a3);
+  // Save the resulting elements kind in type info. We can't just store a3
+  // in the AllocationSite::transition_info field, because the elements kind
+  // is restricted to a portion of the field; the upper bits must be left
+  // alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
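+  // With the kind bits at offset 0, adding the smi-tagged packed-to-holey
+  // delta turns a packed kind into its holey counterpart in place, leaving
+  // the upper bits of transition_info untouched.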
+ __ lw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ sw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -7083,12 +5978,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
(!FLAG_track_allocation_sites &&
(kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
}
}
}
@@ -7110,11 +6005,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -7161,7 +6056,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
@@ -7193,6 +6088,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -7248,7 +6145,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
diff --git a/chromium/v8/src/mips/code-stubs-mips.h b/chromium/v8/src/mips/code-stubs-mips.h
index 8c9d22ae5dd..c3e05b8a2d4 100644
--- a/chromium/v8/src/mips/code-stubs-mips.h
+++ b/chromium/v8/src/mips/code-stubs-mips.h
@@ -69,7 +69,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -240,7 +239,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
ASSERT(SignRegisterBits::is_valid(sign_.code()));
}
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -268,31 +266,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
@@ -316,8 +289,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -480,22 +451,6 @@ class RecordWriteStub: public PlatformCodeStub {
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
diff --git a/chromium/v8/src/mips/codegen-mips.cc b/chromium/v8/src/mips/codegen-mips.cc
index 5c847fc8f62..3a87c5af886 100644
--- a/chromium/v8/src/mips/codegen-mips.cc
+++ b/chromium/v8/src/mips/codegen-mips.cc
@@ -156,8 +156,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- masm->TestJSArrayForAllocationMemento(a2, t0, eq,
- allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
}
// Set transitioned map.
@@ -188,7 +187,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register scratch = t6;
if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -316,7 +315,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
+ __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -540,52 +539,67 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
- Label done;
+ Label zero, infinity, done;
__ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ ldc1(double_scratch1, ExpConstant(0, temp3));
- __ Move(result, kDoubleRegZero);
- __ BranchF(&done, NULL, ge, double_scratch1, input);
+ __ BranchF(&zero, NULL, ge, double_scratch1, input);
+
__ ldc1(double_scratch2, ExpConstant(1, temp3));
- __ ldc1(result, ExpConstant(2, temp3));
- __ BranchF(&done, NULL, ge, input, double_scratch2);
+ __ BranchF(&infinity, NULL, ge, input, double_scratch2);
+
__ ldc1(double_scratch1, ExpConstant(3, temp3));
__ ldc1(result, ExpConstant(4, temp3));
__ mul_d(double_scratch1, double_scratch1, input);
__ add_d(double_scratch1, double_scratch1, result);
- __ Move(temp2, temp1, double_scratch1);
+ __ FmoveLow(temp2, double_scratch1);
__ sub_d(double_scratch1, double_scratch1, result);
__ ldc1(result, ExpConstant(6, temp3));
__ ldc1(double_scratch2, ExpConstant(5, temp3));
__ mul_d(double_scratch1, double_scratch1, double_scratch2);
__ sub_d(double_scratch1, double_scratch1, input);
__ sub_d(result, result, double_scratch1);
- __ mul_d(input, double_scratch1, double_scratch1);
- __ mul_d(result, result, input);
- __ srl(temp1, temp2, 11);
+ __ mul_d(double_scratch2, double_scratch1, double_scratch1);
+ __ mul_d(result, result, double_scratch2);
__ ldc1(double_scratch2, ExpConstant(7, temp3));
__ mul_d(result, result, double_scratch2);
__ sub_d(result, result, double_scratch1);
- __ ldc1(double_scratch2, ExpConstant(8, temp3));
+  // Move 1 into double_scratch2, since math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ Move(double_scratch2, 1);
__ add_d(result, result, double_scratch2);
- __ li(at, 0x7ff);
- __ And(temp2, temp2, at);
+ __ srl(temp1, temp2, 11);
+ __ Ext(temp2, temp2, 0, 11);
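+  // temp2 now holds an 11-bit index into the exp log table and temp1 the
+  // remaining high bits. Biased by 0x3ff and shifted left by 20 below,
+  // temp1 becomes the exponent field of an IEEE-754 double, i.e. a
+  // power-of-two scale factor.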
__ Addu(temp1, temp1, Operand(0x3ff));
- __ sll(temp1, temp1, 20);
// Must not call ExpConstant() after overwriting temp3!
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
__ sll(at, temp2, 3);
- __ addu(at, at, temp3);
- __ lw(at, MemOperand(at));
- __ Addu(temp3, temp3, Operand(kPointerSize));
- __ sll(temp2, temp2, 3);
- __ addu(temp2, temp2, temp3);
- __ lw(temp2, MemOperand(temp2));
- __ Or(temp1, temp1, temp2);
- __ Move(input, at, temp1);
- __ mul_d(result, result, input);
+ __ Addu(temp3, temp3, Operand(at));
+ __ lw(temp2, MemOperand(temp3, 0));
+ __ lw(temp3, MemOperand(temp3, kPointerSize));
+  // The word loaded first is the low word of the double (little endian).
+ if (temp2.code() < temp3.code()) {
+ __ sll(at, temp1, 20);
+ __ Or(temp1, temp3, at);
+ __ Move(double_scratch1, temp2, temp1);
+ } else {
+ __ sll(at, temp1, 20);
+ __ Or(temp1, temp2, at);
+ __ Move(double_scratch1, temp3, temp1);
+ }
+ __ mul_d(result, result, double_scratch1);
+ __ Branch(&done);
+
+ __ bind(&zero);
+ __ Move(result, kDoubleRegZero);
+ __ Branch(&done);
+
+ __ bind(&infinity);
+ __ ldc1(result, ExpConstant(2, temp3));
+
__ bind(&done);
}
@@ -604,7 +618,8 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
patcher.masm()->Push(ra, fp, cp, a1);
patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
+ patcher.masm()->Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
initialized = true;
}
return byte_sequence;
@@ -624,39 +639,41 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Address target_address = Assembler::target_address_at(
+ sequence + Assembler::kInstrSize);
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- // Mark this code sequence for FindPlatformCodeAgeSequence()
+ // Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
- // Save the function's original return address
- // (it will be clobbered by Call(t9))
- patcher.masm()->mov(at, ra);
- // Load the stub address to t9 and call it
- patcher.masm()->li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
- patcher.masm()->Call(t9);
- // Record the stub address in the empty space for GetCodeAgeAndParity()
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    // Load the stub address into t9 and call it.
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ patcher.masm()->li(
+ t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+ CONSTANT_SIZE);
+ patcher.masm()->nop(); // Prevent jalr to jal optimization.
+ patcher.masm()->jalr(t9, a0);
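+    // Using a0 as the link register for jalr keeps ra intact, so the
+    // code-age stub can still see the function's original return address.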
+ patcher.masm()->nop(); // Branch delay slot nop.
+ patcher.masm()->nop(); // Pad the empty space.
}
}
diff --git a/chromium/v8/src/mips/codegen-mips.h b/chromium/v8/src/mips/codegen-mips.h
index 32d7d0d65c7..822b94ad799 100644
--- a/chromium/v8/src/mips/codegen-mips.h
+++ b/chromium/v8/src/mips/codegen-mips.h
@@ -99,6 +99,7 @@ class StringCharLoadGenerator : public AllStatic {
class MathExpGenerator : public AllStatic {
public:
+ // Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
diff --git a/chromium/v8/src/mips/deoptimizer-mips.cc b/chromium/v8/src/mips/deoptimizer-mips.cc
index 16f75b86326..0662b17366b 100644
--- a/chromium/v8/src/mips/deoptimizer-mips.cc
+++ b/chromium/v8/src/mips/deoptimizer-mips.cc
@@ -78,88 +78,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
-// The back edge bookkeeping code matches the pattern:
-//
-// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
-// beq at, zero_reg, ok
-// lui t9, <interrupt stub address> upper
-// ori t9, <interrupt stub address> lower
-// jalr t9
-// nop
-// ok-label ----- pc_after points here
-//
-// We patch the code to the following form:
-//
-// addiu at, zero_reg, 1
-// beq at, zero_reg, ok ;; Not changed
-// lui t9, <on-stack replacement address> upper
-// ori t9, <on-stack replacement address> lower
-// jalr t9 ;; Not changed
-// nop ;; Not changed
-// ok-label ----- pc_after points here
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->addiu(at, zero_reg, 1);
- // Replace the stack check address in the load-immediate (lui/ori pair)
- // with the entry address of the replacement code.
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Restore the sltu instruction so beq can be taken again.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->slt(at, a3, zero_reg);
- // Restore the original call address.
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- static const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
- if (Assembler::IsAddImmediate(
- Assembler::instr_at(pc_after - 6 * kInstrSize))) {
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(osr_builtin->entry()));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -186,10 +104,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(s0.code(), params);
output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
output_frame->SetRegister(s2.code(), handler);
@@ -210,6 +125,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
#define __ masm()->
diff --git a/chromium/v8/src/mips/frames-mips.h b/chromium/v8/src/mips/frames-mips.h
index 437bf3a9f13..55951b58c47 100644
--- a/chromium/v8/src/mips/frames-mips.h
+++ b/chromium/v8/src/mips/frames-mips.h
@@ -154,7 +154,8 @@ const int kSafepointRegisterStackIndexMap[kNumRegs] = {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
+ static const int kCallerFPOffset =
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
diff --git a/chromium/v8/src/mips/full-codegen-mips.cc b/chromium/v8/src/mips/full-codegen-mips.cc
index df3f4170b1a..3ce2ab5f19c 100644
--- a/chromium/v8/src/mips/full-codegen-mips.cc
+++ b/chromium/v8/src/mips/full-codegen-mips.cc
@@ -171,12 +171,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
- __ Push(ra, fp, cp, a1);
- __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adjust fp to point to caller's fp.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -185,8 +180,20 @@ void FullCodeGenerator::Generate() {
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(at);
+ // Emit a loop to initialize stack cells for locals when optimizing for
+ // size. Otherwise, unroll the loop for maximum performance.
+ __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
+ if (FLAG_optimize_for_size && locals_count > 4) {
+ Label loop;
+ __ li(a2, Operand(locals_count));
+ __ bind(&loop);
+ __ Subu(a2, a2, 1);
+ __ push(t5);
+ __ Branch(&loop, gt, a2, Operand(zero_reg));
+ } else {
+ for (int i = 0; i < locals_count; i++) {
+ __ push(t5);
+ }
}
}
}
@@ -624,6 +631,7 @@ void FullCodeGenerator::StackValueContext::Plug(
Label done;
__ bind(materialize_true);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
+ // Push the value as the following branch can clobber at in long branch mode.
__ push(at);
__ Branch(&done);
__ bind(materialize_false);
@@ -1167,7 +1175,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(a1, cell);
+ __ li(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ sw(a2, FieldMemOperand(a1, Cell::kValueOffset));
@@ -1610,9 +1618,8 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(t1);
__ li(a0, Operand(Smi::FromInt(size)));
- __ push(a0);
+ __ Push(t1, a0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ pop(t1);
@@ -1639,6 +1646,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1653,13 +1662,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1774,6 +1781,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
@@ -1786,6 +1798,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1795,29 +1815,24 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
- } else if (expr->depth() > 1) {
- __ Push(a3, a2, a1);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ Push(a3, a2, a1);
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ li(a0, Operand(Smi::FromInt(flags)));
+ __ Push(a3, a2, a1, a0);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
@@ -2057,8 +2072,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
__ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(a3); // iter
- __ push(a0); // exception
+ __ Push(a3, a0); // iter, exception
__ jmp(&l_call);
// try { received = %yield result }
@@ -2096,8 +2110,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_next);
__ LoadRoot(a2, Heap::knext_stringRootIndex); // "next"
__ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ push(a3); // iter
- __ push(a0); // received
+ __ Push(a3, a0); // iter, received
// result = receiver[f](arg);
__ bind(&l_call);
@@ -2173,11 +2186,13 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Call(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(ra); // Return address.
- __ push(fp); // Caller's frame pointer.
- __ mov(fp, sp);
- __ push(cp); // Callee's context.
- __ push(t0); // Callee's JS Function.
+ // ra = return address.
+ // fp = caller's frame pointer.
+  // cp = callee's context.
+ // t0 = callee's JS function.
+ __ Push(ra, fp, cp, t0);
+ // Adjust FP to point to saved FP.
+ __ Addu(fp, sp, 2 * kPointerSize);
// Load the operand stack size.
__ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
@@ -2208,8 +2223,8 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(a2);
__ Branch(&push_operand_holes);
__ bind(&call_resume);
- __ push(a1);
- __ push(result_register());
+ ASSERT(!result_register().is(a1));
+ __ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
@@ -2304,7 +2319,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2313,7 +2328,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
  // recording binary operation stub.
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
__ Branch(&stub_call);
@@ -2387,7 +2401,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
OverwriteMode mode) {
__ mov(a0, result_register());
__ pop(a1);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
@@ -2439,8 +2453,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(a1, result_register());
- __ pop(a2);
- __ pop(a0); // Restore value.
+ __ Pop(a0, a2); // a0 = restored value.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -2582,8 +2595,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// - a1 is the key,
// - a2 is the receiver.
__ mov(a0, result_register());
- __ pop(a1); // Key.
- __ pop(a2);
+ __ Pop(a2, a1); // a1 = key.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
@@ -2711,27 +2723,25 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
+ // t2: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+ __ lw(t2, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
}
- __ push(a1);
- // Push the receiver of the enclosing function.
+ // t1: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
- __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(a1);
- // Push the language mode.
- __ li(a1, Operand(Smi::FromInt(language_mode())));
- __ push(a1);
+ __ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
+
+ // t0: the language mode.
+ __ li(t0, Operand(Smi::FromInt(language_mode())));
- // Push the start position of the scope the calls resides in.
+  // a1: the start position of the scope the call resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(a1);
// Do the runtime call.
+ __ Push(t2, t1, t0, a1);
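+  // A single four-register Push replaces the separate pushes of the old
+  // code and keeps the sp adjustment and the stores together.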
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
}
@@ -2804,9 +2814,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in v0)
// and the object holding it (returned in v1).
- __ push(context_register());
+ ASSERT(!context_register().is(a2));
__ li(a2, Operand(proxy->name()));
- __ push(a2);
+ __ Push(context_register(), a2);
__ CallRuntime(Runtime::kLoadContextSlot, 2);
__ Push(v0, v1); // Function, receiver.
@@ -2915,7 +2925,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(t0, v0, Operand(kSmiTagMask));
+ __ SmiTst(v0, t0);
Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2936,7 +2946,7 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+ __ NonNegativeSmiTst(v0, at);
Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3132,6 +3142,36 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+ __ lw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ lw(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ li(t0, 0x80000000);
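+  // -0.0 has the bit pattern 0x80000000 00000000: only the sign bit is set.
+  // If the upper word matches 0x80000000, the lower word must also be zero.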
+ Label not_nan;
+ __ Branch(&not_nan, ne, a2, Operand(t0));
+ __ mov(t0, zero_reg);
+ __ mov(a2, a1);
+ __ bind(&not_nan);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(t0), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -3349,48 +3389,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- // Save the new heap number in callee-saved register s0, since
- // we call out to external C code below.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
-
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(s0, v0); // Save result in s0, so it is saved thru CFunc call.
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in v0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a1, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a1);
- // Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a1);
- // Subtract and store the result in the heap number.
- __ sub_d(f0, f12, f14);
- __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
- __ mov(v0, s0);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3485,29 +3483,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
- __ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, kIndexIsTooLarge, index, Operand(at));
-
- __ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
-
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
- __ Subu(at, at, Operand(encoding_mask));
- __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3518,13 +3493,20 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value, at);
+ __ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
+ __ SmiTst(index, at);
+ __ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
+ __ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ Register scratch = t5;
+ __ EmitSeqStringSetCharCheck(
+ string, index, value, scratch, one_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3548,13 +3530,20 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
- __ pop(value);
- __ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
+ __ Pop(index, value);
if (FLAG_debug_code) {
+ __ SmiTst(value, at);
+ __ ThrowIf(ne, kNonSmiValue, at, Operand(zero_reg));
+ __ SmiTst(index, at);
+ __ ThrowIf(ne, kNonSmiIndex, at, Operand(zero_reg));
+ __ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ Register scratch = t5;
+ __ EmitSeqStringSetCharCheck(
+ string, index, value, scratch, two_byte_seq_type);
+ __ SmiTag(index, index);
}
__ SmiUntag(value, value);
@@ -3613,8 +3602,9 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into a0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3740,11 +3730,21 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ if (FLAG_new_string_add) {
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
+ __ pop(a1);
+ __ mov(a0, result_register()); // NewStringAddStub requires args in a0, a1.
+ NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ } else {
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ CallStub(&stub);
+ }
context()->Plug(v0);
}
@@ -3762,45 +3762,6 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -4309,9 +4270,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ ASSERT(!context_register().is(a2));
__ li(a2, Operand(var->name()));
- __ push(a2);
+ __ Push(context_register(), a2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(v0);
}
@@ -4442,15 +4403,48 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ __ mov(a0, v0);
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(v0, &no_conversion);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(v0, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(v0);
+ break;
+ case NAMED_PROPERTY:
+ __ sw(v0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ sw(v0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ Register scratch1 = a1;
+ Register scratch2 = t0;
+ __ li(scratch1, Operand(Smi::FromInt(count_value)));
+ __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2);
+ __ BranchOnNoOverflow(&done, scratch2);
+ // Call stub. Undo operation first.
+ __ Move(v0, a0);
+ __ jmp(&stub_call);
+ __ bind(&slow);
}
- __ mov(a0, v0);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4471,30 +4465,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
}
- __ mov(a0, result_register());
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ li(a1, Operand(Smi::FromInt(count_value)));
- __ AdduAndCheckForOverflow(v0, a0, a1, t0);
- __ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
-
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(v0, &done);
- __ bind(&stub_call);
- }
- __ mov(a1, a0);
+ __ bind(&stub_call);
+ __ mov(a1, v0);
__ li(a0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
expr->CountBinOpFeedbackId());
@@ -4543,8 +4522,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ mov(a0, result_register()); // Value.
- __ pop(a1); // Key.
- __ pop(a2); // Receiver.
+ __ Pop(a2, a1); // a1 = key, a2 = receiver.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
@@ -4926,6 +4904,83 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 6 * kInstrSize;
+ CodePatcher patcher(branch_address, 1);
+
+ switch (target_state) {
+ case INTERRUPT:
+ // slt at, a3, zero_reg (in case of count-based interrupts)
+ // beq at, zero_reg, ok
+ // lui t9, <interrupt stub address> upper
+ // ori t9, <interrupt stub address> lower
+ // jalr t9
+ // nop
+ // ok-label ----- pc_after points here
+ patcher.masm()->slt(at, a3, zero_reg);
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // addiu at, zero_reg, 1
+ // beq at, zero_reg, ok ;; Not changed
+ // lui t9, <on-stack replacement address> upper
+ // ori t9, <on-stack replacement address> lower
+ // jalr t9 ;; Not changed
+ // nop ;; Not changed
+ // ok-label ----- pc_after points here
+ patcher.masm()->addiu(at, zero_reg, 1);
+ break;
+ }
+ Address pc_immediate_load_address = pc - 4 * kInstrSize;
+ // Replace the stack check address in the load-immediate (lui/ori pair)
+ // with the entry address of the replacement code.
+ Assembler::set_target_address_at(pc_immediate_load_address,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ Address branch_address = pc - 6 * kInstrSize;
+ Address pc_immediate_load_address = pc - 4 * kInstrSize;
+
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+ if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->InterruptCheck()->entry()));
+ return INTERRUPT;
+ }
+
+ ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+
+ if (reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_immediate_load_address)) ==
+ reinterpret_cast<uint32_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry()));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
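The two BackEdgeTable routines above patch and classify a fixed six-instruction back-edge sequence. A compact sketch of the classification logic, with the assembler predicate and builtin entries reduced to plain parameters (names here are illustrative, not V8 API):

    #include <cstdint>

    enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

    // first_is_addiu stands for Assembler::IsAddImmediate() on the first
    // instruction (pc - 6 * kInstrSize); target is the address materialized
    // by the lui/ori pair at pc - 4 * kInstrSize.
    BackEdgeState Classify(bool first_is_addiu,
                           uintptr_t target,
                           uintptr_t osr_entry) {
      if (!first_is_addiu) return INTERRUPT;  // still 'slt at, a3, zero_reg'
      if (target == osr_entry) return ON_STACK_REPLACEMENT;
      return OSR_AFTER_STACK_CHECK;           // the only other patched form
    }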
diff --git a/chromium/v8/src/mips/ic-mips.cc b/chromium/v8/src/mips/ic-mips.cc
index e250e0ee4a5..4c1ddbd5caf 100644
--- a/chromium/v8/src/mips/ic-mips.cc
+++ b/chromium/v8/src/mips/ic-mips.cc
@@ -346,7 +346,7 @@ Object* CallIC_Miss(Arguments args);
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- a1 : receiver
// -- a2 : name
@@ -448,7 +448,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -512,7 +512,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_ic_state) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -578,8 +578,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a2); // Save the key.
- __ Push(a1, a2); // Pass the receiver and the key.
+ __ Push(a2, a1, a2); // Save the key and pass the receiver and the key.
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(a2); // Restore the key.
}
@@ -610,7 +609,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMonomorphicCacheProbe(masm,
argc,
Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
+ kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -656,7 +655,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -827,7 +826,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -862,7 +861,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -892,7 +891,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -905,9 +904,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ Push(a1, a0);
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1132,7 +1130,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -1181,6 +1179,22 @@ static void KeyedStoreGenerateGenericHelper(
__ Branch(fast_double, ne, elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
}
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(address, address, at);
+ __ lw(scratch_value, MemOperand(address));
+ __ Branch(&holecheck_passed1, ne, scratch_value,
+ Operand(masm->isolate()->factory()->the_hole_value()));
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
@@ -1231,6 +1245,21 @@ static void KeyedStoreGenerateGenericHelper(
__ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
__ Branch(slow, ne, elements_map, Operand(at));
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so,
+ // go to the runtime.
+ __ Addu(address, elements,
+ Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
+ - kHeapObjectTag));
+ __ sll(at, key, kPointerSizeLog2);
+ __ addu(address, address, at);
+ __ lw(scratch_value, MemOperand(address));
+ __ Branch(&fast_double_without_map_check, ne, scratch_value,
+ Operand(kHoleNanUpper32));
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
@@ -1324,10 +1353,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
+ 1 << Map::kIsObserved));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -1422,11 +1452,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1437,10 +1467,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// Push receiver, key and value for runtime call.
__ Push(a2, a1, a0);
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
@@ -1486,7 +1514,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -1496,7 +1524,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
@@ -1622,12 +1650,10 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
return;
}
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
address, andi_instruction_address, delta);
}
-#endif
Address patch_address =
andi_instruction_address - delta * Instruction::kInstrSize;
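The HOLECHECK hunks above test for V8's hole sentinel in double arrays by loading only the upper word of each 8-byte slot. The same test as a plain C++ sketch; the exact hole-NaN bit pattern is an assumption about this V8 vintage:

    #include <cstdint>

    // Assumption: this era of V8 encoded the hole as the NaN
    // 0x7FFFFFFFFFFFFFFF, so kHoleNanUpper32 is its high word. On
    // little-endian MIPS that word sits at byte offset
    // sizeof(kHoleNanLower32) == 4 within the slot, which is why the
    // generated code folds that constant into the base address.
    const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;

    bool IsHoleAt(const uint64_t* elements, int index) {
      return static_cast<uint32_t>(elements[index] >> 32) == kHoleNanUpper32;
    }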
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.cc b/chromium/v8/src/mips/lithium-codegen-mips.cc
index 4964a242623..3bf0d130829 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.cc
+++ b/chromium/v8/src/mips/lithium-codegen-mips.cc
@@ -98,21 +98,35 @@ void LChunkBuilder::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
}
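SaveCallerDoubles and RestoreCallerDoubles above must agree on one layout: allocated double registers, visited in BitVector order, occupy consecutive kDoubleSize slots from sp. A sketch of that slot assignment, where the vector stands in for chunk()->allocated_double_registers():

    #include <vector>

    const int kDoubleSize = 8;

    // Returns the sp-relative byte offset for each allocated double
    // register, in the same order both the save and restore loops walk.
    std::vector<int> DoubleSlotOffsets(const std::vector<int>& allocated) {
      std::vector<int> offsets;
      for (size_t i = 0; i < allocated.size(); ++i) {
        offsets.push_back(static_cast<int>(i) * kDoubleSize);  // sp + i * 8
      }
      return offsets;
    }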
@@ -151,21 +165,7 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ Push(ra, fp, cp);
- __ Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- } else {
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ Push(ra, fp, cp, a1);
- // Add unused nop to ensure prologue sequence is identical for
- // full-codegen and lithium-codegen.
- __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
- // Adj. FP to point to saved FP.
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- }
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -192,16 +192,7 @@ bool LCodeGen::GeneratePrologue() {
}
if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
// Possibly allocate a local context.
@@ -242,6 +233,8 @@ bool LCodeGen::GeneratePrologue() {
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
+ // We have not executed any compiled code yet, so cp still holds the
+ // incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -263,45 +256,15 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- RecordAndUpdatePosition(instr->position());
-
- instr->CompileToNative(this);
- }
- EnsureSpaceForLazyDeopt();
- last_lazy_deopt_pc_ = masm()->pc_offset();
- return !is_aborted();
-}
-
-
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -317,7 +280,7 @@ bool LCodeGen::GenerateDeferredCode() {
__ MultiPush(cp.bit() | fp.bit() | ra.bit());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
Comment(";;; Deferred code");
}
code->Generate();
@@ -358,6 +321,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (deopt_jump_table_[i].needs_frame) {
+ ASSERT(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ Branch(&needs_frame);
} else {
@@ -369,10 +333,14 @@ bool LCodeGen::GenerateDeoptJumpTable() {
ASSERT(info()->IsStub());
__ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ Call(t9);
}
} else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
__ Call(t9);
}
}
@@ -426,7 +394,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
ASSERT(r.IsSmiOrTagged());
- __ LoadObject(scratch, literal);
+ __ li(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -548,17 +516,36 @@ Operand LCodeGen::ToOperand(LOperand* op) {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize;
+}
+
+
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()));
+ } else {
+ // Retrieve the parameter without an eager stack frame, relative to
+ // the stack pointer.
+ return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ if (NeedsEagerFrame()) {
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ } else {
+ // Retrieve the parameter without an eager stack frame, relative to
+ // the stack pointer.
+ return MemOperand(
+ sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+ }
}
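ToMemOperand and ToHighMemOperand now address parameters off sp when no eager frame exists. The helper's arithmetic, restated as a sketch: parameter slots carry negative indices, and index -1 is the slot nearest sp.

    #include <cassert>

    const int kPointerSize = 4;  // MIPS32

    int ArgumentsOffsetWithoutFrameSketch(int index) {
      assert(index < 0);                   // only parameters reach this path
      return -(index + 1) * kPointerSize;  // -1 -> 0, -2 -> 4, -3 -> 8, ...
    }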
@@ -701,10 +688,8 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}
@@ -712,20 +697,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ __ Move(cp, ToRegister(context));
+ } else if (context->IsStackSlot()) {
+ __ lw(cp, ToMemOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -784,13 +785,23 @@ void LCodeGen::DeoptimizeIf(Condition condition,
return;
}
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
- if (FLAG_deopt_every_n_times == 1 &&
- !info()->IsStub() &&
- info()->opt_count() == id) {
- ASSERT(frame_is_built_);
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Register scratch = scratch0();
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ __ Push(a1, scratch);
+ __ li(scratch, Operand(count));
+ __ lw(a1, MemOperand(scratch));
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
+ __ li(a1, Operand(FLAG_deopt_every_n_times));
+ __ sw(a1, MemOperand(scratch));
+ __ Pop(a1, scratch);
+
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
- return;
+ __ bind(&no_deopt);
+ __ sw(a1, MemOperand(scratch));
+ __ Pop(a1, scratch);
}
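The rewritten FLAG_deopt_every_n_times block above keeps a per-isolate countdown instead of supporting only n == 1. Its protocol as a sketch: decrement at every eligible deopt point; when the counter hits zero, reload it with n and take the bailout, so one check in every n deoptimizes.

    // counter stands in for the cell behind
    // ExternalReference::stress_deopt_count().
    bool ShouldStressDeopt(int* counter, int n) {
      if (--*counter != 0) return false;  // common path: store and continue
      *counter = n;                       // reload, then take this bailout
      return true;
    }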
if (info()->ShouldTrapOnDeopt()) {
@@ -803,7 +814,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (condition == al && frame_is_built_) {
+ // Go through the jump table if we need to handle a condition, build a
+ // frame, or restore caller doubles.
+ if (condition == al && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
// We often have several deopts to the same entry, reuse the last
@@ -835,26 +849,31 @@ void LCodeGen::DeoptimizeIf(Condition condition,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -950,10 +969,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
}
@@ -964,7 +979,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -986,17 +1001,10 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1046,6 +1054,7 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
@@ -1063,11 +1072,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1118,35 +1122,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ And(result_reg, left_reg, divisor - 1);
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- const Register left_reg = ToRegister(instr->left());
- const Register result_reg = ToRegister(instr->result());
- const Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
- &left_is_not_negative, ge, left_reg, Operand(zero_reg));
- __ subu(result_reg, zero_reg, left_reg);
- __ And(result_reg, result_reg, divisor - 1);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
- }
- __ Branch(USE_DELAY_SLOT, &done);
- __ subu(result_reg, zero_reg, result_reg);
- }
-
- __ bind(&left_is_not_negative);
- __ And(result_reg, left_reg, divisor - 1);
- __ bind(&done);
-
} else {
const Register scratch = scratch0();
const Register left_reg = ToRegister(instr->left());
@@ -1408,11 +1383,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
Register left = ToRegister(instr->left());
LOperand* right_op = instr->right();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right_op->IsConstantOperand() && !can_overflow) {
+ if (right_op->IsConstantOperand()) {
int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
@@ -1423,7 +1398,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
- __ Subu(result, zero_reg, left);
+ if (overflow) {
+ __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ Subu(result, zero_reg, left);
+ }
break;
case 0:
if (bailout_on_minus_zero) {
@@ -1444,27 +1424,23 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ sll(result, left, shift);
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ sll(scratch, left, shift);
- __ Addu(result, scratch, left);
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ sll(scratch, left, shift);
- __ Subu(result, scratch, left);
- }
-
- // Correct the sign of the result is the constant is negative.
- if (constant < 0) {
- __ Subu(result, zero_reg, result);
- }
-
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ sll(result, left, shift);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ sll(scratch, left, shift);
+ __ Addu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ sll(scratch, left, shift);
+ __ Subu(result, scratch, left);
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ Subu(result, zero_reg, result);
} else {
// Generate standard code.
__ li(at, constant);
@@ -1473,12 +1449,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ Or(ToRegister(instr->temp()), left, right);
- }
+ ASSERT(right_op->IsRegister());
+ Register right = ToRegister(right_op);
- if (can_overflow) {
+ if (overflow) {
// hi:lo = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1502,12 +1476,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
Label done;
- __ Branch(&done, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt,
+ __ Xor(at, left, right);
+ __ Branch(&done, ge, at, Operand(zero_reg));
+ // Bail out if the result is minus zero.
+ DeoptimizeIf(eq,
instr->environment(),
- ToRegister(instr->temp()),
+ result,
Operand(zero_reg));
__ bind(&done);
}
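The new bailout_on_minus_zero sequence above replaces the OR-into-temp scheme with a sign test: an integer product of differing-sign operands that comes out zero is exactly the case that would have been -0.0. A sketch of that predicate:

    #include <cstdint>

    // True when left * right would be -0.0 under double semantics: the
    // integer result is zero while the operand signs differ, i.e. the
    // XOR of the operands has its sign bit set.
    bool ProductIsMinusZero(int32_t left, int32_t right) {
      int32_t result = left * right;
      return result == 0 && ((left ^ right) < 0);
    }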
@@ -1703,7 +1678,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), value);
+ __ li(ToRegister(instr->result()), value);
}
@@ -1760,7 +1735,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
- __ And(at, object, Operand(kSmiTagMask));
+ __ SmiTst(object, at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
__ GetObjectType(object, scratch, scratch);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
@@ -1787,42 +1762,87 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register value = ToRegister(instr->value());
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+ }
Register scratch = scratch0();
- String::Encoding encoding = instr->encoding();
+ ASSERT(!scratch.is(string));
+ ASSERT(!scratch.is(ToRegister(index)));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Addu(scratch, string, ToRegister(index));
+ } else {
+ STATIC_ASSERT(kUC16Size == 2);
+ __ sll(scratch, ToRegister(index), 1);
+ __ Addu(scratch, string, scratch);
+ }
+ return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
if (FLAG_debug_code) {
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+ Register scratch = scratch0();
+ __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ And(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+ __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
}
- __ Addu(scratch,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ lbu(result, operand);
+ } else {
+ __ lhu(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+ Register value = ToRegister(instr->value());
+
+ if (FLAG_debug_code) {
+ Register scratch = scratch0();
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
+ }
+
+ MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ Addu(at, scratch, index);
- __ sb(value, MemOperand(at));
+ __ sb(value, operand);
} else {
- __ sll(at, index, 1);
- __ Addu(at, scratch, at);
- __ sh(value, MemOperand(at));
+ __ sh(value, operand);
}
}
void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), at);
- __ push(input_reg);
+ __ push(ToRegister(instr->value()));
+ ASSERT(ToRegister(instr->context()).is(cp));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1974,11 +1994,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->left()).is(a1));
ASSERT(ToRegister(instr->right()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
@@ -1986,13 +2007,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
Condition condition,
@@ -2042,6 +2056,16 @@ void LCodeGen::EmitBranchF(InstrType instr,
template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
+}
+
+
+template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
Condition condition,
FPURegister src1,
@@ -2057,25 +2081,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq, scratch0(), Operand(at));
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2140,7 +2145,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
- __ And(at, reg, Operand(kSmiTagMask));
+ __ SmiTst(reg, at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
@@ -2223,6 +2228,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = eq;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = ne;
+ break;
case Token::LT:
cond = is_unsigned ? lo : lt;
break;
@@ -2329,6 +2338,32 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+ Register scratch = ToRegister(instr->temp());
+
+ if (rep.IsDouble()) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
+ __ FmoveHigh(scratch, value);
+ __ li(at, 0x80000000);
+ } else {
+ Register value = ToRegister(instr->value());
+ __ CheckMap(value,
+ scratch,
+ Heap::kHeapNumberMapRootIndex,
+ instr->FalseLabel(chunk()),
+ DO_SMI_CHECK);
+ __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
+ __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+ __ mov(at, zero_reg);
+ }
+ EmitBranch(instr, eq, scratch, Operand(at));
+}
+
+
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
@@ -2439,6 +2474,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2598,6 +2634,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label true_label, done;
ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
@@ -2708,13 +2745,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
// Get the temp register reserved by the instruction. This needs to be t0 as
// its slot of the pushing of safepoint registers is used to communicate the
// offset to the location of the map check.
Register temp = ToRegister(instr->temp());
ASSERT(temp.is(t0));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
+ __ li(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 7;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@@ -2736,15 +2774,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2768,21 +2799,15 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in v0.
+ // Runtime::TraceExit returns its parameter in v0. We're leaving the code
+ // managed by the register allocator and tearing down the frame, so it's
+ // safe to write to the context register.
__ push(v0);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
@@ -2814,7 +2839,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2824,6 +2849,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -2840,7 +2866,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register cell = scratch0();
// Load the cell.
- __ li(cell, Operand(instr->hydrogen()->cell()));
+ __ li(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2861,6 +2887,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->global_object()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -2937,7 +2964,8 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- __ lw(result, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ __ Load(result, operand, access.representation());
return;
}
@@ -2948,16 +2976,17 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ lw(result, FieldMemOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, offset));
+ object = result;
}
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ Load(result, operand, access.representation());
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -3011,6 +3040,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -3023,22 +3058,44 @@ void LCodeGen::DoLoadExternalArrayPointer(
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
- if (instr->length()->IsConstantOperand() &&
- instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them, add one more.
+ if (instr->length()->IsConstantOperand()) {
int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ lw(result, MemOperand(arguments, index * kPointerSize));
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int index = (const_length - const_index) + 1;
+ __ lw(result, MemOperand(arguments, index * kPointerSize));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ li(at, Operand(const_length + 1));
+ __ Subu(result, at, index);
+ __ sll(at, result, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
+ }
+ } else if (instr->index()->IsConstantOperand()) {
+ Register length = ToRegister(instr->length());
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int loc = const_index - 1;
+ if (loc != 0) {
+ __ Subu(result, length, Operand(loc));
+ __ sll(at, result, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
+ } else {
+ __ sll(at, length, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
+ }
} else {
Register length = ToRegister(instr->length());
Register index = ToRegister(instr->index());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them, add one more.
- __ subu(length, length, index);
- __ Addu(length, length, Operand(1));
- __ sll(length, length, kPointerSizeLog2);
- __ Addu(at, arguments, Operand(length));
- __ lw(result, MemOperand(at, 0));
+ __ Subu(result, length, index);
+ __ Addu(result, result, 1);
+ __ sll(at, result, kPointerSizeLog2);
+ __ Addu(at, arguments, at);
+ __ lw(result, MemOperand(at));
}
}
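All four branches of DoAccessArgumentsAt above compute the same slot: two words (saved fp and return address) sit between the frame pointer and the last argument, so argument 'index' lives (length - index) + 1 words above the arguments base. A one-line sketch of that offset:

    const int kPointerSizeLog2 = 2;  // 4-byte words on MIPS32

    int ArgumentSlotByteOffset(int length, int index) {
      return ((length - index) + 1) << kPointerSizeLog2;
    }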
@@ -3132,28 +3189,31 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
Register scratch = scratch0();
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
+
+ int base_offset =
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ (instr->additional_index() << element_size_shift);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- } else {
- key = ToRegister(instr->key());
+ base_offset += constant_key << element_size_shift;
}
+ __ Addu(scratch, elements, Operand(base_offset));
- int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- ((constant_key + instr->additional_index()) << element_size_shift);
if (!key_is_constant) {
- __ sll(scratch, key, shift_size);
- __ Addu(elements, elements, scratch);
+ key = ToRegister(instr->key());
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ sll(at, key, shift_size);
+ __ Addu(scratch, scratch, at);
}
- __ Addu(elements, elements, Operand(base_offset));
- __ ldc1(result, MemOperand(elements));
+
+ __ ldc1(result, MemOperand(scratch));
+
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
}
@@ -3172,7 +3232,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
instr->additional_index());
store_base = elements;
} else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
+ Register key = ToRegister(instr->key());
// Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
@@ -3191,7 +3251,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ And(scratch, result, Operand(kSmiTagMask));
+ __ SmiTst(result, scratch);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
@@ -3257,6 +3317,7 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->key()).is(a0));
@@ -3311,12 +3372,13 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
+ Label global_object, result_in_receiver;
// Do not transform the receiver to object for strict mode
// functions.
@@ -3330,7 +3392,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
__ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
- __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
+ __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
// Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
@@ -3339,19 +3401,29 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ Branch(&global_object, eq, receiver, Operand(scratch));
// Deoptimize if the receiver is not a JS object.
- __ And(scratch, receiver, Operand(kSmiTagMask));
+ __ SmiTst(receiver, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr->environment(),
scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(&receiver_ok);
+ __ Branch(&result_in_receiver);
__ bind(&global_object);
- __ lw(receiver, GlobalObjectOperand());
- __ lw(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
+
+ __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ lw(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(result,
+ FieldMemOperand(result, JSGlobalObject::kGlobalReceiverOffset));
+ if (result.is(receiver)) {
+ __ bind(&result_in_receiver);
+ } else {
+ Label result_ok;
+ __ Branch(&result_ok);
+ __ bind(&result_in_receiver);
+ __ mov(result, receiver);
+ __ bind(&result_ok);
+ }
}
@@ -3394,7 +3466,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is a0, as expected
@@ -3402,7 +3473,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3431,11 +3501,11 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
// If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- __ mov(result, cp);
- return;
- }
+ if (info()->IsOptimizing()) {
+ __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in cp.
+ ASSERT(result.is(cp));
}
}
@@ -3449,7 +3519,8 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ ASSERT(ToRegister(instr->context()).is(cp));
+ __ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
// The context is the first argument.
__ Push(cp, scratch0(), scratch1());
@@ -3458,8 +3529,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}
@@ -3482,11 +3554,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (a1_state == A1_UNINITIALIZED) {
- __ LoadHeapObject(a1, function);
+ __ li(a1, function);
}
// Change context.
@@ -3512,9 +3583,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ InvokeFunction(
function, expected, count, CALL_FUNCTION, generator, call_kind);
}
-
- // Restore context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3531,6 +3599,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+ ASSERT(instr->context() != NULL);
+ ASSERT(ToRegister(instr->context()).is(cp));
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3572,7 +3642,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(v0))
__ mov(tmp1, v0);
@@ -3812,68 +3883,6 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- // Load native context.
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ lw(native_context, FieldMemOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds).
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- Register scratch4 = scratch0();
- __ And(scratch3, state0, Operand(0xFFFF));
- __ li(scratch4, Operand(18273));
- __ Mul(scratch3, scratch3, scratch4);
- __ srl(state0, state0, 16);
- __ Addu(state0, scratch3, state0);
- // Save state[0].
- __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ And(scratch3, state1, Operand(0xFFFF));
- __ li(scratch4, Operand(36969));
- __ Mul(scratch3, scratch3, scratch4);
- __ srl(state1, state1, 16);
- __ Addu(state1, scratch3, state1);
- // Save state[1].
- __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = scratch4;
- __ And(random, state1, Operand(0x3FFFF));
- __ sll(state0, state0, 14);
- __ Addu(random, random, state0);
-
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(scratch3, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- DoubleRegister result = ToDoubleRegister(instr->result());
- __ Move(result, random, scratch3);
- // Move 0x4130000000000000 to FPU.
- DoubleRegister scratch5 = double_scratch0();
- __ Move(scratch5, zero_reg, scratch3);
- __ sub_d(result, result, scratch5);
-}
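
The DoRandom code deleted above is two 16-bit multiply-with-carry generators whose outputs are packed into 32 random bits and then turned into a double in [0, 1) via the 0x41300000 exponent trick. A standalone C++ sketch of the same arithmetic (the seed values here are made up):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t state0 = 0x12345678, state1 = 0x9ABCDEF0;  // hypothetical seeds

  // state[i] = multiplier * (state[i] & 0xFFFF) + (state[i] >> 16)
  state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
  state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
  uint32_t random = (state0 << 14) + (state1 & 0x3FFFF);

  // 0x41300000:<random> is the double 2^20 + random * 2^-32, so
  // subtracting 1.0 * 2^20 leaves random * 2^-32, uniform in [0, 1).
  uint64_t bits = (uint64_t{0x41300000} << 32) | random;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  result -= 1048576.0;  // 1.0 * 2^20
  std::printf("%.17g\n", result);
  return 0;
}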
-
-
void LCodeGen::DoMathExp(LMathExp* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
@@ -3890,6 +3899,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3898,6 +3910,9 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3906,6 +3921,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3914,6 +3932,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ mov(cp, zero_reg);
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3921,17 +3942,16 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3944,17 +3964,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
@@ -3963,23 +3984,27 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ li(a2, Operand(instr->name()));
CallCode(ic, mode, instr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ mov(sp, fp);
+ __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ } else {
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
@@ -3988,7 +4013,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ li(a2, Operand(instr->name()));
CallCode(ic, mode, instr);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -4004,6 +4028,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -4017,6 +4042,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -4077,7 +4103,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ Addu(result, base, Operand(instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ Addu(result, base, Operand(ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ Addu(result, base, offset);
+ }
}
@@ -4091,7 +4123,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (access.IsExternalMemory()) {
Register value = ToRegister(instr->value());
- __ sw(value, MemOperand(object, offset));
+ MemOperand operand = MemOperand(object, offset);
+ __ Store(value, operand, representation);
return;
}
@@ -4100,7 +4133,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- __ And(scratch, value, Operand(kSmiTagMask));
+ __ SmiTst(value, scratch);
DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
}
} else if (FLAG_track_double_fields && representation.IsDouble()) {
@@ -4136,7 +4169,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
- __ sw(value, FieldMemOperand(object, offset));
+ MemOperand operand = FieldMemOperand(object, offset);
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
@@ -4150,7 +4184,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
} else {
__ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ sw(value, FieldMemOperand(scratch, offset));
+ MemOperand operand = FieldMemOperand(scratch, offset);
+ __ Store(value, operand, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
@@ -4168,6 +4203,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -4241,20 +4277,25 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key <<
- element_size_shift);
+ if (constant_key != 0) {
+ __ Addu(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
} else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
+ __ sll(address, key, shift_size);
+ __ Addu(address, external_pointer, address);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+ __ swc1(double_scratch0(), MemOperand(address, additional_offset));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(scratch0(), additional_offset));
+ __ sdc1(value, MemOperand(address, additional_offset));
}
} else {
Register value(ToRegister(instr->value()));
@@ -4296,33 +4337,29 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DoubleRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
- Register key = no_reg;
Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
+ Label not_nan, done;
// Calculate the effective address of the slot in the array to store the
// double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+ __ Addu(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ Addu(scratch, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, ToRegister(instr->key()), shift_size);
+ __ Addu(scratch, scratch, at);
}
if (instr->NeedsCanonicalization()) {
@@ -4333,12 +4370,17 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ Move(double_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+ __ Branch(&done);
}
__ bind(&not_nan);
__ sdc1(value, MemOperand(scratch, instr->additional_index() <<
element_size_shift));
+ __ bind(&done);
}
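
The restructured double store above canonicalizes NaNs before they reach the FixedDoubleArray backing store, so no stored bit pattern can collide with the hole sentinel. (The slot address itself is elements + (key << element_size_shift) + header size - heap-object tag, as the Addu/sll sequence computes.) A sketch of the value-level effect; the canonical pattern here is an assumption, not necessarily V8's exact constant:

#include <cmath>
#include <cstdint>
#include <cstring>

double CanonicalizeForStore(double value) {
  if (std::isnan(value)) {  // the is_nan branch above
    const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;  // quiet NaN
    double canonical;
    std::memcpy(&canonical, &kCanonicalNaNBits, sizeof(canonical));
    return canonical;
  }
  return value;  // the not_nan branch: stored as-is
}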
@@ -4404,6 +4446,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->object()).is(a2));
ASSERT(ToRegister(instr->key()).is(a1));
ASSERT(ToRegister(instr->value()).is(a0));
@@ -4436,6 +4479,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetRAState(), kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(
this, Safepoint::kWithRegistersAndDoubles);
__ mov(a0, object_reg);
@@ -4452,18 +4496,28 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- Label fail;
- __ TestJSArrayForAllocationMemento(object, temp, ne, &fail);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+ ne, &no_memento_found);
DeoptimizeIf(al, instr->environment());
- __ bind(&fail);
+ __ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(ToRegister(instr->context()).is(cp));
+ if (FLAG_new_string_add) {
+ ASSERT(ToRegister(instr->left()).is(a1));
+ ASSERT(ToRegister(instr->right()).is(a0));
+ NewStringAddStub stub(instr->hydrogen()->flags(),
+ isolate()->heap()->GetPretenureMode());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4514,7 +4568,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ instr->context());
__ AssertSmi(v0);
__ SmiUntag(v0);
__ StoreToSafepointRegisterSlot(v0, result);
@@ -4567,7 +4622,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -4594,10 +4649,13 @@ void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* output = instr->result();
Register scratch = scratch0();
- __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
+ ASSERT(output->IsRegister());
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ SmiTag(ToRegister(output), ToRegister(input));
}
}
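
DoInteger32ToSmi above now emits the overflow-checking tag only when range analysis cannot prove the value fits. On this 32-bit port a smi is the integer shifted left one bit with a zero tag bit; a sketch of the tag-and-check step:

#include <cstdint>

// Returns true when tagging overflowed, i.e. the value needed more than
// 31 bits; the caller would deoptimize in that case (sketch only).
bool SmiTagCheckOverflow(int32_t value, int32_t* tagged) {
  *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (*tagged >> 1) != value;  // sign bit lost -> out of smi range
}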
@@ -4666,13 +4724,12 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
LNumberTagU* instr_;
};
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
- __ SmiTag(reg, reg);
+ __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
+ __ SmiTag(result, input);
__ bind(deferred->exit());
}
@@ -4718,7 +4775,15 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ StoreToSafepointRegisterSlot(zero_reg, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Move(dst, v0);
__ Subu(dst, dst, kHeapObjectTag);
@@ -4774,7 +4839,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4809,34 +4882,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
LEnvironment* env,
NumberUntagDMode mode) {
Register scratch = scratch0();
-
- Label load_smi, heap_number, done;
-
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
// Heap number map check.
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(ne, env, scratch, Operand(at));
+ if (can_convert_undefined_to_nan) {
+ __ Branch(&convert, ne, scratch, Operand(at));
} else {
- Label heap_number, convert;
- __ Branch(&heap_number, eq, scratch, Operand(at));
-
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
-
- __ bind(&convert);
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
- __ Branch(&done);
-
- __ bind(&heap_number);
+ DeoptimizeIf(ne, env, scratch, Operand(at));
}
- // Heap number to double register conversion.
+ // Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg.low());
@@ -4845,11 +4903,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ }
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
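
EmitNumberUntagD above was reordered so the common heap-number load falls through and the undefined-to-NaN conversion sits out of line. The conversion order as a toy C++ model, with std::optional standing in for the deoptimization (the minus-zero deopt is omitted):

#include <cmath>
#include <optional>

enum Kind { kSmi, kHeapNumber, kUndefined, kOther };
struct Tagged { Kind kind; double payload; };  // payload: smi or heap value

std::optional<double> NumberUntagD(Tagged v, bool undef_to_nan) {
  if (v.kind == kSmi) return v.payload;         // load_smi path
  if (v.kind == kHeapNumber) return v.payload;  // fall-through load
  if (undef_to_nan && v.kind == kUndefined)
    return std::nan("");                        // the convert label
  return std::nullopt;                          // DeoptimizeIf
}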
@@ -4881,19 +4947,32 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- Label heap_number;
- __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ Label no_heap_number, check_bools, check_false;
+ __ Branch(&no_heap_number, ne, scratch1, Operand(at)); // HeapNumber map?
+ __ mov(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ Branch(&done);
+
+ // Check for Oddballs: undefined and false are converted to zero, and
+ // true to one, for truncating conversions.
+ __ bind(&no_heap_number);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+ __ Branch(&check_bools, ne, input_reg, Operand(at));
ASSERT(ToRegister(instr->result()).is(input_reg));
- __ mov(input_reg, zero_reg);
- __ Branch(&done);
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
- __ bind(&heap_number);
- __ mov(scratch2, input_reg);
- __ TruncateHeapNumberToI(input_reg, scratch2);
+ __ bind(&check_bools);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&check_false, ne, scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ li(input_reg, Operand(1)); // In delay slot.
+
+ __ bind(&check_false);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ mov(input_reg, zero_reg); // In delay slot.
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
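
The truncating path above now converts oddballs inline instead of deoptimizing on everything except undefined. A toy model of the branch structure, again with std::optional for the deopt and ECMA-style ToInt32 truncation assumed for the heap-number case:

#include <cmath>
#include <cstdint>
#include <optional>

enum Kind { kHeapNumber, kUndefined, kTrue, kFalse, kOther };

int32_t ToInt32(double d) {  // NaN/Infinity -> 0, otherwise modulo 2^32
  if (!std::isfinite(d)) return 0;
  double m = std::fmod(std::trunc(d), 4294967296.0);
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

std::optional<int32_t> TruncateTaggedToI(Kind kind, double number) {
  switch (kind) {
    case kHeapNumber: return ToInt32(number);  // TruncateHeapNumberToI
    case kUndefined:
    case kFalse:      return 0;                // converted to zero
    case kTrue:       return 1;                // converted to one
    default:          return std::nullopt;     // DeoptimizeIf
  }
}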
@@ -4945,14 +5024,18 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Let the deferred code handle the HeapObject case.
- __ JumpIfNotSmi(input_reg, deferred->entry());
+ // Let the deferred code handle the HeapObject case.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
- // Smi to int32 conversion.
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ // Smi to int32 conversion.
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -5047,7 +5130,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input), at);
DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}
@@ -5055,7 +5138,7 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->IsHeapObject()) {
LOperand* input = instr->value();
- __ And(at, ToRegister(input), Operand(kSmiTagMask));
+ __ SmiTst(ToRegister(input), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
}
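
Several checks in this file change from an explicit And with kSmiTagMask to the SmiTst macro; both compute the same predicate. Assuming the 32-bit tagging scheme, where smis carry a zero low bit:

#include <cstdint>

bool IsSmi(uint32_t tagged_word) {
  const uint32_t kSmiTagMask = 1;        // low bit: 0 = smi, 1 = pointer
  return (tagged_word & kSmiTagMask) == 0;
}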
@@ -5102,7 +5185,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
@@ -5122,10 +5205,13 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(object);
- CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ mov(cp, zero_reg);
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
- __ And(at, scratch0(), Operand(kSmiTagMask));
+ __ SmiTst(scratch0(), at);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
@@ -5153,7 +5239,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
@@ -5162,12 +5247,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
// Do the CompareMap() directly within the Branch() and DeoptimizeIf().
if (instr->hydrogen()->has_migration_target()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
@@ -5264,7 +5350,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size,
@@ -5317,16 +5407,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(size));
}
+ int flags = AllocateDoubleAlignFlag::encode(
+ instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5340,6 +5436,7 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
Label materialized;
// Registers will be used as follows:
// t3 = literals array.
@@ -5348,7 +5445,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// a2 and t0-t2 are used as temporaries.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(t3, instr->hydrogen()->literals());
+ __ li(t3, instr->hydrogen()->literals());
__ lw(a1, FieldMemOperand(t3, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&materialized, ne, a1, Operand(at));
@@ -5392,6 +5489,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(cp));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
@@ -5574,14 +5672,13 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
__ nop();
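
EnsureSpaceForLazyDeopt now takes the required gap as a parameter instead of hard-coding Deoptimizer::patch_size(). The padding rule, as plain arithmetic (kInstrSize is 4 on MIPS):

int NopsNeeded(int current_pc, int last_lazy_deopt_pc, int space_needed) {
  const int kInstrSize = 4;  // one MIPS instruction
  int padding = last_lazy_deopt_pc + space_needed - current_pc;
  return padding > 0 ? padding / kInstrSize : 0;  // nops to emit
}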
@@ -5592,7 +5689,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5616,6 +5713,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDummyUse(LDummyUse* instr) {
// Nothing to see here, move on!
}
@@ -5623,6 +5725,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ LoadContextFromDeferred(instr->context());
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5654,10 +5757,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(cp));
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5669,7 +5774,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.h b/chromium/v8/src/mips/lithium-codegen-mips.h
index 84105cae35f..9fbd336b1e6 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.h
+++ b/chromium/v8/src/mips/lithium-codegen-mips.h
@@ -31,6 +31,7 @@
#include "deoptimizer.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "mips/lithium-mips.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -42,43 +43,26 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -177,31 +161,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
Register scratch0() { return kLithiumScratchReg; }
Register scratch1() { return kLithiumScratchReg2; }
DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
- int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
@@ -214,14 +183,15 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
@@ -245,7 +215,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -254,9 +225,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallRuntime(function, num_arguments, instr);
}
+ void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
enum A1State {
A1_UNINITIALIZED,
@@ -272,8 +245,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
CallKind call_kind,
A1State a1_state);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -309,6 +280,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
+ MemOperand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
@@ -324,11 +299,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr,
Condition condition,
@@ -340,6 +317,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
FPURegister src1,
FPURegister src2);
template<class InstrType>
+ void EmitFalseBranch(InstrType instr,
+ Condition condition,
+ Register src1,
+ const Operand& src2);
+ template<class InstrType>
void EmitFalseBranchF(InstrType instr,
Condition condition,
FPURegister src1,
@@ -404,7 +386,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
LEnvironment* environment);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -412,24 +394,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -441,8 +413,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
index 460e13bf0a9..3ee74866c75 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -256,7 +256,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsInteger32(constant_source)) {
__ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ li(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
@@ -271,8 +271,7 @@ void LGapResolver::EmitMove(int index) {
__ li(kLithiumScratchReg,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
- __ LoadObject(kLithiumScratchReg,
- cgen_->ToHandle(constant_source));
+ __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
}
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
diff --git a/chromium/v8/src/mips/lithium-mips.cc b/chromium/v8/src/mips/lithium-mips.cc
index 06bb33abc01..a441ba515a9 100644
--- a/chromium/v8/src/mips/lithium-mips.cc
+++ b/chromium/v8/src/mips/lithium-mips.cc
@@ -277,7 +277,8 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
@@ -417,18 +418,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot for a double-width slot.
- if (is_double) spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
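
GetNextSpillIndex and GetNextSpillSlot now take a RegisterKind instead of a bool. The counter behaviour is unchanged: a double-width value occupies two 4-byte slots, so the index advances twice. A sketch:

int GetNextSpillIndex(int* spill_slot_count, bool is_double) {
  if (is_double) ++*spill_slot_count;  // burn one slot for the second word
  return (*spill_slot_count)++;        // index identifying the slot (pair)
}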
@@ -444,7 +446,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -660,7 +662,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -715,51 +717,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseRegisterAtStart(right_value);
}
- } else {
- right = UseRegisterAtStart(right_value);
- }
- // Shift operations can deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
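
The reworked DoShift keeps the rule that a logical shift right by zero can deoptimize: x >>> 0 yields a uint32, and values of 2^31 or more have no int32 representation unless every use truncates. A worked C++ example:

#include <cstdint>

int main() {
  int32_t x = -1;
  uint32_t shr0 = static_cast<uint32_t>(x) >> 0;  // JS: -1 >>> 0
  // shr0 == 4294967295, which does not fit in int32_t; without
  // truncating uses, the compiled code must deoptimize.
  return shr0 == 4294967295u ? 0 : 1;
}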
@@ -768,29 +763,34 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = UseFixedDouble(instr->right(), f4);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input register.
+ return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left_operand = UseFixed(left, a1);
LOperand* right_operand = UseFixed(right, a0);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -866,9 +866,33 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -898,14 +922,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -997,19 +1019,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
@@ -1047,8 +1065,9 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), a0),
+ new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
UseFixed(instr->right(), a1));
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1057,23 +1076,19 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), a0),
- FixedTemp(t0));
+ new(zone()) LInstanceOfKnownGlobal(
+ UseFixed(instr->context(), cp),
+ UseFixed(instr->left(), a0),
+ FixedTemp(t0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1091,7 +1106,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = Use(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1106,11 +1120,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1122,14 +1136,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, cp);
}
- return NULL;
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -1140,7 +1153,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
@@ -1158,15 +1172,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1221,7 +1234,7 @@ LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
+ LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
@@ -1240,8 +1253,12 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ Representation r = instr->value()->representation();
+ LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+ ? NULL
+ : UseFixed(instr->context(), cp);
LOperand* input = UseRegister(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(input);
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -1271,57 +1288,59 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* key = UseFixed(instr->key(), a2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), v0), instr);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LCallKeyed(context, key), v0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), v0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), v0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), a1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), v0),
- instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LInstruction* result = DefineFixed(call, v0);
+ if (instr->IsTailCall()) return result;
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
}
@@ -1349,33 +1368,27 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
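
The new kTruncatingToInt32 assertion is sound because JavaScript bitwise
operators are defined on ToInt32 of their operands, so the int32 fast path
never loses information the operator would have kept. A sketch of that
conversion (my reading of ES5 9.5, not V8's implementation):

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32(double d) {
      if (!std::isfinite(d) || d == 0) return 0;
      double m = std::fmod(std::trunc(d), 4294967296.0);  // (-2^32, 2^32)
      if (m < 0) m += 4294967296.0;                       // [0, 2^32)
      // Reinterpret the low 32 bits as signed (two's complement wrap,
      // which holds on every V8 target).
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }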
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LDivI* div = new(zone()) LDivI(dividend, divisor);
return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1442,16 +1455,12 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right));
+ UseConstant(right));
LInstruction* result = DefineAsRegister(mod);
return (left->CanBeNegative() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- return AssignEnvironment(DefineAsRegister(mod));
} else {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right),
@@ -1466,17 +1475,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, f2),
- UseFixedDouble(right, f4));
- return MarkAsCall(DefineFixedDouble(mod, f2), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1485,20 +1487,39 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left;
- LOperand* right = UseOrConstant(instr->BetterRightOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->BetterLeftOperand());
- temp = TempRegister();
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+ // Constants -1, 0 and 1 can stay on the constant path even when the
+ // result can overflow. Other constants qualify only when overflow is
+ // impossible.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
} else {
- left = UseRegisterAtStart(instr->BetterLeftOperand());
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
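
The comment in the constant branch is compressed; spelled out, multipliers
-1, 0 and 1 never need a real multiply instruction, and only x * -1 can
overflow, at exactly x == INT32_MIN. A sketch under that assumption (not the
actual lowering):

    #include <cstdint>

    // Returns false where the real code would deoptimize or fall back to
    // the register path.
    bool MulBySmallConstant(int32_t x, int32_t k, int32_t* result) {
      switch (k) {
        case 0:  *result = 0; return true;     // never overflows
        case 1:  *result = x; return true;     // never overflows
        case -1:
          if (x == INT32_MIN) return false;    // 0 - INT32_MIN does not fit
          *result = -x; return true;
        default: return false;                 // other constants: register path
      }
    }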
@@ -1567,6 +1588,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
result = AssignEnvironment(result);
}
return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = DefineAsRegister(add);
+ return result;
} else if (instr->representation().IsDouble()) {
if (kArchVariant == kMips32r2) {
if (instr->left()->IsMul())
@@ -1579,7 +1609,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1621,25 +1650,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, f0);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1666,6 +1683,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1674,8 +1693,18 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ LOperand* scratch = TempRegister();
+ return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
}
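
The scratch register exists because -0 cannot be found with an ordinary
floating-point compare: -0.0 == 0.0 under IEEE-754, so the generated code has
to inspect the raw bit pattern. Sketched in portable C++:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits == 0x8000000000000000ULL;  // sign bit set, all else zero
    }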
@@ -1713,10 +1742,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
HStringCompareAndBranch* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1781,13 +1811,21 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseTempRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+ return new(zone()) LSeqStringSetChar(context, string, index, value);
}
@@ -1805,9 +1843,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // This control instruction marks the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), a0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1836,7 +1882,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1896,7 +1941,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* value = UseRegisterAtStart(val);
if (val->CheckFlag(HInstruction::kUint32)) {
LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
@@ -1907,8 +1952,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
+ : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
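
Both tagging paths above switch from DefineSameAsFirst to DefineAsRegister;
the deopt condition itself is unchanged and is purely about smi range. For
reference, a sketch of 32-bit smi tagging (payload in the upper 31 bits, tag
bit 0 clear):

    #include <cstdint>

    bool Int32ToSmi(int32_t x, int32_t* smi) {
      if (x < -(1 << 30) || x > (1 << 30) - 1) return false;  // needs >31 bits
      *smi = static_cast<int32_t>(static_cast<uint32_t>(x) << 1);  // tag == 0
      return true;
    }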
@@ -1941,12 +1986,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -1995,8 +2034,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), cp)
+ : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), v0),
+ return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
parameter_count);
}
@@ -2029,8 +2071,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), a0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -2046,10 +2090,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), a1);
LOperand* value = UseFixed(instr->value(), a0);
LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -2084,8 +2129,10 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), a0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
+ LInstruction* result =
+ DefineFixed(new(zone()) LLoadNamedGeneric(context, object), v0);
return MarkAsCall(result, instr);
}
@@ -2097,6 +2144,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2113,7 +2165,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
+ obj = UseRegister(instr->elements());
} else {
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
@@ -2141,18 +2193,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), a1);
LOperand* key = UseFixed(instr->key(), a0);
LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), v0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), v0);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2163,14 +2214,18 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
key = UseRegisterOrConstantAtStart(instr->key());
- val = UseTempRegister(instr->value());
+ val = UseRegister(instr->value());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
}
return new(zone()) LStoreKeyed(object, key, val);
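
The rewritten branch makes the operand policy explicit: when a write barrier
is needed, object, key and value all move to temp registers, because the
barrier sequence runs after the store and may scratch all three. An
illustrative generational barrier (types and helpers are stand-ins, not
V8's):

    struct HeapSketch {
      bool InNewSpace(void* p) const;            // hypothetical predicate
      void RememberSlot(void* obj, int offset);  // old-to-new remembered set
    };

    void StoreWithWriteBarrier(HeapSketch* heap, void** slot_base,
                               int offset, void* value) {
      slot_base[offset] = value;  // the store itself
      if (heap->InNewSpace(value) && !heap->InNewSpace(slot_base)) {
        heap->RememberSlot(slot_base, offset);  // may clobber the operands
      }
    }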
@@ -2178,17 +2233,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
@@ -2197,6 +2248,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), a2);
LOperand* key = UseFixed(instr->key(), a1);
LOperand* val = UseFixed(instr->value(), a0);
@@ -2205,7 +2257,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(
+ new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
}
@@ -2215,11 +2268,12 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg);
+ new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
return result;
} else {
+ LOperand* context = UseFixed(instr->context(), cp);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL);
+ new(zone()) LTransitionElementsKind(object, context, NULL);
return AssignPointerMap(result);
}
}
@@ -2278,56 +2332,72 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), a1);
LOperand* val = UseFixed(instr->value(), a0);
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), v0),
- instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* left = FLAG_new_string_add
+ ? UseFixed(instr->left(), a1)
+ : UseRegisterAtStart(instr->left());
+ LOperand* right = FLAG_new_string_add
+ ? UseFixed(instr->right(), a0)
+ : UseRegisterAtStart(instr->right());
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+ LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(
+ DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
}
@@ -2349,7 +2419,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2374,8 +2444,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, v0), instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
}
@@ -2399,15 +2469,8 @@ LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
- LOperand* length;
- LOperand* index;
- if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
- length = UseRegisterOrConstant(instr->length());
- index = UseOrConstant(instr->index());
- } else {
- length = UseTempRegister(instr->length());
- index = UseRegisterAtStart(instr->index());
- }
+ LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2420,12 +2483,16 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), a0));
+ LOperand* context = UseFixed(instr->context(), cp);
+ LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), a0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2459,10 +2526,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), cp);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
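
Both stack-check placements guard the same condition; the difference is
bookkeeping. A function-entry check is an outright call (hence MarkAsCall),
while a loop back-edge only records a pointer map and environment so a lazy
deopt or GC can reconstruct the frame if the guard fires. The guard itself,
sketched (helper name hypothetical):

    #include <cstdint>

    void HandleStackGuard();  // hypothetical: interrupts, GC, real overflow

    void StackCheck(uintptr_t sp, uintptr_t limit) {
      if (sp < limit) HandleStackGuard();
    }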
@@ -2495,7 +2565,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2507,8 +2577,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->enumerable(), a0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/chromium/v8/src/mips/lithium-mips.h b/chromium/v8/src/mips/lithium-mips.h
index 91dea44045c..dbb78ea0846 100644
--- a/chromium/v8/src/mips/lithium-mips.h
+++ b/chromium/v8/src/mips/lithium-mips.h
@@ -72,6 +72,7 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -91,6 +92,7 @@ class LCodeGen;
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
+ V(Dummy) \
V(DummyUse) \
V(ElementsKind) \
V(ForInCacheArray) \
@@ -105,7 +107,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -113,13 +114,13 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
- V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -153,9 +154,9 @@ class LCodeGen;
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -214,7 +215,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -255,15 +255,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -275,7 +266,7 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -303,7 +294,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
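
The position plumbing deleted above packed a biased source position next to
the is-call bit. A sketch of the encoding it used, assuming V8's
BitField<type, shift, size> places a 31-bit unsigned payload at bit 1:

    #include <cstdint>

    // Positions include RelocInfo::kNoPosition == -1, so the accessors
    // biased by +1 to keep the stored payload non-negative.
    uint32_t EncodePosition(uint32_t bit_field, int pos) {
      uint32_t payload = static_cast<uint32_t>(pos + 1);  // kNoPosition -> 0
      return (bit_field & 1u) | (payload << 1);
    }

    int DecodePosition(uint32_t bit_field) {
      return static_cast<int>(bit_field >> 1) - 1;
    }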
@@ -402,17 +392,17 @@ class LInstructionGap V8_FINAL : public LGap {
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
@@ -432,6 +422,13 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
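
A reading aid for the shape changes that dominate the rest of this header,
assuming LTemplateInstruction<R, I, T> declares R results, I inputs and T
temps (a toy model, not the real class):

    template <int R, int I, int T>
    struct LShape {
      static const int kResults = R;
      static const int kInputs = I;  // this patch grows I by one for context
      static const int kTemps = T;
    };

    // e.g. LCmpT moves from <1, 2, 0> (left, right) to <1, 3, 0>
    // (context, left, right), with the context at inputs_[0].
    typedef LShape<1, 3, 0> LCmpTShapeAfterPatch;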
class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
@@ -483,8 +480,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -689,17 +692,15 @@ class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
@@ -783,12 +784,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LMathAbs(LOperand* value) {
+ LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
@@ -920,9 +923,9 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+ LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -930,23 +933,26 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
- explicit LIsNumberAndBranch(LOperand* value) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
};
@@ -1000,15 +1006,17 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -1084,15 +1092,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -1101,28 +1111,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1143,19 +1157,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1301,7 +1302,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
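
The .handle() unwrapping here (and in LTransitionElementsKind further down)
tracks the hydrogen side's move from raw Handle<Map> to a uniqued wrapper.
Simplified sketch of the idea (the real class is V8's Unique<T> in
src/unique.h):

    template <typename T>
    class UniqueSketch {
     public:
      explicit UniqueSketch(T* raw) : raw_(raw) {}
      T* handle() const { return raw_; }  // stand-in for Handle<T>
      // Compare by the address captured up front, without touching the heap.
      bool operator==(const UniqueSketch& other) const {
        return raw_ == other.raw_;
      }
     private:
      T* raw_;
    };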
@@ -1356,45 +1357,59 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1445,28 +1460,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() const { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1490,16 +1483,21 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; }
@@ -1511,11 +1509,12 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* parameter_count) {
+ LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
inputs_[0] = value;
- inputs_[1] = parameter_count;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
@@ -1527,7 +1526,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
- LOperand* parameter_count() { return inputs_[1]; }
+ LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1546,13 +1545,15 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1574,6 +1575,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1612,15 +1622,17 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
};
@@ -1633,13 +1645,15 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1664,16 +1678,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
}
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1761,19 +1778,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1803,8 +1820,14 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
@@ -1846,13 +1869,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
@@ -1863,13 +1888,15 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
@@ -1881,8 +1908,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
@@ -1893,13 +1926,15 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1908,8 +1943,14 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1931,13 +1972,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1948,13 +1991,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
@@ -1965,13 +2010,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
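
Read the other way around, the new override says double values survive a
runtime call only when the entry saves FP registers. A consumer-side sketch
with a stand-in interface (the real logic lives in the register allocator):

    struct InstrLike {
      virtual ~InstrLike() {}
      virtual bool ClobbersDoubleRegisters() const = 0;
    };

    void ProcessCallSite(const InstrLike& instr,
                         void (*spill_live_doubles)()) {
      // kDontSaveFPRegs runtime entries report true and force double spills.
      if (instr.ClobbersDoubleRegisters()) spill_live_doubles();
    }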
@@ -2113,7 +2169,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2185,15 +2241,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2230,17 +2288,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* obj,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2251,14 +2314,17 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
@@ -2268,8 +2334,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2291,15 +2359,17 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
@@ -2307,28 +2377,32 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
@@ -2441,12 +2515,17 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ LAllocate(LOperand* context,
+ LOperand* size,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = context;
inputs_[1] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
+ LOperand* context() { return inputs_[0]; }
LOperand* size() { return inputs_[1]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
@@ -2456,15 +2535,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
@@ -2483,13 +2574,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2536,8 +2629,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2548,13 +2647,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
@@ -2610,8 +2711,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
@@ -2635,6 +2736,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2767,7 +2870,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/chromium/v8/src/mips/macro-assembler-mips.cc b/chromium/v8/src/mips/macro-assembler-mips.cc
index a85b0d80344..f33e6fa063c 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/macro-assembler-mips.cc
@@ -35,6 +35,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
@@ -43,7 +44,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -52,6 +52,38 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
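+// Loads a value from memory using the access width implied by its
+// representation: signed/unsigned byte, halfword, or a full word.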
+void MacroAssembler::Load(Register dst,
+ const MemOperand& src,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ lb(dst, src);
+ } else if (r.IsUInteger8()) {
+ lbu(dst, src);
+ } else if (r.IsInteger16()) {
+ lh(dst, src);
+ } else if (r.IsUInteger16()) {
+ lhu(dst, src);
+ } else {
+ lw(dst, src);
+ }
+}
+
+
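+// Stores a value to memory using the width implied by its representation.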
+void MacroAssembler::Store(Register src,
+ const MemOperand& dst,
+ Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ sb(src, dst);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ sh(src, dst);
+ } else {
+ sw(src, dst);
+ }
+}
+
+
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -82,19 +114,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- li(result, Operand(cell));
- lw(result, FieldMemOperand(result, Cell::kValueOffset));
- } else {
- li(result, Operand(object));
- }
-}
-
-
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -248,10 +267,6 @@ void MacroAssembler::RecordWrite(Register object,
SmiCheck smi_check) {
ASSERT(!AreAliased(object, address, value, t8));
ASSERT(!AreAliased(object, address, value, t9));
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
if (emit_debug_code()) {
lw(at, MemOperand(address));
@@ -259,6 +274,12 @@ void MacroAssembler::RecordWrite(Register object,
eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ // TODO(mstarzinger): Dynamic counter missing.
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
@@ -494,8 +515,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Subu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -514,7 +534,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
addu(reg2, elements, at);
lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
- if (i != kProbes - 1) {
+ if (i != kNumberDictionaryProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
Branch(miss, ne, key, Operand(at));
@@ -771,6 +791,23 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
//------------Pseudo-instructions-------------
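+// Materializes a handle into a register. Smis are loaded as immediates;
+// heap objects in new space go through a cell so the GC can relocate them.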
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ li(dst, Operand(value), mode);
+ } else {
+ ASSERT(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ li(dst, Operand(cell));
+ lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ li(dst, Operand(value));
+ }
+ }
+}
+
+
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
ASSERT(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3220,11 +3257,10 @@ void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- Branch(&done, eq, length, Operand(zero_reg));
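+  // Lengths of at most one word are copied entirely in the byte loop.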
+ Branch(&byte_loop, le, length, Operand(kPointerSize));
bind(&align_loop_1);
And(scratch, src, kPointerSize - 1);
Branch(&word_loop, eq, scratch, Operand(zero_reg));
@@ -3233,7 +3269,7 @@ void MacroAssembler::CopyBytes(Register src,
sb(scratch, MemOperand(dst));
Addu(dst, dst, 1);
Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ Branch(&align_loop_1, ne, length, Operand(zero_reg));
// Copy bytes in word size chunks.
bind(&word_loop);
@@ -3691,7 +3727,7 @@ void MacroAssembler::InvokeFunction(Register function,
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -3700,8 +3736,10 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
+ // Contract with called JS functions requires that function is passed in a1.
+ ASSERT(function.is(a1));
+
// Get the function and setup the context.
- LoadHeapObject(a1, function);
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
@@ -3712,6 +3750,17 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ li(a1, function);
+ InvokeFunction(a1, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
@@ -3836,8 +3885,6 @@ void MacroAssembler::CallStub(CodeStub* stub,
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -3847,12 +3894,14 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset_from_fp) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -3915,12 +3964,13 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
}
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
// Load value from ReturnValue.
- lw(v0, MemOperand(fp, return_value_offset_from_fp*kPointerSize));
+ lw(v0, return_value_operand);
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
@@ -3941,14 +3991,23 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ lw(cp, *context_restore_operand);
+ }
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, true);
+ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
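+  // Rejoin the regular exit path once the exception has been promoted.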
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -3965,8 +4024,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -4125,7 +4183,8 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -4142,25 +4201,11 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- PrepareCEntryArgs(function->nargs);
- PrepareCEntryFunction(ExternalReference(function, isolate()));
- CEntryStub stub(1, kSaveFPRegs);
+ CEntryStub stub(1, save_doubles);
CallStub(&stub);
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments,
BranchDelaySlot bd) {
@@ -4481,113 +4526,37 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
-void MacroAssembler::LoadNumber(Register object,
- FPURegister dst,
- Register heap_number_map,
- Register scratch,
- Label* not_number) {
- Label is_smi, done;
-
- UntagAndJumpIfSmi(scratch, object, &is_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
-
- ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- Branch(&done);
-
- bind(&is_smi);
- mtc1(scratch, dst);
- cvt_d_w(dst, dst);
-
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32Double(Register object,
- DoubleRegister double_dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
- mtc1(scratch1, double_scratch);
- cvt_d_w(double_dst, double_scratch);
- Branch(&done);
-
- bind(&obj_is_not_smi);
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- // Load the double value.
- ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- Branch(not_int32, ne, except_flag, Operand(zero_reg));
- bind(&done);
-}
-
-
-void MacroAssembler::LoadNumberAsInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
-
- Label done, maybe_undefined;
-
- UntagAndJumpIfSmi(dst, object, &done);
-
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- // Load the double value.
- ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- Branch(not_int32, ne, except_flag, Operand(zero_reg));
- Branch(&done);
-
- bind(&maybe_undefined);
- LoadRoot(at, Heap::kUndefinedValueRootIndex);
- Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- bind(&done);
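+// Emits a stub or function prologue. The function prologue is emitted at a
+// fixed size so that the code aging machinery can patch it in place.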
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ Push(ra, fp, cp);
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ nop(Assembler::CODE_AGE_MARKER_NOP);
+      // Load the stub address to t9 and call it.
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+ CONSTANT_SIZE);
+ nop(); // Prevent jalr to jal optimization.
+ jalr(t9, a0);
+ nop(); // Branch delay slot nop.
+ nop(); // Pad the empty space.
+ } else {
+ Push(ra, fp, cp, a1);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ // Adjust fp to point to caller's fp.
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
+ }
}
@@ -4600,7 +4569,9 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
sw(cp, MemOperand(sp, 2 * kPointerSize));
sw(t8, MemOperand(sp, 1 * kPointerSize));
sw(t9, MemOperand(sp, 0 * kPointerSize));
- addiu(fp, sp, 3 * kPointerSize);
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -4684,6 +4655,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
+ bool restore_context,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
@@ -4700,9 +4672,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- lw(cp, MemOperand(t8));
+ if (restore_context) {
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ lw(cp, MemOperand(t8));
+ }
#ifdef DEBUG
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
sw(a3, MemOperand(t8));
#endif
@@ -4885,7 +4860,7 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
+ SmiTst(object, t0);
Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -4899,7 +4874,7 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
+ SmiTst(object, t0);
Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -4929,6 +4904,86 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
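+// Probes the number string cache for |object|, loading the cached string
+// into |result| on a hit and jumping to |not_found| otherwise.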
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ sra(mask, mask, kSmiTagSize + 1);
+ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Xor(scratch1, scratch1, Operand(scratch2));
+ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ Branch(not_found);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ sra(scratch, object, 1); // Shift away the tag.
+ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -5013,6 +5068,42 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
}
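+// Checks that |string| is a sequential string with the expected encoding and
+// that |index| is a valid untagged index into it; throws otherwise.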
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ Register scratch,
+ uint32_t encoding_mask) {
+ Label is_object;
+ SmiTst(string, at);
+ ThrowIf(eq, kNonObject, at, Operand(zero_reg));
+
+ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ andi(at, at, kStringRepresentationMask | kStringEncodingMask);
+ li(scratch, Operand(encoding_mask));
+ ThrowIf(ne, kUnexpectedStringType, at, Operand(scratch));
+
+  // The index is assumed to be untagged coming in; tag it to compare with
+  // the string length without using a temp register. It is untagged again
+  // at the end of this function.
+ Label index_tag_ok, index_tag_bad;
+ TrySmiTag(index, scratch, &index_tag_bad);
+ Branch(&index_tag_ok);
+ bind(&index_tag_bad);
+ Throw(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+
+ lw(at, FieldMemOperand(string, String::kLengthOffset));
+ ThrowIf(ge, kIndexIsTooLarge, index, Operand(at));
+
+ ASSERT(Smi::FromInt(0) == 0);
+ ThrowIf(lt, kIndexIsNegative, index, Operand(zero_reg));
+
+ SmiUntag(index, index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
@@ -5393,6 +5484,57 @@ void MacroAssembler::EnsureNotWhite(
}
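+// Throws the given bailout reason as an exception via Runtime::kThrowMessage.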
+void MacroAssembler::Throw(BailoutReason reason) {
+ Label throw_start;
+ bind(&throw_start);
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ li(a0, Operand(Smi::FromInt(reason)));
+ push(a0);
+ // Disable stub call restrictions to always allow calls to throw.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+  // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+    // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the ThrowMessage macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 14, so we use this as a maximum value.
+ static const int kExpectedThrowMessageInstructions = 14;
+ int throw_instructions = InstructionsGeneratedSince(&throw_start);
+ ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
+ while (throw_instructions++ < kExpectedThrowMessageInstructions) {
+ nop();
+ }
+ }
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc,
+ BailoutReason reason,
+ Register rs,
+ Operand rt) {
+ Label L;
+ Branch(&L, NegateCondition(cc), rs, rt);
+ Throw(reason);
+  // Will not return here.
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
@@ -5423,7 +5565,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
EnumLength(a3, a1);
- Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+ Branch(
+ call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
jmp(&start);
@@ -5492,23 +5635,24 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
+ Label* no_memento_found,
Condition cond,
Label* allocation_memento_present) {
- Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
Addu(scratch_reg, receiver_reg,
Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- Branch(&no_memento_available, lt, scratch_reg, Operand(new_space_start));
+ Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
li(at, Operand(new_space_allocation_top));
lw(at, MemOperand(at));
- Branch(&no_memento_available, gt, scratch_reg, Operand(at));
+ Branch(no_memento_found, gt, scratch_reg, Operand(at));
lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
- Branch(allocation_memento_present, cond, scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ if (allocation_memento_present) {
+ Branch(allocation_memento_present, cond, scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
+ }
}
@@ -5536,6 +5680,30 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
}
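+// Walks the prototype chain of |object| and jumps to |found| if any map on
+// the chain has dictionary (slow-mode) elements.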
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+  // |current| walks up the prototype chain, starting at the object itself.
+ Move(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
+ lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ Branch(&loop_again, ne, current, Operand(factory->null_value()));
+}
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
diff --git a/chromium/v8/src/mips/macro-assembler-mips.h b/chromium/v8/src/mips/macro-assembler-mips.h
index 75ded884909..4e30c353e2c 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/mips/macro-assembler-mips.h
@@ -51,6 +51,12 @@ class JumpTarget;
// MIPS generated code calls C code, it must be via t9 register.
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode {
+ EMIT_RETURN = true,
+ NO_EMIT_RETURN = false
+};
+
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
@@ -273,6 +279,9 @@ class MacroAssembler: public Assembler {
Branch(L);
}
+ void Load(Register dst, const MemOperand& src, Representation r);
+ void Store(Register src, const MemOperand& dst, Representation r);
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -287,17 +296,6 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- li(result, object);
- }
- }
-
// ---------------------------------------------------------------------------
// GC Support
@@ -614,10 +612,7 @@ class MacroAssembler: public Assembler {
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(j), mode);
}
- inline void li(Register dst, Handle<Object> value,
- LiFlags mode = OPTIMIZE_SIZE) {
- li(dst, Operand(value), mode);
- }
+ void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
@@ -848,7 +843,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles,
Register arg_count,
- bool do_return = false);
+ bool restore_context,
+ bool do_return = NO_EMIT_RETURN);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -921,6 +917,13 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
+
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -971,6 +974,12 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -1194,11 +1203,20 @@ class MacroAssembler: public Assembler {
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1271,7 +1289,8 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_fp);
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
@@ -1329,8 +1348,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -1361,6 +1378,21 @@ class MacroAssembler: public Assembler {
Addu(dst, src, src);
}
+  // Try to convert int32 to smi. If the value is too large, preserve
+ // the original value and jump to not_a_smi. Destroys scratch and
+ // sets flags.
+ void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
+ TrySmiTag(reg, reg, scratch, not_a_smi);
+ }
+ void TrySmiTag(Register dst,
+ Register src,
+ Register scratch,
+ Label* not_a_smi) {
+ SmiTagCheckOverflow(at, src, scratch);
+ BranchOnOverflow(not_a_smi, scratch);
+ mov(dst, at);
+ }
+
void SmiUntag(Register reg) {
sra(reg, reg, kSmiTagSize);
}
@@ -1369,6 +1401,14 @@ class MacroAssembler: public Assembler {
sra(dst, src, kSmiTagSize);
}
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
+ inline void NonNegativeSmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
+ }
+
// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
@@ -1419,6 +1459,18 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// String utilities.
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache, the generated code falls
+  // through with the result in the result register. The object and the result
+  // register can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found with only the content of register
+  // object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii(
@@ -1436,6 +1488,12 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ Register scratch,
+ uint32_t encoding_mask);
+
// Test that both first and second are sequential ASCII strings.
// Assume that they are non-smis.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
@@ -1471,6 +1529,9 @@ class MacroAssembler: public Assembler {
And(reg, reg, Operand(mask));
}
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
@@ -1493,11 +1554,26 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, jump to allocation_info_present
- void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Condition cond,
- Label* allocation_memento_present);
+ // If allocation info is present, jump to allocation_memento_present.
+ void TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found,
+ Condition cond = al,
+ Label* allocation_memento_present = NULL);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found, eq, memento_found);
+ bind(&no_memento_found);
+ }
+
+ // Jumps to found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
private:
void CallCFunctionHelper(Register function,
@@ -1568,7 +1644,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
diff --git a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
index 1a04fd10292..49dec3c0246 100644
--- a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1063,15 +1063,56 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ int stack_alignment = OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Subu(sp, sp, Operand(kPointerSize));
+ ASSERT(IsPowerOf2(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ sw(scratch, MemOperand(sp));
+
__ mov(a2, frame_pointer());
// Code* of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
- // a0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ Subu(sp, sp, Operand(stack_alignment));
+
+ // Stack pointer now points to cell where return address is to be written.
+  // Arguments are in registers, meaning we treat the return address as
+  // argument 5. Since DirectCEntryStub will handle allocating space for the C
+  // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ __ li(t9, Operand(stack_guard_check));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, t9);
+
+  // DirectCEntryStub allocated space for the C argument slots, so we have to
+  // drop them, together with the return address, by loading the saved sp.
+  // At this point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ lw(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -1276,21 +1317,6 @@ void RegExpMacroAssemblerMIPS::CheckStackLimit() {
}
-void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ li(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ lw(sp, MemOperand(sp, 16));
- }
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-}
-
-
void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
@@ -1312,23 +1338,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- const int return_address_offset = kCArgsSlotsSize;
- __ Addu(a0, sp, return_address_offset);
- __ sw(ra, MemOperand(a0, 0));
- __ mov(t9, t1);
- __ Call(t9);
- __ lw(ra, MemOperand(sp, return_address_offset));
- __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- __ Jump(ra);
-}
-
-
#undef __
#endif // V8_INTERPRETED_REGEXP
diff --git a/chromium/v8/src/mips/regexp-macro-assembler-mips.h b/chromium/v8/src/mips/regexp-macro-assembler-mips.h
index 86ae4d45eeb..063582c6485 100644
--- a/chromium/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/chromium/v8/src/mips/regexp-macro-assembler-mips.h
@@ -217,14 +217,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
diff --git a/chromium/v8/src/mips/simulator-mips.cc b/chromium/v8/src/mips/simulator-mips.cc
index ea8b65948af..acc65251e23 100644
--- a/chromium/v8/src/mips/simulator-mips.cc
+++ b/chromium/v8/src/mips/simulator-mips.cc
@@ -1722,6 +1722,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
int64_t& i64hilo,
uint64_t& u64hilo,
int32_t& next_pc,
+ int32_t& return_addr_reg,
bool& do_interrupt) {
// Every local variable declared here needs to be const.
// This is to make sure that changed values are sent back to
@@ -1782,6 +1783,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
case JR:
case JALR:
next_pc = get_register(instr->RsValue());
+ return_addr_reg = instr->RdValue();
break;
case SLL:
alu_out = rt << sa;
@@ -1986,6 +1988,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
int32_t current_pc = get_pc();
// Next pc
int32_t next_pc = 0;
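+  // Register that receives the return address: rd for JALR, ra otherwise.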
+ int32_t return_addr_reg = 31;
// Set up the variables if needed before executing the instruction.
ConfigureTypeRegister(instr,
@@ -1993,6 +1996,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
i64hilo,
u64hilo,
next_pc,
+ return_addr_reg,
do_interrupt);
// ---------- Raise exceptions triggered.
@@ -2258,7 +2262,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ set_register(return_addr_reg,
+ current_pc + 2 * Instruction::kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
@@ -2274,9 +2279,13 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case DIV:
// Divide by zero and overflow was not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0 and
- // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE.
- if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
+ // step - div and divu do not raise exceptions. On division by 0
+ // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+ // return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt != 0) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
diff --git a/chromium/v8/src/mips/simulator-mips.h b/chromium/v8/src/mips/simulator-mips.h
index 601cd6d99d1..d9fd10f245c 100644
--- a/chromium/v8/src/mips/simulator-mips.h
+++ b/chromium/v8/src/mips/simulator-mips.h
@@ -289,6 +289,7 @@ class Simulator {
int64_t& i64hilo,
uint64_t& u64hilo,
int32_t& next_pc,
+ int32_t& return_addr_reg,
bool& do_interrupt);
void DecodeTypeImmediate(Instruction* instr);
diff --git a/chromium/v8/src/mips/stub-cache-mips.cc b/chromium/v8/src/mips/stub-cache-mips.cc
index 6d68bbd7d0d..9f5089d55d9 100644
--- a/chromium/v8/src/mips/stub-cache-mips.cc
+++ b/chromium/v8/src/mips/stub-cache-mips.cc
@@ -370,30 +370,26 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
}
@@ -422,7 +418,7 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -441,19 +437,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// Generate StoreTransition code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss_label,
+ Label* slow) {
// a0 : value.
Label exit;
@@ -465,7 +461,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
- __ LoadObject(scratch1, constant);
+ __ li(scratch1, constant);
__ Branch(miss_label, ne, value_reg, Operand(scratch1));
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -605,15 +601,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// a0 : value
Label exit;
@@ -726,9 +722,9 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ li(this->name(), Operand(name));
@@ -736,35 +732,6 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
}
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- a0: receiver
- // -- a1: function to call
- // -----------------------------------
- // Check that the function really is a function.
- __ JumpIfSmi(a1, miss);
- __ GetObjectType(a1, a3, a3);
- __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
@@ -789,17 +756,12 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ PrepareCEntryArgs(StubCache::kInterceptorArgsLength);
- __ PrepareCEntryFunction(ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
}
@@ -826,25 +788,26 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : ReturnValue default value
- // -- sp[20] : ReturnValue
- // -- sp[24] : last JS argument
+ // -- sp[0] - sp[24] : FunctionCallbackInfo, incl.
+ // : holder (set by CheckPrototypes)
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 5) * 4] : first JS argument
- // -- sp[(argc + 6) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(t1, function);
+ __ li(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+ __ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize));
- // Pass the additional arguments.
+ // Construct the FunctionCallbackInfo.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@@ -853,18 +816,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ li(t2, call_data);
}
-
+ // Store call data.
+ __ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize));
+ // Store isolate.
__ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate ReturnValue default and ReturnValue.
- __ sw(t1, MemOperand(sp, 1 * kPointerSize));
- __ sw(t2, MemOperand(sp, 2 * kPointerSize));
- __ sw(t3, MemOperand(sp, 3 * kPointerSize));
+ __ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ sw(t1, MemOperand(sp, 4 * kPointerSize));
- __ sw(t1, MemOperand(sp, 5 * kPointerSize));
+ __ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize));
+ __ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize));
// Prepare arguments.
- __ Addu(a2, sp, Operand(5 * kPointerSize));
+ __ Move(a2, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -873,19 +836,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // a0 = v8::Arguments&
+ // a0 = FunctionCallbackInfo&
// FunctionCallbackInfo is built at sp + 1 (sp is a reserved spot for ra).
__ Addu(a0, sp, kPointerSize);
-
- // v8::Arguments::implicit_args_
+ // FunctionCallbackInfo::implicit_args_
__ sw(a2, MemOperand(a0, 0 * kPointerSize));
- // v8::Arguments::values_
- __ Addu(t0, a2, Operand(argc * kPointerSize));
+ // FunctionCallbackInfo::values_
+ __ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize));
__ sw(t0, MemOperand(a0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
+ // FunctionCallbackInfo::length_ = argc
__ li(t0, Operand(argc));
__ sw(t0, MemOperand(a0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
+ // FunctionCallbackInfo::is_construct_call = 0
__ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
@@ -903,12 +865,19 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+ MemOperand return_value_operand(
+ fp, (2 + FCA::kReturnValueOffset) * kPointerSize);
+
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
a1,
kStackUnwindSpace,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
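
For reference, the frame GenerateFastApiDirectCall now assumes: the six raw sp offsets of the old comment block become named FunctionCallbackArguments (FCA) slots, plus a seventh slot that saves cp so the optional context restore after the call has something to reload. The index names below are the ones this hunk uses; the concrete values are an assumption for illustration (the authoritative definitions live in src/arguments.h):

    // Assumed slot order, lowest stack address first.
    enum FunctionCallbackArgumentsIndex {
      kHolderIndex                  = 0,  // written by CheckPrototypes
      kIsolateIndex                 = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueOffset            = 3,
      kDataIndex                    = 4,
      kCalleeIndex                  = 5,
      kContextSaveIndex             = 6,
      kArgsLength                   = 7   // == kFastApiCallArguments
    };
    // implicit_args_ is the saved sp (a2); values_ is then
    //   a2 + (kFastApiCallArguments - 1 + argc) * kPointerSize,
    // which is exactly the Addu into t0 above.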
@@ -922,11 +891,12 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(optimization.is_simple_api_call());
ASSERT(!receiver.is(scratch));
+ typedef FunctionCallbackArguments FCA;
const int stack_space = kFastApiCallArguments + argc + 1;
// Assign stack space for the call arguments.
__ Subu(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
- __ sw(receiver, MemOperand(sp, 0));
+ __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ sw(receiver, MemOperand(sp, index * kPointerSize));
@@ -937,16 +907,16 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ sw(receiver, MemOperand(sp, index-- * kPointerSize));
}
- GenerateFastApiDirectCall(masm, optimization, argc);
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
+ CallInterceptorCompiler(CallStubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name,
- Code::ExtraICState extra_ic_state)
+ ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
name_(name),
@@ -1021,9 +991,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -1037,10 +1008,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
+ handle(lookup->holder()), scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -1051,15 +1022,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
Handle<JSFunction> function = optimization.constant_function();
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ __ Move(a0, receiver);
+ stub_compiler_->GenerateJumpFunction(object, function);
}
    // Deferred code for fast API call case: clean preallocated space.
@@ -1086,22 +1054,19 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
// Call a runtime function to load the interceptor property.
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, interceptor_holder,
+ IC::kLoadPropertyWithInterceptorForCall);
- __ CallExternalReference(
- ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -1116,47 +1081,26 @@ class CallInterceptorCompiler BASE_EMBEDDED {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
+ __ Push(receiver, holder, name_);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, holder_obj,
+ IC::kLoadPropertyWithInterceptorOnly);
+ __ pop(name_);
+ __ pop(holder);
+ __ pop(receiver);
}
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
__ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
}
- StubCompiler* stub_compiler_;
+ CallStubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
- Code::ExtraICState extra_ic_state_;
+ ExtraICState extra_ic_state_;
};
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1166,7 +1110,7 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -1176,11 +1120,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ li(scratch1, Operand(Handle<Map>(object->map())));
+ __ li(scratch1, Operand(receiver_map));
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1190,29 +1134,36 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ typedef FunctionCallbackArguments FCA;
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain, checking the maps of fast and global
+ // objects and doing negative lookups for normal (dictionary-mode) objects.
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1223,19 +1174,24 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Register map_reg = scratch1;
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- Handle<Map> current_map(current->map());
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
// CheckMap implicitly loads the map of |reg| into |map_reg|.
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
}
+
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
+
reg = holder_reg; // From now on the object will be in holder_reg.
if (heap()->InNewSpace(*prototype)) {
@@ -1249,70 +1205,65 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
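
The rewritten CheckPrototypes walks maps instead of objects: `current` is only bound when the incoming Type carries a constant object, so the routine also works when all that is known is a receiver map, and the dictionary-mode ASSERT tolerates a null `current`. It also absorbs the deleted GenerateCheckPropertyCells by emitting the empty-property-cell check whenever the walk crosses a global object. The control flow, reduced to plain C++ with placeholders standing in for the emitted checks:

    // Flow model only; Map and the emit comments are placeholders, not V8 API.
    struct Map { Map* prototype_map; bool dictionary, global, proxy; };

    int WalkPrototypeChain(Map* receiver_map, Map* holder_map) {
      int depth = 0;
      for (Map* m = receiver_map; m != holder_map; m = m->prototype_map) {
        ++depth;
        if (m->dictionary && !m->global && !m->proxy) {
          // emit negative dictionary lookup for |name|
        } else {
          // emit map check (elided at depth 1 unless CHECK_ALL_MAPS)
          if (m->proxy)  { /* emit CheckAccessGlobalProxy */ }
          if (m->global) { /* emit GenerateCheckPropertyCell */ }
        }
      }
      // Then: holder map check when depth != 0 or CHECK_ALL_MAPS,
      // and a final global-proxy access check.
      return depth;
    }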
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ Branch(success);
+ Label success;
+ __ Branch(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ b(success);
+ Label success;
+ __ Branch(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
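
Both footers drop their out-parameter: instead of branching to a caller-supplied success label, they bind a local one, so callers now fall straight through on the success path. The call-site change, visible later in this diff at CompileLoadGlobal:

    // Before:                           // After:
    //   Label success, miss;           //   Label miss;
    //   HandlerFrontendFooter(name,    //   HandlerFrontendFooter(name, &miss);
    //       &success, &miss);          //   // falls through here on success
    //   __ bind(&success);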
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1344,15 +1295,15 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ Branch(&miss, ne, scratch2(), Operand(callback));
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1368,36 +1319,36 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadObject(v0, value);
+ __ li(v0, value);
__ Ret();
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build the AccessorInfo::args_ list on the stack and push the property name
// below the exit frame so the GC is aware of them, then store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
ASSERT(!scratch2().is(reg));
ASSERT(!scratch3().is(reg));
ASSERT(!scratch4().is(reg));
__ push(receiver());
- __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
__ li(scratch3(), callback);
__ lw(scratch3(), FieldMemOperand(scratch3(),
@@ -1415,6 +1366,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
__ sw(reg, MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
+ __ Addu(scratch2(), sp, 1 * kPointerSize);
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
__ mov(a0, sp); // (first argument - a0) = Handle<Name>
@@ -1423,13 +1375,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create AccessorInfo instance on the stack above the exit frame with
+ // Create PropertyCallbackInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
// (second argument - a1) = AccessorInfo&
__ Addu(a1, sp, kPointerSize);
- const int kStackUnwindSpace = kFastApiCallArguments + 1;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
@@ -1446,13 +1398,14 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
a2,
kStackUnwindSpace,
- 6);
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
}
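
The STATIC_ASSERT block is the substantive change in this hunk: PropertyCallbackArguments flipped from offsets descending below kThisIndex == 0 to a flat 0..5 layout. That is also why args_ (scratch2) is now computed after the stores, as sp plus one word, so the property name written at sp[0] sits just below the args_ array. The asserted layout, collected into one enum (values taken verbatim from the asserts above):

    enum PropertyCallbackArgumentsIndex {
      kHolderIndex                  = 0,
      kIsolateIndex                 = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueOffset            = 3,
      kDataIndex                    = 4,
      kThisIndex                    = 5,
      kArgsLength                   = 6
    };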
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1501,11 +1454,10 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
+
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
@@ -1543,21 +1495,12 @@ void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a0, miss);
- CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
+void CallStubCompiler::GenerateFunctionCheck(Register function,
+ Register scratch,
+ Label* miss) {
+ __ JumpIfSmi(function, miss);
+ __ GetObjectType(function, scratch, scratch);
+ __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
}
@@ -1576,9 +1519,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ JumpIfSmi(a1, miss);
- __ GetObjectType(a1, a3, a3);
- __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+ GenerateFunctionCheck(a1, a3, miss);
// Check the shared function info. Make sure it hasn't changed.
__ li(a3, Handle<SharedFunctionInfo>(function->shared()));
@@ -1594,7 +1535,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_state_);
+ extra_state());
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1603,34 +1544,18 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
Label miss;
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into a0.
- __ lw(a0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(a0, &miss, t0);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
+ Register reg = HandlerFrontendHeader(
+ object, holder, name, RECEIVER_MAP_CHECK, &miss);
GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
+ GenerateJumpFunction(object, a1, &miss);
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::FIELD, name);
+ return GetCode(Code::FAST, name);
}
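
CompileCallField establishes the shape the rest of this file is moved to: the hand-rolled name check, receiver load, smi check, and CheckPrototypes collapse into HandlerFrontendHeader; the invoke tail becomes GenerateJumpFunction; and the miss path becomes HandlerFrontendFooter. As an outline of the pattern (schematic, not compilable on its own):

    Label miss;
    Register reg = HandlerFrontendHeader(object, holder, name,
                                         RECEIVER_MAP_CHECK, &miss);
    // ...stub-specific fast path using |reg|...
    GenerateJumpFunction(object, a1, &miss);  // type-check, patch proxy, invoke
    HandlerFrontendFooter(&miss);             // bind miss, GenerateMissBranch
    return GetCode(Code::FAST, name);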
@@ -1643,30 +1568,16 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
Code::StubType type) {
Label miss;
- // Check that function is still array.
- const int argc = arguments().immediate();
- GenerateNameCheck(name, &miss);
- Register receiver = a1;
-
- if (cell.is_null()) {
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, a0,
- t0, name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ site->SetElementsKind(GetInitialFastElementsKind());
Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ const int argc = arguments().immediate();
__ li(a0, Operand(argc));
__ li(a2, Operand(site_feedback_cell));
__ li(a1, Operand(function));
@@ -1674,8 +1585,7 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
ArrayConstructorStub stub(isolate());
__ TailCallStub(&stub);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
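
One behavioral nuance beyond the frontend rewrite: the allocation site is now initialized through SetElementsKind rather than by storing a raw Smi into transition_info, which keeps the field's encoding private to AllocationSite. A minimal model of the encapsulation gain (hypothetical class, not V8's):

    class AllocationSiteModel {
     public:
      // The setter owns the encoding; callers no longer build the Smi.
      void SetElementsKind(int kind) { transition_info_ = Encode(kind); }
     private:
      static int Encode(int kind) { return kind; }  // packing detail lives here
      int transition_info_ = 0;
    };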
@@ -1689,33 +1599,21 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
+ return Handle<Code>::null();
+ }
Label miss;
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ Register receiver = a0;
+ Register scratch = a1;
- GenerateNameCheck(name, &miss);
-
- Register receiver = a1;
-
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
- name, &miss);
if (argc == 0) {
// Nothing to do, just return the length.
@@ -1733,34 +1631,34 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
- v0,
+ scratch,
Heap::kFixedArrayMapRootIndex,
&check_double,
DONT_DO_SMI_CHECK);
- // Get the array's length into v0 and calculate new length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ // Get the array's length into scratch and calculate new length.
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+ __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+ __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
// Check if value is a smi.
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
// Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
@@ -1768,38 +1666,39 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ sw(t0, MemOperand(end_elements));
// Check for a smi.
+ __ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&check_double);
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
- a0,
+ scratch,
Heap::kFixedDoubleArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
- // Get the array's length into v0 and calculate new length.
- __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ // Get the array's length into scratch and calculate new length.
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+ __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, v0, Operand(t0));
+ __ Branch(&call_builtin, gt, scratch, Operand(t0));
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
- t0, v0, elements, a3, t1, a2,
+ t0, scratch, elements, a3, t1, a2,
&call_builtin, argc * kDoubleSize);
// Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- // Check for a smi.
+ __ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&with_write_barrier);
@@ -1849,12 +1748,12 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
}
// Save new length.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
__ Addu(end_elements, end_elements, kEndElementsOffset);
__ sw(t0, MemOperand(end_elements));
@@ -1866,10 +1765,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
+ __ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&attempt_to_grow_elements);
- // v0: array's length + 1.
+ // scratch: array's length + 1.
// t0: elements' length.
if (!FLAG_inline_new) {
@@ -1892,7 +1792,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
const int kAllocationDelta = 4;
// Load top and check if it is the end of elements.
- __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
__ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
__ li(t3, Operand(new_space_allocation_top));
@@ -1916,11 +1816,12 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
}
// Update elements' and array's sizes.
- __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
__ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Elements are in new space, so write barrier is not required.
+ __ mov(v0, scratch);
__ DropAndRet(argc + 1);
}
__ bind(&call_builtin);
@@ -1928,9 +1829,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
}
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
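
The mechanical change threaded through CompileArrayPushCall (and the pop and char stubs below) is register discipline: the running length now lives in a named scratch register instead of v0, and v0 is written exactly once, immediately before returning, so the shared frontend helpers are free to clobber it. The exit idiom, as it now appears at every return point:

    __ mov(v0, scratch);      // materialize the result only at the exit
    __ DropAndRet(argc + 1);  // pop receiver plus arguments and return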
@@ -1944,38 +1843,27 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
+ return Handle<Code>::null();
+ }
Label miss, return_undefined, call_builtin;
- Register receiver = a1;
+ Register receiver = a0;
+ Register scratch = a1;
Register elements = a3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- t0, v0, name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Get the elements array of the object.
__ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
- v0,
+ scratch,
Heap::kFixedArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
@@ -1993,14 +1881,16 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// expensive shift first, and use an offset later on.
__ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(elements, elements, t1);
- __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&call_builtin, eq, v0, Operand(t2));
+ __ lw(scratch, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ Branch(&call_builtin, eq, scratch, Operand(t2));
// Set the array's length.
__ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole.
__ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ const int argc = arguments().immediate();
+ __ mov(v0, scratch);
__ DropAndRet(argc + 1);
__ bind(&return_undefined);
@@ -2011,9 +1901,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
__ TailCallExternalReference(
ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2027,18 +1915,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
- const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -2046,26 +1925,17 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- v0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- v0, holder, a1, a3, t0, name, &miss);
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
- Register receiver = a1;
+ Register receiver = a0;
Register index = t1;
- Register result = v0;
+ Register result = a1;
+ const int argc = arguments().immediate();
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
__ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
@@ -2081,6 +1951,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
+ __ mov(v0, result);
__ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
@@ -2095,8 +1966,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in a2.
__ li(a2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2110,14 +1980,6 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
@@ -2127,27 +1989,17 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- v0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- v0, holder, a1, a3, t0, name, &miss);
- Register receiver = v0;
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
+
+ Register receiver = a0;
Register index = t1;
Register scratch = a3;
- Register result = v0;
- __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+ Register result = a1;
if (argc > 0) {
__ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
} else {
@@ -2163,6 +2015,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm());
+ __ mov(v0, result);
__ DropAndRet(argc + 1);
StubRuntimeCallHelper call_helper;
@@ -2177,8 +2030,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in a2.
__ li(a2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2192,14 +2044,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
@@ -2207,20 +2051,9 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2243,16 +2076,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2266,33 +2095,15 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2301,7 +2112,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// If the argument is a smi, just return.
STATIC_ASSERT(kSmiTag == 0);
- __ And(t0, v0, Operand(kSmiTagMask));
+ __ SmiTst(v0, t0);
__ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
@@ -2374,15 +2185,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ ctc1(a3, FCSR);
__ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2396,14 +2203,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- a2 : function name
- // -- ra : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2411,17 +2210,9 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(a1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
@@ -2473,16 +2264,12 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ DropAndRet(argc + 1);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // a2: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2526,41 +2313,64 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
ReserveSpaceForFastApiCall(masm(), a0);
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
- depth, &miss);
+ CheckPrototypes(
+ IC::CurrentTypeOf(object, isolate()),
+ a1, holder, a0, a3, t0, name, depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss_before_stack_reserved);
// Return the generated code.
return GetCode(function);
}
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ Label success;
+ // Check that the object is a boolean.
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&success, eq, object, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ Branch(miss, ne, object, Operand(at));
+ __ bind(&success);
+}
+
+
+void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
+ if (object->IsGlobalObject()) {
+ const int argc = arguments().immediate();
+ const int receiver_offset = argc * kPointerSize;
+ __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, receiver_offset));
+ }
+}
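
GenerateBooleanCheck and PatchGlobalProxy are the first pieces of the deleted GenerateCallFunction resurfacing as reusable helpers. PatchGlobalProxy keeps the rule that a global-object receiver must be replaced by its global proxy in the on-stack receiver slot before invoking; when the condition holds it emits exactly:

    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
    __ sw(a3, MemOperand(sp, receiver_offset));  // overwrite the receiver slot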
+
+
+Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* miss) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
+ GenerateNameCheck(name, miss);
+
+ Register reg = a0;
// Get the receiver from the stack.
const int argc = arguments().immediate();
- __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ const int receiver_offset = argc * kPointerSize;
+ __ lw(a0, MemOperand(sp, receiver_offset));
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(a1, &miss);
+ __ JumpIfSmi(a0, miss);
}
  // Make sure that it's okay not to patch the on-stack receiver
@@ -2568,130 +2378,81 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
switch (check) {
case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1, a0, a3);
+ __ IncrementCounter(isolate()->counters()->call_const(), 1, a1, a3);
// Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc * kPointerSize));
- }
+ reg = CheckPrototypes(
+ IC::CurrentTypeOf(object, isolate()),
+ reg, holder, a1, a3, t0, name, miss);
break;
- case STRING_CHECK:
+ case STRING_CHECK: {
// Check that the object is a string.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ GetObjectType(reg, a3, a3);
+ __ Branch(miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
+ masm(), Context::STRING_FUNCTION_INDEX, a1, miss);
break;
-
- case SYMBOL_CHECK:
+ }
+ case SYMBOL_CHECK: {
// Check that the object is a symbol.
- __ GetObjectType(a1, a1, a3);
- __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE));
+ __ GetObjectType(reg, a1, a3);
+ __ Branch(miss, ne, a3, Operand(SYMBOL_TYPE));
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
+ masm(), Context::SYMBOL_FUNCTION_INDEX, a1, miss);
break;
-
+ }
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
- __ JumpIfSmi(a1, &fast);
- __ GetObjectType(a1, a0, a0);
- __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+ __ JumpIfSmi(reg, &fast);
+ __ GetObjectType(reg, a3, a3);
+ __ Branch(miss, ne, a3, Operand(HEAP_NUMBER_TYPE));
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
+ masm(), Context::NUMBER_FUNCTION_INDEX, a1, miss);
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&fast, eq, a1, Operand(t0));
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t0));
- __ bind(&fast);
+ GenerateBooleanCheck(reg, miss);
+
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- a0, holder, a3, a1, t0, name, &miss);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, a1, miss);
break;
}
}
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
-
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<Cell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
+ if (check != RECEIVER_MAP_CHECK) {
+ Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
+ reg = CheckPrototypes(
+ IC::CurrentTypeOf(prototype, isolate()),
+ a1, holder, a1, a3, t0, name, miss);
}
- Label success;
+ return reg;
+}
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
- // Return the generated code.
- return GetCode(function);
+void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
+ Register function,
+ Label* miss) {
+ ASSERT(function.is(a1));
+ // Check that the function really is a function.
+ GenerateFunctionCheck(function, a3, miss);
+ PatchGlobalProxy(object);
+ // Invoke the function.
+ __ InvokeFunction(a1, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind());
}
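
GenerateJumpFunction completes the decomposition of GenerateCallFunction: function-type check, proxy patch, then InvokeFunction. Note that the CALL_AS_FUNCTION / CALL_AS_METHOD decision has moved behind a call_kind() accessor; assuming it wraps the same decode the deleted code performed inline, it would read roughly:

    // Assumed shape of the accessor (defined in shared stub-cache code,
    // not shown in this diff); mirrors the decode deleted earlier.
    CallKind call_kind() {
      return CallICBase::Contextual::decode(extra_state())
                 ? CALL_AS_FUNCTION
                 : CALL_AS_METHOD;
    }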
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
Label miss;
GenerateNameCheck(name, &miss);
@@ -2704,7 +2465,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// Get the receiver from the stack.
__ lw(a1, MemOperand(sp, argc * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
+ CallInterceptorCompiler compiler(this, arguments(), a2, extra_state());
compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
&miss);
@@ -2713,14 +2474,12 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// Restore receiver.
__ lw(a0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
+ GenerateJumpFunction(object, a1, &miss);
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetCode(Code::FAST, name);
}
@@ -2730,11 +2489,6 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
-
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
object, holder, cell, function, Handle<String>::cast(name),
@@ -2744,41 +2498,14 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ // Potentially loads a closure that matches the shared function info of the
+ // function, rather than function.
GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
- __ sw(a3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in r1).
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
- GenerateMissBranch();
+ GenerateJumpFunction(object, a1, function);
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::NORMAL, name);
@@ -2790,9 +2517,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
// Stub never generated for non-global objects that require access
// checks.
@@ -2810,7 +2536,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ TailCallExternalReference(store_callback_property, 4, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2819,16 +2545,15 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 1, values);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2899,39 +2624,31 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
- __ li(scratch1(), Operand(Smi::FromInt(strict_mode())));
- __ push(scratch1()); // strict mode
-
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
// Return undefined if the maps of the full prototype chain are still the same.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ Ret();
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2982,6 +2699,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -2993,7 +2711,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(a0);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
@@ -3016,17 +2734,14 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<Type> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
- __ CheckMap(
- receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
__ li(a3, Operand(cell));
@@ -3038,8 +2753,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
+ HandlerFrontendFooter(name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
@@ -3047,12 +2761,12 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ mov(v0, t0);
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -3063,18 +2777,30 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
GenerateNameCheck(name, this->name(), &miss);
}
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
+ Label number_case;
+ Register match = scratch1();
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
- int receiver_count = receiver_maps->length();
+ Register map_reg = scratch2();
+
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<Type> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
+      // Check the map and tail call if there's a match. The compare is kept
+      // separate from the branch so the JumpIfSmi() above has a landing path.
+ __ Subu(match, map_reg, Operand(map));
+ if (type->Is(Type::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, map_reg, Operand(receiver_maps->at(current)));
+ eq, match, Operand(zero_reg));
}
}
ASSERT(number_of_handled_maps != 0);
@@ -3131,12 +2857,12 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
Register key = a0;
Register receiver = a1;
- __ JumpIfNotSmi(key, &miss_force_generic);
+ __ JumpIfNotSmi(key, &miss);
__ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ sra(a2, a0, kSmiTagSize);
__ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
@@ -3156,14 +2882,14 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
// Miss case, call the runtime.
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
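
The rewritten CompilePolymorphicIC dispatches on a type list rather than raw
receiver maps: a Smi receiver branches straight to the Number handler when one
is present, and heap objects are matched by map identity, with the compare
kept separate from the branch so the Smi path has somewhere to land. A minimal
C++ model of that dispatch logic (the names and structure are illustrative,
not V8 API):

    #include <utility>
    #include <vector>

    struct Map {};
    struct Handler { const char* name; };

    // One (map, handler) pair per polymorphic case; number_case may be null.
    const Handler* Dispatch(
        bool receiver_is_smi, const Map* receiver_map,
        const std::vector<std::pair<const Map*, const Handler*>>& cases,
        const Handler* number_case, const Handler* miss) {
      if (receiver_is_smi)                  // JumpIfSmi(receiver, smi_target)
        return number_case ? number_case : miss;
      for (const auto& c : cases)
        if (c.first == receiver_map)        // match = map_reg - map; eq zero
          return c.second;                  // tail call the handler
      return miss;
    }
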
diff --git a/chromium/v8/src/mirror-debugger.js b/chromium/v8/src/mirror-debugger.js
index 3b360bb5d77..4277136b609 100644
--- a/chromium/v8/src/mirror-debugger.js
+++ b/chromium/v8/src/mirror-debugger.js
@@ -117,7 +117,7 @@ function LookupMirror(handle) {
* @returns {Mirror} the mirror reflects the undefined value
*/
function GetUndefinedMirror() {
- return MakeMirror(void 0);
+ return MakeMirror(UNDEFINED);
}
@@ -482,7 +482,7 @@ ValueMirror.prototype.value = function() {
* @extends ValueMirror
*/
function UndefinedMirror() {
- %_CallFunction(this, UNDEFINED_TYPE, void 0, ValueMirror);
+ %_CallFunction(this, UNDEFINED_TYPE, UNDEFINED, ValueMirror);
}
inherits(UndefinedMirror, ValueMirror);
@@ -957,7 +957,7 @@ FunctionMirror.prototype.scopeCount = function() {
FunctionMirror.prototype.scope = function(index) {
if (this.resolved()) {
- return new ScopeMirror(void 0, this, index);
+ return new ScopeMirror(UNDEFINED, this, index);
}
};
@@ -1670,7 +1670,7 @@ FrameMirror.prototype.scopeCount = function() {
FrameMirror.prototype.scope = function(index) {
- return new ScopeMirror(this, void 0, index);
+ return new ScopeMirror(this, UNDEFINED, index);
};
diff --git a/chromium/v8/src/mksnapshot.cc b/chromium/v8/src/mksnapshot.cc
index 9cf9e2e8a42..457f7b3a9a3 100644
--- a/chromium/v8/src/mksnapshot.cc
+++ b/chromium/v8/src/mksnapshot.cc
@@ -43,49 +43,6 @@
using namespace v8;
-static const unsigned int kMaxCounters = 256;
-
-// A single counter in a counter collection.
-class Counter {
- public:
- static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) {
- name_[i] = name[i];
- }
- name_[i] = '\0';
- return &counter_;
- }
- private:
- int32_t counter_;
- uint8_t name_[kMaxNameSize];
-};
-
-
-// A set of counters and associated information. An instance of this
-// class is stored directly in the memory-mapped counters file if
-// the --save-counters option is used.
-class CounterCollection {
- public:
- CounterCollection() {
- magic_number_ = 0xDEADFACE;
- max_counters_ = kMaxCounters;
- max_name_size_ = Counter::kMaxNameSize;
- counters_in_use_ = 0;
- }
- Counter* GetNextCounter() {
- if (counters_in_use_ == kMaxCounters) return NULL;
- return &counters_[counters_in_use_++];
- }
- private:
- uint32_t magic_number_;
- uint32_t max_counters_;
- uint32_t max_name_size_;
- uint32_t counters_in_use_;
- Counter counters_[kMaxCounters];
-};
-
class Compressor {
public:
@@ -310,6 +267,7 @@ void DumpException(Handle<Message> message) {
int main(int argc, char** argv) {
V8::InitializeICU();
+ i::Isolate::SetCrashIfDefaultIsolateInitialized();
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -330,7 +288,10 @@ int main(int argc, char** argv) {
exit(1);
}
#endif
- Isolate* isolate = Isolate::GetCurrent();
+ i::FLAG_logfile_per_isolate = false;
+
+ Isolate* isolate = v8::Isolate::New();
+ isolate->Enter();
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Serializer::Enable(internal_isolate);
Persistent<Context> context;
@@ -348,7 +309,7 @@ int main(int argc, char** argv) {
// Capture 100 frames if anything happens.
V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
HandleScope scope(isolate);
- v8::Context::Scope(v8::Local<v8::Context>::New(isolate, context));
+ v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context));
const char* name = i::FLAG_extra_code;
FILE* file = i::OS::FOpen(name, "rb");
if (file == NULL) {
@@ -371,7 +332,7 @@ int main(int argc, char** argv) {
i += read;
}
fclose(file);
- Local<String> source = String::New(chars);
+ Local<String> source = String::NewFromUtf8(isolate, chars);
TryCatch try_catch;
Local<Script> script = Script::Compile(source);
if (try_catch.HasCaught()) {
@@ -397,7 +358,7 @@ int main(int argc, char** argv) {
internal_isolate->heap()->CollectAllGarbage(
i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *v8::Utils::OpenPersistent(context);
- context.Dispose();
+ context.Reset();
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
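
Two of the mksnapshot changes are worth calling out: the tool now creates and
enters its own isolate instead of relying on an implicit default, and the
unnamed v8::Context::Scope temporary has been given a name. The latter fixes a
classic RAII pitfall: a temporary is destroyed at the end of its full
expression, so the context was entered and then immediately exited. A
self-contained illustration (ScopeGuard is a stand-in type, not V8 API):

    #include <cstdio>

    struct ScopeGuard {
      ScopeGuard()  { std::puts("enter"); }
      ~ScopeGuard() { std::puts("exit"); }
    };

    void Broken() {
      ScopeGuard();       // temporary: prints "enter" then "exit" at once
      std::puts("work");  // runs with the scope already gone
    }

    void Fixed() {
      ScopeGuard scope;   // named object lives until the end of the block
      std::puts("work");  // runs inside the scope
    }
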
diff --git a/chromium/v8/src/v8preparserdll-main.cc b/chromium/v8/src/msan.h
index c0344d344ab..484c9fa3979 100644
--- a/chromium/v8/src/v8preparserdll-main.cc
+++ b/chromium/v8/src/msan.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,15 +25,25 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <windows.h>
+// MemorySanitizer support.
-#include "../include/v8-preparser.h"
+#ifndef V8_MSAN_H_
+#define V8_MSAN_H_
-extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
- // Do nothing.
- return TRUE;
-}
-}
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER)
+# define MEMORY_SANITIZER
+#endif
+
+#ifdef MEMORY_SANITIZER
+# include <sanitizer/msan_interface.h>
+// Marks a memory range as fully initialized.
+# define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
+#else
+# define MSAN_MEMORY_IS_INITIALIZED(p, s)
+#endif
+
+#endif // V8_MSAN_H_
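
The new header follows the usual feature-detection pattern: define
__has_feature away on compilers that lack it, promote the feature check to a
MEMORY_SANITIZER define, and make the annotation expand to nothing when MSan
is off. A hypothetical call site (the function and buffer are illustrative):

    #include <cstddef>
    #include "msan.h"

    // After filling a buffer through code MSan cannot see (e.g. generated
    // code or an uninstrumented library), mark it as initialized so later
    // reads are not reported as uses of uninitialized memory.
    void MarkGeneratedOutputInitialized(void* buffer, size_t size) {
      MSAN_MEMORY_IS_INITIALIZED(buffer, size);  // no-op without MSan
    }
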
diff --git a/chromium/v8/src/object-observe.js b/chromium/v8/src/object-observe.js
index 1035792e8b6..dfa57b83126 100644
--- a/chromium/v8/src/object-observe.js
+++ b/chromium/v8/src/object-observe.js
@@ -72,12 +72,12 @@ function ObservationWeakMap(map) {
ObservationWeakMap.prototype = {
get: function(key) {
key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return void 0;
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
return %WeakCollectionGet(this.map_, key);
},
set: function(key, value) {
key = %UnwrapGlobalProxy(key);
- if (!IS_SPEC_OBJECT(key)) return void 0;
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
%WeakCollectionSet(this.map_, key, value);
},
has: function(key) {
@@ -128,11 +128,12 @@ function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
}
var defaultAcceptTypes = TypeMapCreateFromList([
- 'new',
- 'updated',
- 'deleted',
- 'prototype',
- 'reconfigured'
+ 'add',
+ 'update',
+ 'delete',
+ 'setPrototype',
+ 'reconfigure',
+ 'preventExtensions'
]);
// An Observer is a registration to observe an object by a callback with
@@ -284,11 +285,6 @@ function AcceptArgIsValid(arg) {
arg.length < 0)
return false;
- var length = arg.length;
- for (var i = 0; i < length; i++) {
- if (!IS_STRING(arg[i]))
- return false;
- }
return true;
}
@@ -357,9 +353,9 @@ function ObjectUnobserve(object, callback) {
}
function ArrayObserve(object, callback) {
- return ObjectObserve(object, callback, ['new',
- 'updated',
- 'deleted',
+ return ObjectObserve(object, callback, ['add',
+ 'update',
+ 'delete',
'splice']);
}
@@ -388,11 +384,31 @@ function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
observationState.pendingObservers = { __proto__: null };
observationState.pendingObservers[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
- %SetObserverDeliveryPending();
+ %SetMicrotaskPending(true);
}
-function ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord,
- skipAccessCheck) {
+function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
+ if (!ObjectInfoHasActiveObservers(objectInfo))
+ return;
+
+ var hasType = !IS_UNDEFINED(type);
+ var newRecord = hasType ?
+ { object: ObjectInfoGetObject(objectInfo), type: type } :
+ { object: ObjectInfoGetObject(objectInfo) };
+
+ for (var prop in changeRecord) {
+ if (prop === 'object' || (hasType && prop === 'type')) continue;
+ %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
+ READ_ONLY + DONT_DELETE);
+ }
+ ObjectFreeze(newRecord);
+
+ ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord,
+ true /* skip access check */);
+}
+
+function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord,
+ skipAccessCheck) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(changeRecord.name)) return;
@@ -440,7 +456,7 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
ObjectFreeze(changeRecord);
ObjectFreeze(changeRecord.removed);
- ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
+ ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
function NotifyChange(type, object, name, oldValue) {
@@ -448,11 +464,22 @@ function NotifyChange(type, object, name, oldValue) {
if (!ObjectInfoHasActiveObservers(objectInfo))
return;
- var changeRecord = (arguments.length < 4) ?
- { type: type, object: object, name: name } :
- { type: type, object: object, name: name, oldValue: oldValue };
+ var changeRecord;
+ if (arguments.length == 2) {
+ changeRecord = { type: type, object: object };
+ } else if (arguments.length == 3) {
+ changeRecord = { type: type, object: object, name: name };
+ } else {
+ changeRecord = {
+ type: type,
+ object: object,
+ name: name,
+ oldValue: oldValue
+ };
+ }
+
ObjectFreeze(changeRecord);
- ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
+ ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
var notifierPrototype = {};
@@ -467,19 +494,7 @@ function ObjectNotifierNotify(changeRecord) {
if (!IS_STRING(changeRecord.type))
throw MakeTypeError("observe_type_non_string");
- if (!ObjectInfoHasActiveObservers(objectInfo))
- return;
-
- var newRecord = { object: ObjectInfoGetObject(objectInfo) };
- for (var prop in changeRecord) {
- if (prop === 'object') continue;
- %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
- READ_ONLY + DONT_DELETE);
- }
- ObjectFreeze(newRecord);
-
- ObjectInfoEnqueueChangeRecord(objectInfo, newRecord,
- true /* skip access check */);
+ ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord);
}
function ObjectNotifierPerformChange(changeType, changeFn) {
@@ -496,11 +511,16 @@ function ObjectNotifierPerformChange(changeType, changeFn) {
throw MakeTypeError("observe_perform_non_function");
ObjectInfoAddPerformingType(objectInfo, changeType);
+
+ var changeRecord;
try {
- %_CallFunction(void 0, changeFn);
+ changeRecord = %_CallFunction(UNDEFINED, changeFn);
} finally {
ObjectInfoRemovePerformingType(objectInfo, changeType);
}
+
+ if (IS_SPEC_OBJECT(changeRecord))
+ ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, changeType);
}
function ObjectGetNotifier(object) {
@@ -530,8 +550,8 @@ function CallbackDeliverPending(callback) {
%MoveArrayContents(callbackInfo, delivered);
try {
- %_CallFunction(void 0, delivered, callback);
- } catch (ex) {}
+ %_CallFunction(UNDEFINED, delivered, callback);
+ } catch (ex) {} // TODO(rossberg): perhaps log uncaught exceptions.
return true;
}
@@ -542,15 +562,16 @@ function ObjectDeliverChangeRecords(callback) {
while (CallbackDeliverPending(callback)) {}
}
-function DeliverChangeRecords() {
- while (observationState.pendingObservers) {
- var pendingObservers = observationState.pendingObservers;
+function ObserveMicrotaskRunner() {
+ var pendingObservers = observationState.pendingObservers;
+ if (pendingObservers) {
observationState.pendingObservers = null;
for (var i in pendingObservers) {
CallbackDeliverPending(pendingObservers[i]);
}
}
}
+RunMicrotasks.runners.push(ObserveMicrotaskRunner);
function SetupObjectObserve() {
%CheckIsBootstrapping();
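
Beyond the change-record renames ('new'/'updated'/'deleted' become
'add'/'update'/'delete', plus 'setPrototype', 'reconfigure' and
'preventExtensions'), delivery now rides the generic microtask machinery:
ObserveMicrotaskRunner is pushed onto RunMicrotasks.runners, and enqueueing a
record merely sets the microtask-pending flag. A rough C++ sketch of that
runner-registration shape (assumed semantics, not V8 internals):

    #include <functional>
    #include <vector>

    std::vector<std::function<void()>>& MicrotaskRunners() {
      static std::vector<std::function<void()>> runners;  // registered once
      return runners;
    }

    // The microtask checkpoint drains every registered runner; each runner
    // is responsible for draining its own pending queue.
    void RunMicrotasks() {
      for (const auto& run : MicrotaskRunners()) run();
    }
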
diff --git a/chromium/v8/src/objects-debug.cc b/chromium/v8/src/objects-debug.cc
index 5d9e161a7e5..ed93e1dc9e1 100644
--- a/chromium/v8/src/objects-debug.cc
+++ b/chromium/v8/src/objects-debug.cc
@@ -95,6 +95,9 @@ void HeapObject::HeapObjectVerify() {
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ ConstantPoolArray::cast(this)->ConstantPoolArrayVerify();
+ break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
@@ -240,6 +243,7 @@ void Symbol::SymbolVerify() {
CHECK(HasHashCode());
CHECK_GT(Hash(), 0);
CHECK(name()->IsUndefined() || name()->IsString());
+ CHECK(flags()->IsSmi());
}
@@ -303,6 +307,13 @@ void ExternalDoubleArray::ExternalDoubleArrayVerify() {
}
+bool JSObject::ElementsAreSafeToExamine() {
+ return (FLAG_use_gvn && FLAG_use_allocation_folding) ||
+ reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map();
+}
+
+
void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
@@ -330,10 +341,9 @@ void JSObject::JSObjectVerify() {
}
}
- // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
- // allocation folding is turned off.
- if (reinterpret_cast<Map*>(elements()) !=
- GetHeap()->one_pointer_filler_map()) {
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
@@ -357,9 +367,6 @@ void Map::MapVerify() {
SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
}
- ASSERT(!is_observed() || instance_type() < FIRST_JS_OBJECT_TYPE ||
- instance_type() > LAST_JS_OBJECT_TYPE ||
- has_slow_elements_kind() || has_external_array_elements());
}
@@ -438,6 +445,11 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
+void ConstantPoolArray::ConstantPoolArrayVerify() {
+ CHECK(IsConstantPoolArray());
+}
+
+
void JSGeneratorObject::JSGeneratorObjectVerify() {
// In an expression like "new g()", there can be a point where a generator
// object is allocated but its fields are all undefined, as it hasn't yet been
@@ -664,16 +676,20 @@ void Code::CodeVerify() {
}
-void Code::VerifyEmbeddedMapsDependency() {
+void Code::VerifyEmbeddedObjectsDependency() {
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Map* map = Map::cast(it.rinfo()->target_object());
- if (map->CanTransition()) {
+ Object* obj = it.rinfo()->target_object();
+ if (IsWeakEmbeddedObject(kind(), obj)) {
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
CHECK(map->dependent_code()->Contains(
DependentCode::kWeaklyEmbeddedGroup, this));
+ } else if (obj->IsJSObject()) {
+ Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table();
+ WeakHashTable* table = WeakHashTable::cast(raw_table);
+ CHECK(DependentCode::cast(table->Lookup(obj))->Contains(
+ DependentCode::kWeaklyEmbeddedGroup, this));
}
}
}
@@ -683,10 +699,9 @@ void Code::VerifyEmbeddedMapsDependency() {
void JSArray::JSArrayVerify() {
JSObjectVerify();
CHECK(length()->IsNumber() || length()->IsUndefined());
- // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
- // allocation folding is turned off.
- if (reinterpret_cast<Map*>(elements()) !=
- GetHeap()->one_pointer_filler_map()) {
+ // If a GC was caused while constructing this array, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
CHECK(elements()->IsUndefined() ||
elements()->IsFixedArray() ||
elements()->IsFixedDoubleArray());
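
Both verifiers now share ElementsAreSafeToExamine() instead of open-coding the
filler-map comparison: if a GC ran mid-construction, the elements slot may
still hold the one-pointer filler map and must not be inspected. The guard,
stripped down to plain pointers:

    // True once the elements slot no longer points at the one-pointer
    // filler map left behind by a GC during object construction.
    bool ElementsAreSafeToExamine(const void* elements,
                                  const void* one_pointer_filler_map) {
      return elements != one_pointer_filler_map;
    }

    void VerifyElements(const void* elements, const void* filler_map) {
      if (!ElementsAreSafeToExamine(elements, filler_map)) return;  // skip
      // ... map / elements-kind consistency checks would go here ...
    }
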
diff --git a/chromium/v8/src/objects-inl.h b/chromium/v8/src/objects-inl.h
index 89abe504335..2db3c04f1f2 100644
--- a/chromium/v8/src/objects-inl.h
+++ b/chromium/v8/src/objects-inl.h
@@ -80,7 +80,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
- ASSERT(object->Is##type()); \
+ SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
@@ -133,7 +133,7 @@ PropertyDetails PropertyDetails::AsDeleted() {
bool Object::IsFixedArrayBase() {
- return IsFixedArray() || IsFixedDoubleArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray();
}
@@ -150,25 +150,6 @@ bool Object::IsAccessorInfo() {
}
-bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
- // There is a constraint on the object; check.
- if (!this->IsJSObject()) return false;
- // Fetch the constructor function of the object.
- Object* cons_obj = JSObject::cast(this)->map()->constructor();
- if (!cons_obj->IsJSFunction()) return false;
- JSFunction* fun = JSFunction::cast(cons_obj);
- // Iterate through the chain of inheriting function templates to
- // see if the required one occurs.
- for (Object* type = fun->shared()->function_data();
- type->IsFunctionTemplateInfo();
- type = FunctionTemplateInfo::cast(type)->parent_template()) {
- if (type == expected) return true;
- }
- // Didn't find the required type in the inheritance chain.
- return false;
-}
-
-
bool Object::IsSmi() {
return HAS_SMI_TAG(this);
}
@@ -285,14 +266,13 @@ bool Object::HasValidElements() {
MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
- Representation representation,
- PretenureFlag tenure) {
+ Representation representation) {
if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
if (IsUninitialized()) {
- return heap->AllocateHeapNumber(0, tenure);
+ return heap->AllocateHeapNumber(0);
}
- return heap->AllocateHeapNumber(Number(), tenure);
+ return heap->AllocateHeapNumber(Number());
}
@@ -572,6 +552,7 @@ TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
bool Object::IsJSWeakCollection() {
@@ -1028,6 +1009,12 @@ MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) {
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_INT32_FIELD(p, offset) \
+ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT32_FIELD(p, offset, value) \
+ (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_INT64_FIELD(p, offset) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
@@ -1184,7 +1171,7 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- ASSERT(heap != NULL);
+ SLOW_ASSERT(heap != NULL);
return heap;
}
@@ -1301,7 +1288,7 @@ FixedArrayBase* JSObject::elements() {
void JSObject::ValidateElements() {
-#if DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
@@ -1323,6 +1310,28 @@ bool JSObject::ShouldTrackAllocationInfo() {
}
+void AllocationSite::Initialize() {
+ set_transition_info(Smi::FromInt(0));
+ SetElementsKind(GetInitialFastElementsKind());
+ set_nested_site(Smi::FromInt(0));
+ set_memento_create_count(Smi::FromInt(0));
+ set_memento_found_count(Smi::FromInt(0));
+ set_pretenure_decision(Smi::FromInt(0));
+ set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
+
+
+void AllocationSite::MarkZombie() {
+ ASSERT(!IsZombie());
+ set_pretenure_decision(Smi::FromInt(kZombie));
+  // Clear all non-Smi fields.
+ set_transition_info(Smi::FromInt(0));
+ set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
+
+
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
@@ -1349,10 +1358,67 @@ AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
inline bool AllocationSite::CanTrack(InstanceType type) {
+ if (FLAG_allocation_site_pretenuring) {
+ return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
+ }
return type == JS_ARRAY_TYPE;
}
+inline DependentCode::DependencyGroup AllocationSite::ToDependencyGroup(
+ Reason reason) {
+ switch (reason) {
+ case TENURING:
+ return DependentCode::kAllocationSiteTenuringChangedGroup;
+ break;
+ case TRANSITIONS:
+ return DependentCode::kAllocationSiteTransitionChangedGroup;
+ break;
+ }
+ UNREACHABLE();
+ return DependentCode::kAllocationSiteTransitionChangedGroup;
+}
+
+
+inline void AllocationSite::IncrementMementoFoundCount() {
+ int value = memento_found_count()->value();
+ set_memento_found_count(Smi::FromInt(value + 1));
+}
+
+
+inline void AllocationSite::IncrementMementoCreateCount() {
+ ASSERT(FLAG_allocation_site_pretenuring);
+ int value = memento_create_count()->value();
+ set_memento_create_count(Smi::FromInt(value + 1));
+}
+
+
+inline bool AllocationSite::DigestPretenuringFeedback() {
+ bool decision_made = false;
+ if (!PretenuringDecisionMade()) {
+ int create_count = memento_create_count()->value();
+ if (create_count >= kPretenureMinimumCreated) {
+ int found_count = memento_found_count()->value();
+ double ratio = static_cast<double>(found_count) / create_count;
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSite: %p (created, found, ratio) (%d, %d, %f)\n",
+ static_cast<void*>(this), create_count, found_count, ratio);
+ }
+ int result = ratio >= kPretenureRatio ? kTenure : kDontTenure;
+ set_pretenure_decision(Smi::FromInt(result));
+ decision_made = true;
+ // TODO(mvstanton): if the decision represents a change, any dependent
+ // code registered for pretenuring changes should be deopted.
+ }
+ }
+
+ // Clear feedback calculation fields until the next gc.
+ set_memento_found_count(Smi::FromInt(0));
+ set_memento_create_count(Smi::FromInt(0));
+ return decision_made;
+}
+
+
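
The pretenuring feedback reduces to a ratio test once enough mementos have
been created: tenure when found/created crosses kPretenureRatio. A standalone
restatement of DigestPretenuringFeedback (the constants are parameters here;
their values are not shown in this diff):

    enum PretenureDecision { kUndecided, kDontTenure, kTenure };

    PretenureDecision Digest(int memento_create_count,
                             int memento_found_count,
                             int minimum_created,  // kPretenureMinimumCreated
                             double pretenure_ratio) {  // kPretenureRatio
      if (memento_create_count < minimum_created) return kUndecided;
      double ratio = static_cast<double>(memento_found_count) /
                     memento_create_count;
      return ratio >= pretenure_ratio ? kTenure : kDontTenure;
    }
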
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
object->ValidateElements();
ElementsKind elements_kind = object->map()->elements_kind();
@@ -1535,65 +1601,6 @@ MaybeObject* JSObject::ResetElements() {
}
-MaybeObject* JSObject::AllocateStorageForMap(Map* map) {
- ASSERT(this->map()->inobject_properties() == map->inobject_properties());
- ElementsKind obj_kind = this->map()->elements_kind();
- ElementsKind map_kind = map->elements_kind();
- if (map_kind != obj_kind) {
- ElementsKind to_kind = map_kind;
- if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
- IsDictionaryElementsKind(obj_kind)) {
- to_kind = obj_kind;
- }
- MaybeObject* maybe_obj =
- IsDictionaryElementsKind(to_kind) ? NormalizeElements()
- : TransitionElementsKind(to_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- MaybeObject* maybe_map = map->AsElementsKind(to_kind);
- if (!maybe_map->To(&map)) return maybe_map;
- }
- int total_size =
- map->NumberOfOwnDescriptors() + map->unused_property_fields();
- int out_of_object = total_size - map->inobject_properties();
- if (out_of_object != properties()->length()) {
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(out_of_object);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
-}
-
-
-MaybeObject* JSObject::MigrateInstance() {
- // Converting any field to the most specific type will cause the
- // GeneralizeFieldRepresentation algorithm to create the most general existing
- // transition that matches the object. This achieves what is needed.
- Map* original_map = map();
- MaybeObject* maybe_result = GeneralizeFieldRepresentation(
- 0, Representation::None(), ALLOW_AS_CONSTANT);
- JSObject* result;
- if (FLAG_trace_migration && maybe_result->To(&result)) {
- PrintInstanceMigration(stdout, original_map, result->map());
- }
- return maybe_result;
-}
-
-
-MaybeObject* JSObject::TryMigrateInstance() {
- Map* new_map = map()->CurrentMapForDeprecated();
- if (new_map == NULL) return Smi::FromInt(0);
- Map* original_map = map();
- MaybeObject* maybe_result = MigrateToMap(new_map);
- JSObject* result;
- if (FLAG_trace_migration && maybe_result->To(&result)) {
- PrintInstanceMigration(stdout, original_map, result->map());
- }
- return maybe_result;
-}
-
-
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
DisallowHeapAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
@@ -1629,13 +1636,6 @@ Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
}
-int JSObject::LastAddedFieldIndex() {
- Map* map = this->map();
- int last_added = map->LastAdded();
- return map->instance_descriptors()->GetFieldIndex(last_added);
-}
-
-
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@@ -1719,7 +1719,9 @@ int JSObject::GetHeaderSize() {
case JS_MESSAGE_OBJECT_TYPE:
return JSMessageObject::kSize;
default:
- UNREACHABLE();
+ // TODO(jkummerow): Re-enable this. Blink currently hits this
+ // from its CustomElementConstructorBuilder.
+ // UNREACHABLE();
return 0;
}
}
@@ -1946,13 +1948,14 @@ void Object::VerifyApiCallResultType() {
FixedArrayBase* FixedArrayBase::cast(Object* object) {
- ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
+ ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray() ||
+ object->IsConstantPoolArray());
return reinterpret_cast<FixedArrayBase*>(object);
}
Object* FixedArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
+ SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
@@ -2045,6 +2048,98 @@ bool FixedDoubleArray::is_the_hole(int index) {
}
+SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset)
+SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
+
+
+int ConstantPoolArray::first_int64_index() {
+ return 0;
+}
+
+
+int ConstantPoolArray::count_of_int64_entries() {
+ return first_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_ptr_entries() {
+ return first_int32_index() - first_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_int32_entries() {
+ return length() - first_int32_index();
+}
+
+
+void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ set_first_ptr_index(number_of_int64_entries);
+ set_first_int32_index(number_of_int64_entries + number_of_ptr_entries);
+ set_length(number_of_int64_entries + number_of_ptr_entries +
+ number_of_int32_entries);
+}
+
+
+int64_t ConstantPoolArray::get_int64_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= 0 && index < first_ptr_index());
+ return READ_INT64_FIELD(this, OffsetOfElementAt(index));
+}
+
+double ConstantPoolArray::get_int64_entry_as_double(int index) {
+ STATIC_ASSERT(kDoubleSize == kInt64Size);
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= 0 && index < first_ptr_index());
+ return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+Object* ConstantPoolArray::get_ptr_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ return READ_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+int32_t ConstantPoolArray::get_int32_entry(int index) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int32_index() && index < length());
+ return READ_INT32_FIELD(this, OffsetOfElementAt(index));
+}
+
+
+void ConstantPoolArray::set(int index, Object* value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_ptr_index() && index < first_int32_index());
+ WRITE_FIELD(this, OffsetOfElementAt(index), value);
+ WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, int64_t value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, double value) {
+ STATIC_ASSERT(kDoubleSize == kInt64Size);
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= first_int64_index() && index < first_ptr_index());
+ WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, int32_t value) {
+ ASSERT(map() == GetHeap()->constant_pool_array_map());
+ ASSERT(index >= this->first_int32_index() && index < length());
+ WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
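
The new ConstantPoolArray packs three regions back to back (int64 entries,
then tagged pointers, then int32 entries) and derives every count from two
boundary indices plus the length, exactly as the accessors above do. The same
arithmetic as a plain struct:

    struct ConstantPoolLayout {
      int first_ptr_index;    // end of the int64 region
      int first_int32_index;  // end of the pointer region
      int length;             // total number of entries

      int count_of_int64_entries() const { return first_ptr_index; }
      int count_of_ptr_entries() const {
        return first_int32_index - first_ptr_index;
      }
      int count_of_int32_entries() const {
        return length - first_int32_index;
      }
    };
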
@@ -2250,6 +2345,11 @@ int DescriptorArray::SearchWithCache(Name* name, Map* map) {
}
+PropertyDetails Map::GetLastDescriptorDetails() {
+ return instance_descriptors()->GetDetails(LastAdded());
+}
+
+
void Map::LookupDescriptor(JSObject* holder,
Name* name,
LookupResult* result) {
@@ -2267,7 +2367,8 @@ void Map::LookupTransition(JSObject* holder,
TransitionArray* transition_array = transitions();
int number = transition_array->Search(name);
if (number != TransitionArray::kNotFound) {
- return result->TransitionResult(holder, number);
+ return result->TransitionResult(
+ holder, transition_array->GetTarget(number));
}
}
result->NotFound();
@@ -2537,6 +2638,7 @@ void SeededNumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(ConstantPoolArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
@@ -2648,6 +2750,8 @@ bool Name::Equals(Name* other) {
ACCESSORS(Symbol, name, Object, kNameOffset)
+ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
+BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
bool String::Equals(String* other) {
@@ -3432,6 +3536,12 @@ int HeapObject::SizeFromMap(Map* map) {
return FixedDoubleArray::SizeFor(
reinterpret_cast<FixedDoubleArray*>(this)->length());
}
+ if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
+ return ConstantPoolArray::SizeFor(
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
+ reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
+ }
ASSERT(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
}
@@ -3599,16 +3709,13 @@ bool Map::owns_descriptors() {
}
-void Map::set_is_observed(bool is_observed) {
- ASSERT(instance_type() < FIRST_JS_OBJECT_TYPE ||
- instance_type() > LAST_JS_OBJECT_TYPE ||
- has_slow_elements_kind() || has_external_array_elements());
- set_bit_field3(IsObserved::update(bit_field3(), is_observed));
+void Map::set_has_instance_call_handler() {
+ set_bit_field3(HasInstanceCallHandler::update(bit_field3(), true));
}
-bool Map::is_observed() {
- return IsObserved::decode(bit_field3());
+bool Map::has_instance_call_handler() {
+ return HasInstanceCallHandler::decode(bit_field3());
}
@@ -3788,14 +3895,14 @@ InlineCacheState Code::ic_state() {
}
-Code::ExtraICState Code::extra_ic_state() {
+ExtraICState Code::extra_ic_state() {
ASSERT((is_inline_cache_stub() && !needs_extended_extra_ic_state(kind()))
|| ic_state() == DEBUG_STUB);
return ExtractExtraICStateFromFlags(flags());
}
-Code::ExtraICState Code::extended_extra_ic_state() {
+ExtraICState Code::extended_extra_ic_state() {
ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
ASSERT(needs_extended_extra_ic_state(kind()));
return ExtractExtendedExtraICStateFromFlags(flags());
@@ -3808,11 +3915,23 @@ Code::StubType Code::type() {
int Code::arguments_count() {
- ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB);
+ ASSERT(is_call_stub() || is_keyed_call_stub() ||
+ kind() == STUB || is_handler());
return ExtractArgumentsCountFromFlags(flags());
}
+// For initialization.
+void Code::set_raw_kind_specific_flags1(int value) {
+ WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
+}
+
+
+void Code::set_raw_kind_specific_flags2(int value) {
+ WRITE_INT_FIELD(this, kKindSpecificFlags2Offset, value);
+}
+
+
inline bool Code::is_crankshafted() {
return IsCrankshaftedField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
@@ -3827,29 +3946,14 @@ inline void Code::set_is_crankshafted(bool value) {
int Code::major_key() {
- ASSERT(kind() == STUB ||
- kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC ||
- kind() == COMPARE_NIL_IC ||
- kind() == STORE_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == TO_BOOLEAN_IC);
+ ASSERT(has_major_key());
return StubMajorKeyField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_major_key(int major) {
- ASSERT(kind() == STUB ||
- kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC ||
- kind() == COMPARE_NIL_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == STORE_IC ||
- kind() == KEYED_STORE_IC ||
- kind() == TO_BOOLEAN_IC);
+ ASSERT(has_major_key());
ASSERT(0 <= major && major < 256);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = StubMajorKeyField::update(previous, major);
@@ -3857,16 +3961,18 @@ void Code::set_major_key(int major) {
}
-bool Code::is_pregenerated() {
- return (kind() == STUB && IsPregeneratedField::decode(flags()));
-}
-
-
-void Code::set_is_pregenerated(bool value) {
- ASSERT(kind() == STUB);
- Flags f = flags();
- f = static_cast<Flags>(IsPregeneratedField::update(f, value));
- set_flags(f);
+bool Code::has_major_key() {
+ return kind() == STUB ||
+ kind() == HANDLER ||
+ kind() == BINARY_OP_IC ||
+ kind() == COMPARE_IC ||
+ kind() == COMPARE_NIL_IC ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
+ kind() == STORE_IC ||
+ kind() == KEYED_STORE_IC ||
+ kind() == KEYED_CALL_IC ||
+ kind() == TO_BOOLEAN_IC;
}
@@ -4077,6 +4183,11 @@ bool Code::is_inline_cache_stub() {
}
+bool Code::is_keyed_stub() {
+ return is_keyed_load_stub() || is_keyed_store_stub() || is_keyed_call_stub();
+}
+
+
bool Code::is_debug_stub() {
return ic_state() == DEBUG_STUB;
}
@@ -4109,9 +4220,9 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
+ InlineCacheHolderFlag holder,
StubType type,
- int argc,
- InlineCacheHolderFlag holder) {
+ int argc) {
return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
}
@@ -4126,12 +4237,12 @@ InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
}
-Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
+ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
return ExtraICStateField::decode(flags);
}
-Code::ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
+ExtraICState Code::ExtractExtendedExtraICStateFromFlags(
Flags flags) {
return ExtendedExtraICStateField::decode(flags);
}
@@ -4364,6 +4475,17 @@ void Map::set_transitions(TransitionArray* transition_array,
// When there is another reference to the array somewhere (e.g. a handle),
// not zapping turns from a waste of memory into a source of crashes.
if (HasTransitionArray()) {
+#ifdef DEBUG
+ for (int i = 0; i < transitions()->number_of_transitions(); i++) {
+ Map* target = transitions()->GetTarget(i);
+ if (target->instance_descriptors() == instance_descriptors()) {
+ Name* key = transitions()->GetKey(i);
+ int new_target_index = transition_array->Search(key);
+ ASSERT(new_target_index != TransitionArray::kNotFound);
+ ASSERT(transition_array->GetTarget(new_target_index) == target);
+ }
+ }
+#endif
ASSERT(transitions() != transition_array);
ZapTransitions();
}
@@ -4495,6 +4617,13 @@ ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
+ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
+ACCESSORS_TO_SMI(AllocationSite, memento_found_count, kMementoFoundCountOffset)
+ACCESSORS_TO_SMI(AllocationSite, memento_create_count,
+ kMementoCreateCountOffset)
+ACCESSORS_TO_SMI(AllocationSite, pretenure_decision, kPretenureDecisionOffset)
+ACCESSORS(AllocationSite, dependent_code, DependentCode,
+ kDependentCodeOffset)
ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
@@ -4746,6 +4875,8 @@ bool SharedFunctionInfo::is_classic_mode() {
BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
kExtendedModeFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
+ kInlineBuiltin)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
kNameShouldPrintAsAnonymous)
@@ -4806,6 +4937,7 @@ Code* SharedFunctionInfo::code() {
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
+ ASSERT(value->kind() != Code::OPTIMIZED_FUNCTION);
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
}
@@ -5010,6 +5142,11 @@ void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = IsOptimized();
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
+ if (was_optimized && is_optimized) {
+ shared()->EvictFromOptimizedCodeMap(
+ this->code(), "Replacing with another optimized code");
+ }
+
set_code(code);
// Add/remove the function from the list of optimized functions for this
@@ -5259,23 +5396,29 @@ INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
-// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs.
-void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+void Code::WipeOutHeader() {
+ WRITE_FIELD(this, kRelocationInfoOffset, NULL);
+ WRITE_FIELD(this, kHandlerTableOffset, NULL);
+ WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
+ // Do not wipe out e.g. a minor key.
+ if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
+ }
}
Object* Code::type_feedback_info() {
ASSERT(kind() == FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+ return raw_type_feedback_info();
}
void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
ASSERT(kind() == FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ set_raw_type_feedback_info(value, mode);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
value, mode);
}
@@ -5283,13 +5426,13 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
Object* Code::next_code_link() {
CHECK(kind() == OPTIMIZED_FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+ return raw_type_feedback_info();
}
void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
CHECK(kind() == OPTIMIZED_FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ set_raw_type_feedback_info(value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
value, mode);
}
@@ -5298,8 +5441,7 @@ void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
int Code::stub_info() {
ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
kind() == BINARY_OP_IC || kind() == LOAD_IC);
- Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
- return Smi::cast(value)->value();
+ return Smi::cast(raw_type_feedback_info())->value();
}
@@ -5312,7 +5454,7 @@ void Code::set_stub_info(int value) {
kind() == KEYED_LOAD_IC ||
kind() == STORE_IC ||
kind() == KEYED_STORE_IC);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
+ set_raw_type_feedback_info(Smi::FromInt(value));
}
@@ -5389,6 +5531,16 @@ void JSArrayBuffer::set_is_external(bool value) {
}
+bool JSArrayBuffer::should_be_freed() {
+ return BooleanBit::get(flag(), kShouldBeFreed);
+}
+
+
+void JSArrayBuffer::set_should_be_freed(bool value) {
+ set_flag(BooleanBit::set(flag(), kShouldBeFreed, value));
+}
+
+
ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
ACCESSORS(JSArrayBuffer, weak_first_view, Object, kWeakFirstViewOffset)
@@ -5457,19 +5609,24 @@ ElementsKind JSObject::GetElementsKind() {
#if DEBUG
FixedArrayBase* fixed_array =
reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
- Map* map = fixed_array->map();
- ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(kind) &&
- (fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array())) ||
- (kind == DICTIONARY_ELEMENTS &&
+
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if (ElementsAreSafeToExamine()) {
+ Map* map = fixed_array->map();
+ ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
+ (map == GetHeap()->fixed_array_map() ||
+ map == GetHeap()->fixed_cow_array_map())) ||
+ (IsFastDoubleElementsKind(kind) &&
+ (fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array())) ||
+ (kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
+ fixed_array->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
+ (elements()->IsFixedArray() && elements()->length() >= 2));
+ }
#endif
return kind;
}
@@ -5729,19 +5886,23 @@ Object* JSReceiver::GetConstructor() {
}
-bool JSReceiver::HasProperty(Name* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetPropertyAttribute(name) != ABSENT;
+ return object->GetPropertyAttribute(*name) != ABSENT;
}
-bool JSReceiver::HasLocalProperty(Name* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetLocalPropertyAttribute(name) != ABSENT;
+ return object->GetLocalPropertyAttribute(*name) != ABSENT;
}
@@ -5763,41 +5924,47 @@ PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
}
-// TODO(504): this may be useful in other places too where JSGlobalProxy
-// is used.
-Object* JSObject::BypassGlobalProxy() {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return GetHeap()->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- return proto;
- }
- return this;
+bool JSGlobalObject::IsDetached() {
+ return JSGlobalProxy::cast(global_receiver())->IsDetachedFrom(this);
}
-MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
+bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) {
+ return GetPrototype() != global;
+}
+
+
+Handle<Object> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
+ return object->IsJSProxy()
+ ? JSProxy::GetOrCreateIdentityHash(Handle<JSProxy>::cast(object))
+ : JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
+}
+
+
+Object* JSReceiver::GetIdentityHash() {
return IsJSProxy()
- ? JSProxy::cast(this)->GetIdentityHash(flag)
- : JSObject::cast(this)->GetIdentityHash(flag);
+ ? JSProxy::cast(this)->GetIdentityHash()
+ : JSObject::cast(this)->GetIdentityHash();
}
-bool JSReceiver::HasElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
+bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true) != ABSENT;
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, true) != ABSENT;
}
-bool JSReceiver::HasLocalElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
+bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false) != ABSENT;
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, false) != ABSENT;
}
@@ -5853,7 +6020,7 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
Object* function_template = expected_receiver_type();
if (!function_template->IsFunctionTemplateInfo()) return true;
- return receiver->IsInstanceOf(FunctionTemplateInfo::cast(function_template));
+ return FunctionTemplateInfo::cast(function_template)->IsTemplateFor(receiver);
}
@@ -5964,6 +6131,7 @@ uint32_t NameDictionaryShape::HashForObject(Name* key, Object* other) {
MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) {
+ ASSERT(key->IsUniqueName());
return key;
}
@@ -5976,16 +6144,14 @@ bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
template <int entrysize>
uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
- MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- return Smi::cast(maybe_hash->ToObjectChecked())->value();
+ return Smi::cast(key->GetHash())->value();
}
template <int entrysize>
uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
Object* other) {
- MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
- return Smi::cast(maybe_hash->ToObjectChecked())->value();
+ return Smi::cast(other->GetHash())->value();
}
@@ -5996,6 +6162,34 @@ MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Heap* heap,
}
+template <int entrysize>
+bool WeakHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+ return key->SameValue(other);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::Hash(Object* key) {
+ intptr_t hash = reinterpret_cast<intptr_t>(key);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key,
+ Object* other) {
+ intptr_t hash = reinterpret_cast<intptr_t>(other);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+MaybeObject* WeakHashTableShape<entrysize>::AsObject(Heap* heap,
+ Object* key) {
+ return key;
+}
+
+
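
WeakHashTableShape hashes by address: the low 32 bits of the key's pointer
serve as the hash, with SameValue used for the final match, and AsObject
returns the key unchanged. The hash in isolation:

    #include <cstdint>

    // Matches WeakHashTableShape::Hash above: the low 32 bits of the
    // object's address serve as its hash value.
    uint32_t PointerIdentityHash(const void* key) {
      intptr_t raw = reinterpret_cast<intptr_t>(key);
      return static_cast<uint32_t>(raw & 0xFFFFFFFF);
    }
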
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
@@ -6065,6 +6259,12 @@ MaybeObject* FixedDoubleArray::Copy() {
}
+MaybeObject* ConstantPoolArray::Copy() {
+ if (length() == 0) return this;
+ return GetHeap()->CopyConstantPoolArray(this);
+}
+
+
void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) {
set(1 + index * 2, Smi::FromInt(id.ToInt()));
}
diff --git a/chromium/v8/src/objects-printer.cc b/chromium/v8/src/objects-printer.cc
index 0b8fdfda030..381c9aa55ad 100644
--- a/chromium/v8/src/objects-printer.cc
+++ b/chromium/v8/src/objects-printer.cc
@@ -95,6 +95,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(out);
+ break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayPrint(out);
break;
@@ -520,6 +523,7 @@ void Symbol::SymbolPrint(FILE* out) {
PrintF(out, " - hash: %d\n", Hash());
PrintF(out, " - name: ");
name()->ShortPrint();
+ PrintF(out, " - private: %d\n", is_private());
PrintF(out, "\n");
}
@@ -552,6 +556,11 @@ void Map::MapPrint(FILE* out) {
if (is_access_check_needed()) {
PrintF(out, " - access_check_needed\n");
}
+ if (is_frozen()) {
+ PrintF(out, " - frozen\n");
+ } else if (!is_extensible()) {
+ PrintF(out, " - sealed\n");
+ }
PrintF(out, " - back pointer: ");
GetBackPointer()->ShortPrint(out);
PrintF(out, "\n - instance descriptors %s#%i: ",
@@ -630,6 +639,23 @@ void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
}
+void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "ConstantPoolArray");
+ PrintF(out, " - length: %d", length());
+ for (int i = 0; i < length(); i++) {
+ if (i < first_ptr_index()) {
+ PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i));
+ } else if (i < first_int32_index()) {
+ PrintF(out, "\n [%d]: pointer: %p", i,
+ reinterpret_cast<void*>(get_ptr_entry(i)));
+ } else {
+ PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i));
+ }
+ }
+ PrintF(out, "\n");
+}
+
+
void JSValue::JSValuePrint(FILE* out) {
HeapObject::PrintHeader(out, "ValueObject");
value()->Print(out);
@@ -810,7 +836,7 @@ void JSTypedArray::JSTypedArrayPrint(FILE* out) {
byte_length()->ShortPrint(out);
PrintF(out, "\n - length = ");
length()->ShortPrint(out);
- PrintF("\n");
+ PrintF(out, "\n");
PrintElements(out);
}
@@ -824,7 +850,7 @@ void JSDataView::JSDataViewPrint(FILE* out) {
byte_offset()->ShortPrint(out);
PrintF(out, "\n - byte_length = ");
byte_length()->ShortPrint(out);
- PrintF("\n");
+ PrintF(out, "\n");
}
@@ -841,8 +867,13 @@ void JSFunction::JSFunctionPrint(FILE* out) {
shared()->name()->Print(out);
PrintF(out, "\n - context = ");
context()->ShortPrint(out);
- PrintF(out, "\n - literals = ");
- literals()->ShortPrint(out);
+ if (shared()->bound()) {
+ PrintF(out, "\n - bindings = ");
+ function_bindings()->ShortPrint(out);
+ } else {
+ PrintF(out, "\n - literals = ");
+ literals()->ShortPrint(out);
+ }
PrintF(out, "\n - code = ");
code()->ShortPrint(out);
PrintF(out, "\n");
@@ -1100,20 +1131,23 @@ void AllocationSite::AllocationSitePrint(FILE* out) {
HeapObject::PrintHeader(out, "AllocationSite");
PrintF(out, " - weak_next: ");
weak_next()->ShortPrint(out);
- PrintF(out, "\n");
-
- PrintF(out, " - transition_info: ");
- if (transition_info()->IsCell()) {
- Cell* cell = Cell::cast(transition_info());
- Object* cell_contents = cell->value();
- if (cell_contents->IsSmi()) {
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(cell_contents)->value());
- PrintF(out, "Array allocation with ElementsKind ");
- PrintElementsKind(out, kind);
- PrintF(out, "\n");
- return;
- }
+ PrintF(out, "\n - dependent code: ");
+ dependent_code()->ShortPrint(out);
+ PrintF(out, "\n - nested site: ");
+ nested_site()->ShortPrint(out);
+ PrintF(out, "\n - memento found count: ");
+ memento_found_count()->ShortPrint(out);
+ PrintF(out, "\n - memento create count: ");
+ memento_create_count()->ShortPrint(out);
+ PrintF(out, "\n - pretenure decision: ");
+ pretenure_decision()->ShortPrint(out);
+ PrintF(out, "\n - transition_info: ");
+ if (transition_info()->IsSmi()) {
+ ElementsKind kind = GetElementsKind();
+ PrintF(out, "Array allocation with ElementsKind ");
+ PrintElementsKind(out, kind);
+ PrintF(out, "\n");
+ return;
} else if (transition_info()->IsJSArray()) {
PrintF(out, "Array literal ");
transition_info()->ShortPrint(out);
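
The new AllocationSite fields printed here (dependent code, nested site, memento found/create counts, pretenure decision) are the bookkeeping behind V8's pretenuring heuristic: the GC compares how many allocation mementos it found alive against how many were created, and a high enough survival ratio flips the site to old-space allocation. The threshold itself is not part of this diff; a sketch under an assumed 85% cutoff:

    #include <cstdio>

    // Assumed cutoff; the real constant lives elsewhere in the V8 sources.
    const double kPretenureRatio = 0.85;

    // Decide pretenuring from the two counters shown by AllocationSitePrint.
    bool ShouldPretenure(int memento_found_count, int memento_create_count) {
      if (memento_create_count == 0) return false;
      double ratio =
          static_cast<double>(memento_found_count) / memento_create_count;
      return ratio >= kPretenureRatio;
    }

    int main() {
      std::printf("%d\n", ShouldPretenure(90, 100));  // 1: allocate in old space
      std::printf("%d\n", ShouldPretenure(10, 100));  // 0: stay in new space
      return 0;
    }
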
diff --git a/chromium/v8/src/objects-visiting-inl.h b/chromium/v8/src/objects-visiting-inl.h
index 46cc9d79892..1a68344b26a 100644
--- a/chromium/v8/src/objects-visiting-inl.h
+++ b/chromium/v8/src/objects-visiting-inl.h
@@ -185,12 +185,11 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
+
table_.Register(kVisitNativeContext, &VisitNativeContext);
- table_.Register(kVisitAllocationSite,
- &FixedBodyVisitor<StaticVisitor,
- AllocationSite::BodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitAllocationSite, &VisitAllocationSite);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
@@ -261,10 +260,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !object->IsMap() || !Map::cast(object)->CanTransition()) {
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
StaticVisitor::MarkObject(heap, object);
}
}
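
The rewritten VisitEmbeddedPointer always records the relocation slot and then consults Code::IsWeakEmbeddedObject, which centralizes the flag, code-kind, and map checks that used to be inlined here; only targets that are not weakly embedded get marked strongly. A standalone sketch of that record-always, mark-conditionally shape, with toy types standing in for the V8 ones:

    #include <cstdio>
    #include <vector>

    struct Obj { bool marked = false; bool weakly_embedded = false; };

    std::vector<Obj*> recorded_slots;  // stand-in for the collector's slot buffer

    void MarkObject(Obj* o) { o->marked = true; }

    void VisitEmbeddedPointer(Obj* target) {
      // Always record the slot so it can be updated if the target moves.
      recorded_slots.push_back(target);
      // Only a strong reference keeps the target alive; weak embeddings are
      // cleared separately if the target dies.
      if (!target->weakly_embedded) MarkObject(target);
    }

    int main() {
      Obj strong, weak;
      weak.weakly_embedded = true;
      VisitEmbeddedPointer(&strong);
      VisitEmbeddedPointer(&weak);
      std::printf("strong marked: %d, weak marked: %d\n",
                  strong.marked, weak.marked);  // 1, 0
      return 0;
    }
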
@@ -389,6 +386,31 @@ void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ Object** slot =
+ HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
+ if (FLAG_collect_maps) {
+    // Mark the allocation site's dependent code array, but do not push it
+    // onto the marking stack; this keeps the references from it weak. Dead
+    // code objects are cleared when the allocation sites are iterated over
+    // in ClearNonLiveReferences.
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ } else {
+ StaticVisitor::VisitPointer(heap, slot);
+ }
+
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
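
The key move in VisitAllocationSite is MarkObjectWithoutPush: the dependent code array gets its mark bit set, so it survives, but it is never pushed onto the marking stack, so nothing reachable only through it is kept alive. That is what turns its outgoing references weak when FLAG_collect_maps is on. A toy model of the distinction:

    #include <cstdio>
    #include <deque>
    #include <vector>

    struct Node { bool marked = false; std::vector<Node*> children; };

    std::deque<Node*> marking_stack;

    // Strong mark: the node is pushed, so its children get traced later.
    void MarkObject(Node* n) {
      if (!n->marked) { n->marked = true; marking_stack.push_back(n); }
    }

    // Weak mark: the node itself survives, but nothing it points to is kept
    // alive through it.
    void MarkObjectWithoutPush(Node* n) { n->marked = true; }

    int main() {
      Node dependent_code, some_code;
      dependent_code.children.push_back(&some_code);
      MarkObjectWithoutPush(&dependent_code);
      std::printf("array marked: %d, referenced code marked: %d\n",
                  dependent_code.marked, some_code.marked);  // 1, 0
      return 0;
    }
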
@@ -452,6 +474,22 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+ int first_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index());
+ int last_ptr_offset = constant_pool->OffsetOfElementAt(
+ constant_pool->first_ptr_index() + constant_pool->count_of_ptr_entries());
+ StaticVisitor::VisitPointers(
+ heap,
+ HeapObject::RawField(object, first_ptr_offset),
+ HeapObject::RawField(object, last_ptr_offset));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
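
VisitConstantPoolArray hands the garbage collector only the pointer region of the pool: the [first_ptr_index, first_ptr_index + count_of_ptr_entries) entry range is converted into byte offsets so the int64 and int32 regions are never misread as heap references. The offset arithmetic in isolation, with assumed header and pointer sizes:

    #include <cstdio>

    const int kHeaderSize = 16;   // assumed object header size
    const int kPointerSize = 8;   // assumed entry width

    // Mirrors ConstantPoolArray::OffsetOfElementAt for this sketch.
    int OffsetOfElementAt(int index) { return kHeaderSize + index * kPointerSize; }

    int main() {
      int first_ptr_index = 2;       // pointer region starts at entry 2 (assumed)
      int count_of_ptr_entries = 3;  // and holds 3 entries (assumed)
      int first = OffsetOfElementAt(first_ptr_index);
      int last = OffsetOfElementAt(first_ptr_index + count_of_ptr_entries);
      // The visitor walks [first, last) one pointer-sized slot at a time.
      for (int off = first; off < last; off += kPointerSize) {
        std::printf("visit pointer slot at byte offset %d\n", off);
      }
      return 0;
    }
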
diff --git a/chromium/v8/src/objects-visiting.cc b/chromium/v8/src/objects-visiting.cc
index cd46013398a..5ced2cf7a35 100644
--- a/chromium/v8/src/objects-visiting.cc
+++ b/chromium/v8/src/objects-visiting.cc
@@ -82,6 +82,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FIXED_DOUBLE_ARRAY_TYPE:
return kVisitFixedDoubleArray;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ return kVisitConstantPoolArray;
+
case ODDBALL_TYPE:
return kVisitOddball;
diff --git a/chromium/v8/src/objects-visiting.h b/chromium/v8/src/objects-visiting.h
index 21757377a4f..f7758fdf4fc 100644
--- a/chromium/v8/src/objects-visiting.h
+++ b/chromium/v8/src/objects-visiting.h
@@ -54,6 +54,7 @@ class StaticVisitorBase : public AllStatic {
V(FreeSpace) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(ConstantPoolArray) \
V(NativeContext) \
V(AllocationSite) \
V(DataObject2) \
@@ -398,6 +399,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
+ INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
@@ -416,6 +418,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
+ INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
diff --git a/chromium/v8/src/objects.cc b/chromium/v8/src/objects.cc
index d9538ae217d..e9788786c57 100644
--- a/chromium/v8/src/objects.cc
+++ b/chromium/v8/src/objects.cc
@@ -28,10 +28,12 @@
#include "v8.h"
#include "accessors.h"
+#include "allocation-site-scopes.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
#include "codegen.h"
+#include "code-stubs.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
@@ -118,6 +120,17 @@ bool Object::BooleanValue() {
}
+bool Object::IsCallable() {
+ Object* fun = this;
+ while (fun->IsJSFunctionProxy()) {
+ fun = JSFunctionProxy::cast(fun)->call_trap();
+ }
+ return fun->IsJSFunction() ||
+ (fun->IsHeapObject() &&
+ HeapObject::cast(fun)->map()->has_instance_call_handler());
+}
+
+
void Object::Lookup(Name* name, LookupResult* result) {
Object* holder = NULL;
if (IsJSReceiver()) {
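
Object::IsCallable unwraps chained function proxies by following call_trap() until something that is no longer a JSFunctionProxy remains, then accepts either a plain JSFunction or a heap object whose map has an instance call handler. The loop shape, reduced to a toy stand-in type:

    #include <cstdio>

    // Toy stand-in: either a proxy wrapping a call trap or a leaf value.
    struct Callable {
      bool is_proxy = false;
      bool is_function = false;
      Callable* call_trap = nullptr;  // only meaningful for proxies
    };

    bool IsCallable(const Callable* fun) {
      // Follow the proxy chain, as the diff does for JSFunctionProxy.
      while (fun->is_proxy) fun = fun->call_trap;
      // The real code also accepts maps with an instance call handler.
      return fun->is_function;
    }

    int main() {
      Callable fn;    fn.is_function = true;
      Callable inner; inner.is_proxy = true; inner.call_trap = &fn;
      Callable outer; outer.is_proxy = true; outer.call_trap = &inner;
      std::printf("%d\n", IsCallable(&outer));  // 1: two proxy hops, then a function
      return 0;
    }
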
@@ -142,6 +155,20 @@ void Object::Lookup(Name* name, LookupResult* result) {
}
+Handle<Object> Object::GetPropertyWithReceiver(
+ Handle<Object> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ PropertyAttributes* attributes) {
+ LookupResult lookup(name->GetIsolate());
+ object->Lookup(*name, &lookup);
+ Handle<Object> result =
+ GetProperty(object, receiver, &lookup, name, attributes);
+ ASSERT(*attributes <= ABSENT);
+ return result;
+}
+
+
MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
Name* name,
PropertyAttributes* attributes) {
@@ -188,6 +215,31 @@ bool Object::ToUint32(uint32_t* value) {
}
+bool FunctionTemplateInfo::IsTemplateFor(Object* object) {
+ if (!object->IsHeapObject()) return false;
+ return IsTemplateFor(HeapObject::cast(object)->map());
+}
+
+
+bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
+  // Only a JSObject can have been created from a function template; check
+  // the map.
+ if (!map->IsJSObjectMap()) return false;
+ // Fetch the constructor function of the object.
+ Object* cons_obj = map->constructor();
+ if (!cons_obj->IsJSFunction()) return false;
+ JSFunction* fun = JSFunction::cast(cons_obj);
+ // Iterate through the chain of inheriting function templates to
+ // see if the required one occurs.
+ for (Object* type = fun->shared()->function_data();
+ type->IsFunctionTemplateInfo();
+ type = FunctionTemplateInfo::cast(type)->parent_template()) {
+ if (type == this) return true;
+ }
+ // Didn't find the required type in the inheritance chain.
+ return false;
+}
+
+
template<typename To>
static inline To* CheckedCast(void *from) {
uintptr_t temp = reinterpret_cast<uintptr_t>(from);
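
FunctionTemplateInfo::IsTemplateFor answers "was this object created from this template or one inheriting from it?" by fetching the map's constructor and walking the parent_template() chain of its shared function data. The same linked-chain membership test in miniature:

    #include <cstdio>

    struct TemplateInfo { const TemplateInfo* parent_template; };

    // Walk the inheritance chain upward from the constructor's template.
    bool IsTemplateFor(const TemplateInfo* required, const TemplateInfo* start) {
      for (const TemplateInfo* type = start; type != nullptr;
           type = type->parent_template) {
        if (type == required) return true;
      }
      return false;  // required template not found in the chain
    }

    int main() {
      TemplateInfo base = {nullptr};
      TemplateInfo derived = {&base};
      std::printf("%d %d\n",
                  IsTemplateFor(&base, &derived),    // 1
                  IsTemplateFor(&derived, &base));   // 0
      return 0;
    }
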
@@ -328,9 +380,18 @@ static MaybeObject* GetDeclaredAccessorProperty(Object* receiver,
}
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
- Object* structure,
- Name* name) {
+Handle<FixedArray> JSObject::EnsureWritableFastElements(
+ Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->EnsureWritableFastElements(),
+ FixedArray);
+}
+
+
+Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ Handle<Name> name) {
Isolate* isolate = name->GetIsolate();
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -338,66 +399,71 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* value = (callback->getter)(isolate, receiver, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_HEAP_FUNCTION(isolate,
+ (callback->getter)(isolate, *receiver, callback->data),
+ Object);
}
// api style callbacks.
if (structure->IsAccessorInfo()) {
- if (!AccessorInfo::cast(structure)->IsCompatibleReceiver(receiver)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(receiver, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+ Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
+ if (!accessor_info->IsCompatibleReceiver(*receiver)) {
+ Handle<Object> args[2] = { name, receiver };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>::null();
}
// TODO(rossberg): Handling symbols in the API requires changing the API,
// so we do not support it for now.
- if (name->IsSymbol()) return isolate->heap()->undefined_value();
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
if (structure->IsDeclaredAccessorInfo()) {
- return GetDeclaredAccessorProperty(receiver,
- DeclaredAccessorInfo::cast(structure),
- isolate);
+ CALL_HEAP_FUNCTION(
+ isolate,
+ GetDeclaredAccessorProperty(*receiver,
+ DeclaredAccessorInfo::cast(*structure),
+ isolate),
+ Object);
}
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- Object* fun_obj = data->getter();
+
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
v8::AccessorGetterCallback call_fun =
- v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
- if (call_fun == NULL) return isolate->heap()->undefined_value();
+ v8::ToCData<v8::AccessorGetterCallback>(data->getter());
+ if (call_fun == NULL) return isolate->factory()->undefined_value();
+
HandleScope scope(isolate);
- JSObject* self = JSObject::cast(receiver);
- Handle<String> key(String::cast(name));
- LOG(isolate, ApiNamedPropertyAccess("load", self, name));
- PropertyCallbackArguments args(isolate, data->data(), self, this);
+ Handle<JSObject> self = Handle<JSObject>::cast(receiver);
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("load", *self, *name));
+ PropertyCallbackArguments args(isolate, data->data(), *self, *object);
v8::Handle<v8::Value> result =
args.Call(call_fun, v8::Utils::ToLocal(key));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
}
- Object* return_value = *v8::Utils::OpenHandle(*result);
+ Handle<Object> return_value = v8::Utils::OpenHandle(*result);
return_value->VerifyApiCallResultType();
- return return_value;
+ return scope.CloseAndEscape(return_value);
}
// __defineGetter__ callback
- if (structure->IsAccessorPair()) {
- Object* getter = AccessorPair::cast(structure)->getter();
- if (getter->IsSpecFunction()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
- }
- // Getter is not a function.
- return isolate->heap()->undefined_value();
+ Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
+ isolate);
+ if (getter->IsSpecFunction()) {
+ // TODO(rossberg): nicer would be to cast to some JSCallable here...
+ CALL_HEAP_FUNCTION(
+ isolate,
+ object->GetPropertyWithDefinedGetter(*receiver,
+ JSReceiver::cast(*getter)),
+ Object);
}
-
- UNREACHABLE();
- return NULL;
+ // Getter is not a function.
+ return isolate->factory()->undefined_value();
}
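
Most of this hunk is the handlification pattern that runs through the whole commit: raw methods returning MaybeObject* (which may be a retry-after-GC failure) are wrapped with CALL_HEAP_FUNCTION, which retries the operation after collecting garbage and yields a Handle<T>, while a pending exception is signalled by returning an empty handle (see Handle<Object>::null() and RETURN_HANDLE_IF_SCHEDULED_EXCEPTION above). A much-simplified standalone model of the retry contract, with std::optional playing the role of MaybeObject*:

    #include <cstdio>
    #include <optional>

    static int gc_runs = 0;
    void CollectAllGarbage() { ++gc_runs; }

    std::optional<int> AllocateRaw() {
      // Fails once, succeeds after a GC (assumed behavior for the sketch).
      return gc_runs == 0 ? std::nullopt : std::optional<int>(42);
    }

    // Simplified shape of CALL_HEAP_FUNCTION: retry the heap operation until
    // it succeeds, then hand back a safe wrapper (here, just the value).
    template <typename F>
    int CallHeapFunction(F f) {
      for (;;) {
        if (auto result = f()) return *result;
        CollectAllGarbage();
      }
    }

    int main() {
      int value = CallHeapFunction(AllocateRaw);
      std::printf("value=%d after %d GC(s)\n", value, gc_runs);  // value=42, 1
      return 0;
    }
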
@@ -455,18 +521,15 @@ Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
StrictModeFlag strict_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- CALL_HEAP_FUNCTION(isolate,
- proxy->SetPropertyWithHandler(
- *receiver, *name, *value, NONE, strict_mode),
- Object);
+ return SetPropertyWithHandler(
+ proxy, receiver, name, value, NONE, strict_mode);
}
-bool JSProxy::HasElementWithHandler(uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return HasPropertyWithHandler(name);
+bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return HasPropertyWithHandler(proxy, name);
}
@@ -496,56 +559,51 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
// Only deal with CALLBACKS and INTERCEPTOR
-MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
- Object* receiver,
+Handle<Object> JSObject::GetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
LookupResult* result,
- Name* name,
+ Handle<Name> name,
PropertyAttributes* attributes) {
+ Isolate* isolate = name->GetIsolate();
if (result->IsProperty()) {
switch (result->type()) {
case CALLBACKS: {
// Only allow API accessors.
- Object* obj = result->GetCallbackObject();
- if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
- if (info->all_can_read()) {
- *attributes = result->GetAttributes();
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- }
- } else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
- if (pair->all_can_read()) {
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
- }
+ Handle<Object> callback_obj(result->GetCallbackObject(), isolate);
+ if (callback_obj->IsAccessorInfo()) {
+ if (!AccessorInfo::cast(*callback_obj)->all_can_read()) break;
+ *attributes = result->GetAttributes();
+ // Fall through to GetPropertyWithCallback.
+ } else if (callback_obj->IsAccessorPair()) {
+ if (!AccessorPair::cast(*callback_obj)->all_can_read()) break;
+ // Fall through to GetPropertyWithCallback.
+ } else {
+ break;
}
- break;
+ Handle<JSObject> holder(result->holder(), isolate);
+ return GetPropertyWithCallback(holder, receiver, callback_obj, name);
}
case NORMAL:
case FIELD:
case CONSTANT: {
// Search ALL_CAN_READ accessors in prototype chain.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
+ LookupResult r(isolate);
+ result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r);
if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
+ return GetPropertyWithFailedAccessCheck(
+ object, receiver, &r, name, attributes);
}
break;
}
case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// No access check in GetPropertyAttributeWithInterceptor.
- LookupResult r(GetIsolate());
- result->holder()->LookupRealNamedProperty(name, &r);
+ LookupResult r(isolate);
+ result->holder()->LookupRealNamedProperty(*name, &r);
if (r.IsProperty()) {
- return GetPropertyWithFailedAccessCheck(receiver,
- &r,
- name,
- attributes);
+ return GetPropertyWithFailedAccessCheck(
+ object, receiver, &r, name, attributes);
}
break;
}
@@ -556,11 +614,9 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
// No accessible property found.
*attributes = ABSENT;
- Heap* heap = name->GetHeap();
- Isolate* isolate = heap->isolate();
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_GET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->undefined_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_GET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
@@ -643,67 +699,63 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) {
}
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(result, *value),
- Object);
-}
-
-
-MaybeObject* JSObject::SetNormalizedProperty(LookupResult* result,
- Object* value) {
- ASSERT(!HasFastProperties());
- if (IsGlobalObject()) {
- PropertyCell* cell = PropertyCell::cast(
- property_dictionary()->ValueAt(result->GetDictionaryEntry()));
- MaybeObject* maybe_type = cell->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value) {
+ ASSERT(!object->HasFastProperties());
+ NameDictionary* property_dictionary = object->property_dictionary();
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(PropertyCell::cast(
+ property_dictionary->ValueAt(result->GetDictionaryEntry())));
+ PropertyCell::SetValueInferType(cell, value);
} else {
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
+ property_dictionary->ValueAtPut(result->GetDictionaryEntry(), *value);
}
- return value;
}
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(*key, *value, details),
- Object);
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
}
-MaybeObject* JSObject::SetNormalizedProperty(Name* name,
- Object* value,
- PropertyDetails details) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ ASSERT(!object->HasFastProperties());
+ Handle<NameDictionary> property_dictionary(object->property_dictionary());
+
+ if (!name->IsUniqueName()) {
+ name = object->GetIsolate()->factory()->InternalizedStringFromString(
+ Handle<String>::cast(name));
+ }
+
+ int entry = property_dictionary->FindEntry(*name);
if (entry == NameDictionary::kNotFound) {
- Object* store_value = value;
- if (IsGlobalObject()) {
- Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value = heap->AllocatePropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- Object* dict;
- { MaybeObject* maybe_dict =
- property_dictionary()->Add(name, store_value, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
+ Handle<Object> store_value = value;
+ if (object->IsGlobalObject()) {
+ store_value = object->GetIsolate()->factory()->NewPropertyCell(value);
}
- set_properties(NameDictionary::cast(dict));
- return value;
+
+ property_dictionary =
+ NameDictionaryAdd(property_dictionary, name, store_value, details);
+ object->set_properties(*property_dictionary);
+ return;
}
- PropertyDetails original_details = property_dictionary()->DetailsAt(entry);
+ PropertyDetails original_details = property_dictionary->DetailsAt(entry);
int enumeration_index;
// Preserve the enumeration index unless the property was deleted.
if (original_details.IsDeleted()) {
- enumeration_index = property_dictionary()->NextEnumerationIndex();
- property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1);
+ enumeration_index = property_dictionary->NextEnumerationIndex();
+ property_dictionary->SetNextEnumerationIndex(enumeration_index + 1);
} else {
enumeration_index = original_details.dictionary_index();
ASSERT(enumeration_index > 0);
@@ -712,17 +764,15 @@ MaybeObject* JSObject::SetNormalizedProperty(Name* name,
details = PropertyDetails(
details.attributes(), details.type(), enumeration_index);
- if (IsGlobalObject()) {
- PropertyCell* cell =
- PropertyCell::cast(property_dictionary()->ValueAt(entry));
- MaybeObject* maybe_type = cell->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(
+ PropertyCell::cast(property_dictionary->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Please note we have to update the property details.
- property_dictionary()->DetailsAtPut(entry, details);
+ property_dictionary->DetailsAtPut(entry, details);
} else {
- property_dictionary()->SetEntry(entry, name, value, details);
+ property_dictionary->SetEntry(entry, *name, *value, details);
}
- return value;
}
@@ -733,12 +783,6 @@ Handle<NameDictionary> NameDictionaryShrink(Handle<NameDictionary> dict,
}
-static void CellSetValueInferType(Handle<PropertyCell> cell,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION_VOID(cell->GetIsolate(), cell->SetValueInferType(*value));
-}
-
-
Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode) {
@@ -761,7 +805,8 @@ Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
object->set_map(*new_map);
}
Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- CellSetValueInferType(cell, isolate->factory()->the_hole_value());
+ Handle<Object> value = isolate->factory()->the_hole_value();
+ PropertyCell::SetValueInferType(cell, value);
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate);
@@ -817,17 +862,24 @@ MaybeObject* Object::GetPropertyOrFail(Handle<Object> object,
}
+// TODO(yangguo): handlify this and get rid of it.
MaybeObject* Object::GetProperty(Object* receiver,
LookupResult* result,
Name* name,
PropertyAttributes* attributes) {
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
-
Isolate* isolate = name->GetIsolate();
Heap* heap = isolate->heap();
+#ifdef DEBUG
+ // TODO(mstarzinger): Only because of the AssertNoContextChange, drop as soon
+ // as this method has been fully handlified.
+ HandleScope scope(isolate);
+#endif
+
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
// Traverse the prototype chain from the current object (this) to
// the holder and check for access rights. This avoids traversing the
// objects more than once in case of interceptors, because the
@@ -849,11 +901,16 @@ MaybeObject* Object::GetProperty(Object* receiver,
// property from the current object, we still check that we have
// access to it.
JSObject* checked = JSObject::cast(current);
- if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
- return checked->GetPropertyWithFailedAccessCheck(receiver,
- result,
- name,
- attributes);
+ if (!isolate->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithFailedAccessCheck(
+ handle(checked, isolate),
+ handle(receiver, isolate),
+ result,
+ handle(name, isolate),
+ attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
}
}
// Stop traversing the chain once we reach the last object in the
@@ -884,14 +941,28 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
case CONSTANT:
return result->GetConstant();
- case CALLBACKS:
- return result->holder()->GetPropertyWithCallback(
- receiver, result->GetCallbackObject(), name);
+ case CALLBACKS: {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(result->GetCallbackObject(), isolate),
+ handle(name, isolate));
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
+ }
case HANDLER:
return result->proxy()->GetPropertyWithHandler(receiver, name);
- case INTERCEPTOR:
- return result->holder()->GetPropertyWithInterceptor(
- receiver, name, attributes);
+ case INTERCEPTOR: {
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithInterceptor(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(name, isolate),
+ attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ return *value;
+ }
case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
@@ -994,7 +1065,13 @@ Object* Object::GetPrototype(Isolate* isolate) {
}
-MaybeObject* Object::GetHash(CreationFlag flag) {
+Map* Object::GetMarkerMap(Isolate* isolate) {
+ if (IsSmi()) return isolate->heap()->heap_number_map();
+ return HeapObject::cast(this)->map();
+}
+
+
+Object* Object::GetHash() {
// The object is either a number, a name, an odd-ball,
// a real JS object, or a Harmony proxy.
if (IsNumber()) {
@@ -1009,12 +1086,20 @@ MaybeObject* Object::GetHash(CreationFlag flag) {
uint32_t hash = Oddball::cast(this)->to_string()->Hash();
return Smi::FromInt(hash);
}
- if (IsJSReceiver()) {
- return JSReceiver::cast(this)->GetIdentityHash(flag);
- }
- UNREACHABLE();
- return Smi::FromInt(0);
+ ASSERT(IsJSReceiver());
+ return JSReceiver::cast(this)->GetIdentityHash();
+}
+
+
+Handle<Object> Object::GetOrCreateHash(Handle<Object> object,
+ Isolate* isolate) {
+ Handle<Object> hash(object->GetHash(), isolate);
+ if (hash->IsSmi())
+ return hash;
+
+ ASSERT(object->IsJSReceiver());
+ return JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver>::cast(object));
}
@@ -1026,8 +1111,11 @@ bool Object::SameValue(Object* other) {
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
double other_value = other->Number();
- return (this_value == other_value) ||
- (std::isnan(this_value) && std::isnan(other_value));
+ bool equal = this_value == other_value;
+ // SameValue(NaN, NaN) is true.
+ if (!equal) return std::isnan(this_value) && std::isnan(other_value);
+ // SameValue(0.0, -0.0) is false.
+ return (this_value != 0) || ((1 / this_value) == (1 / other_value));
}
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
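
The rewritten number branch is exactly ES5 SameValue: NaN compares equal to NaN, while +0 and -0 are told apart by comparing 1/x, since 1/+0 is +infinity and 1/-0 is -infinity. The logic is checkable in isolation:

    #include <cmath>
    #include <cstdio>

    // ES5 SameValue for doubles, mirroring the branch in Object::SameValue.
    bool SameValue(double a, double b) {
      bool equal = a == b;
      // SameValue(NaN, NaN) is true.
      if (!equal) return std::isnan(a) && std::isnan(b);
      // SameValue(0.0, -0.0) is false: 1/+0 == +inf, 1/-0 == -inf.
      return (a != 0) || ((1 / a) == (1 / b));
    }

    int main() {
      std::printf("%d\n", SameValue(NAN, NAN));   // 1
      std::printf("%d\n", SameValue(0.0, -0.0));  // 0
      std::printf("%d\n", SameValue(1.5, 1.5));   // 1
      return 0;
    }
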
@@ -1167,7 +1255,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1224,7 +1312,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
ASSERT(static_cast<size_t>(this->length()) == resource->length());
@@ -1709,6 +1797,9 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case FIXED_ARRAY_TYPE:
FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break;
+ case CONSTANT_POOL_ARRAY_TYPE:
+ reinterpret_cast<ConstantPoolArray*>(this)->ConstantPoolIterateBody(v);
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
break;
case JS_OBJECT_TYPE:
@@ -1871,211 +1962,247 @@ String* JSReceiver::constructor_name() {
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
- Name* name,
- Object* value,
- int field_index,
- Representation representation) {
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> NewStorageFor(Isolate* isolate,
+ Handle<Object> object,
+ Representation representation) {
+ Heap* heap = isolate->heap();
+ CALL_HEAP_FUNCTION(isolate,
+ object->AllocateNewStorageFor(heap, representation),
+ Object);
+}
+
+
+void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation) {
+ Isolate* isolate = object->GetIsolate();
+
// This method is used to transition to a field. If we are transitioning to a
// double field, allocate new storage.
- Object* storage;
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(GetHeap(), representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
+ Handle<Object> storage = NewStorageFor(isolate, value, representation);
- if (map()->unused_property_fields() == 0) {
+ if (object->map()->unused_property_fields() == 0) {
int new_unused = new_map->unused_property_fields();
- FixedArray* values;
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
+ Handle<FixedArray> properties(object->properties());
+ Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray(
+ properties, properties->length() + new_unused + 1);
+ object->set_properties(*values);
+ }
- set_properties(values);
+ object->set_map(*new_map);
+ object->FastPropertyAtPut(field_index, *storage);
+}
+
+
+static MaybeObject* CopyAddFieldDescriptor(Map* map,
+ Name* name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ Map* new_map;
+ FieldDescriptor new_field_desc(name, index, attributes, representation);
+ MaybeObject* maybe_map = map->CopyAddDescriptor(&new_field_desc, flag);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ int unused_property_fields = map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
}
+ new_map->set_unused_property_fields(unused_property_fields);
+ return new_map;
+}
- set_map(new_map);
- FastPropertyAtPut(field_index, storage);
- return value;
+static Handle<Map> CopyAddFieldDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddFieldDescriptor(
+ *map, *name, index, attributes, representation, flag),
+ Map);
}
-MaybeObject* JSObject::AddFastProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode,
- ValueType value_type,
- TransitionFlag flag) {
- ASSERT(!IsJSGlobalProxy());
+void JSObject::AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag) {
+ ASSERT(!object->IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
- map()->instance_descriptors()->Search(
- name, map()->NumberOfOwnDescriptors()));
+ object->map()->instance_descriptors()->Search(
+ *name, object->map()->NumberOfOwnDescriptors()));
  // Normalize the object if the name is not cacheable (not an identifier
  // and not the hidden string), or if adding the property would leave the
  // object with too many fast properties.
- Isolate* isolate = GetHeap()->isolate();
- if (!name->IsCacheable(isolate) || TooManyFastProperties(store_mode)) {
- MaybeObject* maybe_failure =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return AddSlowProperty(name, value, attributes);
+ Isolate* isolate = object->GetIsolate();
+ if (!name->IsCacheable(isolate) ||
+ object->TooManyFastProperties(store_mode)) {
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
+ return;
}
// Compute the new index for new field.
- int index = map()->NextFreePropertyIndex();
+ int index = object->map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- if (IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
+ if (object->IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
Representation representation = value->OptimalRepresentation(value_type);
+ Handle<Map> new_map = CopyAddFieldDescriptor(
+ handle(object->map()), name, index, attributes, representation, flag);
- FieldDescriptor new_field(name, index, attributes, representation);
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ AddFastPropertyUsingMap(object, new_map, name, value, index, representation);
+}
- int unused_property_fields = map()->unused_property_fields() - 1;
- if (unused_property_fields < 0) {
- unused_property_fields += kFieldsAdded;
- }
- new_map->set_unused_property_fields(unused_property_fields);
- return AddFastPropertyUsingMap(new_map, name, value, index, representation);
+static MaybeObject* CopyAddConstantDescriptor(Map* map,
+ Name* name,
+ Object* value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ ConstantDescriptor new_constant_desc(name, value, attributes);
+ return map->CopyAddDescriptor(&new_constant_desc, flag);
}
-MaybeObject* JSObject::AddConstantProperty(
- Name* name,
- Object* constant,
- PropertyAttributes attributes,
- TransitionFlag initial_flag) {
- // Allocate new instance descriptors with (name, constant) added
- ConstantDescriptor d(name, constant, attributes);
+static Handle<Map> CopyAddConstantDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddConstantDescriptor(
+ *map, *name, *value, attributes, flag),
+ Map);
+}
+
+void JSObject::AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag initial_flag) {
TransitionFlag flag =
// Do not add transitions to global objects.
- (IsGlobalObject() ||
+ (object->IsGlobalObject() ||
// Don't add transitions to special properties with non-trivial
// attributes.
attributes != NONE)
? OMIT_TRANSITION
: initial_flag;
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ // Allocate new instance descriptors with (name, constant) added.
+ Handle<Map> new_map = CopyAddConstantDescriptor(
+ handle(object->map()), name, constant, attributes, flag);
- set_map(new_map);
- return constant;
+ object->set_map(*new_map);
}
-// Add property in slow mode
-MaybeObject* JSObject::AddSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes) {
- ASSERT(!HasFastProperties());
- NameDictionary* dict = property_dictionary();
- Object* store_value = value;
- if (IsGlobalObject()) {
+void JSObject::AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ ASSERT(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<NameDictionary> dict(object->property_dictionary());
+ if (object->IsGlobalObject()) {
    // In case the name is an orphaned property, reuse its cell.
- int entry = dict->FindEntry(name);
+ int entry = dict->FindEntry(*name);
if (entry != NameDictionary::kNotFound) {
- store_value = dict->ValueAt(entry);
- MaybeObject* maybe_type =
- PropertyCell::cast(store_value)->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, name, store_value, details);
- return value;
- }
- Heap* heap = GetHeap();
- { MaybeObject* maybe_store_value =
- heap->AllocatePropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
+ dict->SetEntry(entry, *name, *cell, details);
+ return;
}
- MaybeObject* maybe_type =
- PropertyCell::cast(store_value)->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value);
+ PropertyCell::SetValueInferType(cell, value);
+ value = cell;
}
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- Object* result;
- { MaybeObject* maybe_result = dict->Add(name, store_value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (dict != result) set_properties(NameDictionary::cast(result));
- return value;
+ Handle<NameDictionary> result = NameDictionaryAdd(dict, name, value, details);
+ if (*dict != *result) object->set_properties(*result);
}
-MaybeObject* JSObject::AddProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check,
- ValueType value_type,
- StoreMode mode,
- TransitionFlag transition_flag) {
- ASSERT(!IsJSGlobalProxy());
- Map* map_of_this = map();
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
- MaybeObject* result;
+Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode,
+ ExtensibilityCheck extensibility_check,
+ ValueType value_type,
+ StoreMode mode,
+ TransitionFlag transition_flag) {
+ ASSERT(!object->IsJSGlobalProxy());
+ Isolate* isolate = object->GetIsolate();
+
+ if (!name->IsUniqueName()) {
+ name = isolate->factory()->InternalizedStringFromString(
+ Handle<String>::cast(name));
+ }
+
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
- !map_of_this->is_extensible()) {
+ !object->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return value;
} else {
- Handle<Object> args[1] = {Handle<Name>(name)};
- return isolate->Throw(
- *isolate->factory()->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ Handle<Object> args[1] = { name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- if (HasFastProperties()) {
+ if (object->HasFastProperties()) {
// Ensure the descriptor array does not get too big.
- if (map_of_this->NumberOfOwnDescriptors() <
- DescriptorArray::kMaxNumberOfDescriptors) {
+ if (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors) {
// TODO(verwaest): Support other constants.
// if (mode == ALLOW_AS_CONSTANT &&
// !value->IsTheHole() &&
// !value->IsConsString()) {
if (value->IsJSFunction()) {
- result = AddConstantProperty(name, value, attributes, transition_flag);
+ AddConstantProperty(object, name, value, attributes, transition_flag);
} else {
- result = AddFastProperty(
- name, value, attributes, store_mode, value_type, transition_flag);
+ AddFastProperty(object, name, value, attributes, store_mode,
+ value_type, transition_flag);
}
} else {
// Normalize the object to prevent very large instance descriptors.
// This eliminates unwanted N^2 allocation and lookup behavior.
- Object* obj;
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe->To(&obj)) return maybe;
- result = AddSlowProperty(name, value, attributes);
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
}
} else {
- result = AddSlowProperty(name, value, attributes);
+ AddSlowProperty(object, name, value, attributes);
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (FLAG_harmony_observation && map()->is_observed()) {
- EnqueueChangeRecord(handle(this, isolate),
- "new",
- handle(name, isolate),
- handle(heap->the_hole_value(), isolate));
+ if (FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string()) {
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ EnqueueChangeRecord(object, "add", name, old_value);
}
- return *hresult;
+ return value;
}
@@ -2090,62 +2217,51 @@ void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
}
Handle<Object> args[] = { type, object, name, old_value };
+ int argc = name.is_null() ? 2 : old_value->IsTheHole() ? 3 : 4;
bool threw;
+
Execution::Call(isolate,
Handle<JSFunction>(isolate->observers_notify_change()),
isolate->factory()->undefined_value(),
- old_value->IsTheHole() ? 3 : 4, args,
+ argc, args,
&threw);
ASSERT(!threw);
}
-void JSObject::DeliverChangeRecords(Isolate* isolate) {
- ASSERT(isolate->observer_delivery_pending());
- bool threw = false;
- Execution::Call(
- isolate,
- isolate->observers_deliver_changes(),
- isolate->factory()->undefined_value(),
- 0,
- NULL,
- &threw);
- ASSERT(!threw);
- isolate->set_observer_delivery_pending(false);
-}
-
-
-MaybeObject* JSObject::SetPropertyPostInterceptor(
- Name* name,
- Object* value,
+Handle<Object> JSObject::SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreMode mode) {
+ StrictModeFlag strict_mode) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) map()->LookupTransition(this, name, &result);
+ LookupResult result(object->GetIsolate());
+ object->LocalLookupRealNamedProperty(*name, &result);
+ if (!result.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &result);
+ }
if (result.IsFound()) {
// An existing property or a map transition was found. Use set property to
// handle all these cases.
- return SetProperty(&result, name, value, attributes, strict_mode);
+ return SetPropertyForResult(object, &result, name, value, attributes,
+ strict_mode, MAY_BE_STORE_FROM_KEYED);
}
bool done = false;
- MaybeObject* result_object =
- SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
// Add a new real property.
- return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK,
- OPTIMAL_REPRESENTATION, mode);
+ return AddProperty(object, name, value, attributes, strict_mode);
}
-MaybeObject* JSObject::ReplaceSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes) {
- NameDictionary* dictionary = property_dictionary();
- int old_index = dictionary->FindEntry(name);
+static void ReplaceSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ NameDictionary* dictionary = object->property_dictionary();
+ int old_index = dictionary->FindEntry(*name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
@@ -2153,7 +2269,7 @@ MaybeObject* JSObject::ReplaceSlowProperty(Name* name,
}
PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
- return SetNormalizedProperty(name, value, new_details);
+ JSObject::SetNormalizedProperty(object, name, value, new_details);
}
@@ -2219,6 +2335,13 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
}
+
+  // Even though the array is not moved during GC, its recorded size
+  // still has to be adjusted.
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
+ }
}
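
Right-trimming shrinks a FixedArray in place: the freed tail is overwritten with a filler object and the live-byte counters are decremented, but the array keeps its address, so no allocation event will ever tell the heap profiler about the new size. The added lines close that gap explicitly. A sketch of the bookkeeping:

    #include <cstdio>

    struct Array { int length; };

    struct Profiler {
      bool tracking = true;
      void UpdateObjectSizeEvent(const Array& a) {
        std::printf("profiler: object now %d entries\n", a.length);
      }
    };

    // In-place right trim: shrink the length and notify the profiler, since
    // the object never moves and no new allocation is recorded for it.
    void RightTrim(Array* a, int to_trim, Profiler* profiler) {
      a->length -= to_trim;
      // (the real code also writes a filler object over the freed tail and
      // adjusts the heap's live-byte accounting)
      if (profiler->tracking) profiler->UpdateObjectSizeEvent(*a);
    }

    int main() {
      Array a = {10};
      Profiler p;
      RightTrim(&a, 4, &p);  // array stays put; recorded size drops to 6
      return 0;
    }
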
@@ -2275,28 +2398,27 @@ bool Map::InstancesNeedRewriting(Map* target,
// to temporarily store the inobject properties.
// * If there are properties left in the backing store, install the backing
// store.
-MaybeObject* JSObject::MigrateToMap(Map* new_map) {
- Heap* heap = GetHeap();
- Map* old_map = map();
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> old_map(object->map());
int number_of_fields = new_map->NumberOfFields();
int inobject = new_map->inobject_properties();
int unused = new_map->unused_property_fields();
- // Nothing to do if no functions were converted to fields.
+ // Nothing to do if no functions were converted to fields and no smis were
+ // converted to doubles.
if (!old_map->InstancesNeedRewriting(
- new_map, number_of_fields, inobject, unused)) {
- set_map(new_map);
- return this;
+ *new_map, number_of_fields, inobject, unused)) {
+ object->set_map(*new_map);
+ return;
}
int total_size = number_of_fields + unused;
int external = total_size - inobject;
- FixedArray* array;
- MaybeObject* maybe_array = heap->AllocateFixedArray(total_size);
- if (!maybe_array->To(&array)) return maybe_array;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
+ Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
int descriptors = new_map->NumberOfOwnDescriptors();
for (int i = 0; i < descriptors; i++) {
@@ -2309,69 +2431,72 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
}
ASSERT(old_details.type() == CONSTANT ||
old_details.type() == FIELD);
- Object* value = old_details.type() == CONSTANT
+ Object* raw_value = old_details.type() == CONSTANT
? old_descriptors->GetValue(i)
- : RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ : object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ Handle<Object> value(raw_value, isolate);
if (FLAG_track_double_fields &&
!old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
- if (old_details.representation().IsNone()) value = Smi::FromInt(0);
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to migrate the instance.
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, details.representation(), TENURED);
- if (!maybe_storage->To(&value)) return maybe_storage;
+ if (old_details.representation().IsNone()) {
+ value = handle(Smi::FromInt(0), isolate);
+ }
+ value = NewStorageFor(isolate, value, details.representation());
}
ASSERT(!(FLAG_track_double_fields &&
details.representation().IsDouble() &&
value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (target_index < 0) target_index += total_size;
- array->set(target_index, value);
+ array->set(target_index, *value);
}
- // From here on we cannot fail anymore.
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
- FastPropertyAtPut(i, array->get(external + i));
+ object->FastPropertyAtPut(i, array->get(external + i));
}
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- Address address = this->address() + new_instance_size;
- heap->CreateFillerObjectAt(address, instance_size_delta);
+ Address address = object->address() + new_instance_size;
+ isolate->heap()->CreateFillerObjectAt(address, instance_size_delta);
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {
- RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject);
- set_properties(array);
+ RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject);
+ object->set_properties(*array);
}
- set_map(new_map);
-
- return this;
+ object->set_map(*new_map);
}
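
MigrateToMap follows a strict two-phase discipline: everything that can allocate (the scratch FixedArray, boxed doubles from NewStorageFor) happens first and the values are staged in the scratch array; only then does the DisallowHeapAllocation region copy fields into the instance, create the filler, and install the new map, so the object is never observable half-migrated across a GC. The pattern in the abstract:

    #include <cstdio>
    #include <vector>

    struct Instance { std::vector<int> fields; };

    // Phase 1: all fallible, allocating work goes into scratch storage.
    // Phase 2: commit with plain stores that cannot fail or trigger GC.
    void Migrate(Instance* obj, const std::vector<int>& new_layout) {
      std::vector<int> scratch;                  // phase 1: may allocate
      for (int v : new_layout) scratch.push_back(v * 10);

      // --- from here on we cannot fail and must not allocate ---
      obj->fields.swap(scratch);                 // phase 2: commit
    }

    int main() {
      Instance obj;
      Migrate(&obj, {1, 2, 3});
      std::printf("fields: %d %d %d\n",
                  obj.fields[0], obj.fields[1], obj.fields[2]);  // 10 20 30
      return 0;
    }
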
-MaybeObject* JSObject::GeneralizeFieldRepresentation(
- int modify_index,
- Representation new_representation,
- StoreMode store_mode) {
- Map* new_map;
- MaybeObject* maybe_new_map = map()->GeneralizeRepresentation(
- modify_index, new_representation, store_mode);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (map() == new_map) return this;
+Handle<TransitionArray> Map::AddTransition(Handle<Map> map,
+ Handle<Name> key,
+ Handle<Map> target,
+ SimpleTransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->AddTransition(*key, *target, flag),
+ TransitionArray);
+}
+
- return MigrateToMap(new_map);
+void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<Map> new_map = Map::GeneralizeRepresentation(
+ handle(object->map()), modify_index, new_representation, store_mode);
+ if (object->map() == *new_map) return;
+ return MigrateToMap(object, new_map);
}
@@ -2385,14 +2510,12 @@ int Map::NumberOfFields() {
}
-MaybeObject* Map::CopyGeneralizeAllRepresentations(
- int modify_index,
- StoreMode store_mode,
- PropertyAttributes attributes,
- const char* reason) {
- Map* new_map;
- MaybeObject* maybe_map = this->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
+Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason) {
+ Handle<Map> new_map = Copy(map);
DescriptorArray* descriptors = new_map->instance_descriptors();
descriptors->InitializeRepresentations(Representation::Tagged());
@@ -2414,7 +2537,7 @@ MaybeObject* Map::CopyGeneralizeAllRepresentations(
}
if (FLAG_trace_generalization) {
- PrintGeneralization(stdout, reason, modify_index,
+ map->PrintGeneralization(stdout, reason, modify_index,
new_map->NumberOfOwnDescriptors(),
new_map->NumberOfOwnDescriptors(),
details.type() == CONSTANT && store_mode == FORCE_FIELD,
@@ -2458,7 +2581,7 @@ void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
DescriptorArray* to_replace = instance_descriptors();
Map* current = this;
while (current->instance_descriptors() == to_replace) {
- current->SetEnumLength(Map::kInvalidEnumCache);
+ current->SetEnumLength(kInvalidEnumCacheSentinel);
current->set_instance_descriptors(new_descriptors);
Object* next = current->GetBackPointer();
if (next->IsUndefined()) break;
@@ -2562,11 +2685,11 @@ Map* Map::FindLastMatchMap(int verbatim,
// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
// - Otherwise, invalidate the outdated transition target from |updated|, and
// replace its transition tree with a new branch for the updated descriptors.
-MaybeObject* Map::GeneralizeRepresentation(int modify_index,
- Representation new_representation,
- StoreMode store_mode) {
- Map* old_map = this;
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
+Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
Representation old_representation = old_details.representation();
@@ -2582,37 +2705,37 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
}
int descriptors = old_map->NumberOfOwnDescriptors();
- Map* root_map = old_map->FindRootMap();
+ Handle<Map> root_map(old_map->FindRootMap());
// Check the state of the root map.
- if (!old_map->EquivalentToForTransition(root_map)) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode, old_details.attributes(), "not equivalent");
+ if (!old_map->EquivalentToForTransition(*root_map)) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "not equivalent");
}
int verbatim = root_map->NumberOfOwnDescriptors();
if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode,
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
old_details.attributes(), "root modification");
}
- Map* updated = root_map->FindUpdatedMap(
- verbatim, descriptors, old_descriptors);
- if (updated == NULL) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode, old_details.attributes(), "incompatible");
+ Map* raw_updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, *old_descriptors);
+ if (raw_updated == NULL) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "incompatible");
}
- DescriptorArray* updated_descriptors = updated->instance_descriptors();
+ Handle<Map> updated(raw_updated);
+ Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors());
int valid = updated->NumberOfOwnDescriptors();
// Directly change the map if the target map is more general. Ensure that the
// target type of the modify_index is a FIELD, unless we are migrating.
if (updated_descriptors->IsMoreGeneralThan(
- verbatim, valid, descriptors, old_descriptors) &&
+ verbatim, valid, descriptors, *old_descriptors) &&
(store_mode == ALLOW_AS_CONSTANT ||
updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
Representation updated_representation =
@@ -2620,10 +2743,9 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
if (new_representation.fits_into(updated_representation)) return updated;
}
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = updated_descriptors->Merge(
- verbatim, valid, descriptors, modify_index, store_mode, old_descriptors);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge(
+ updated_descriptors, verbatim, valid, descriptors, modify_index,
+ store_mode, old_descriptors);
ASSERT(store_mode == ALLOW_AS_CONSTANT ||
new_descriptors->GetDetails(modify_index).type() == FIELD);
@@ -2635,8 +2757,8 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
new_descriptors->SetRepresentation(modify_index, updated_representation);
}
- Map* split_map = root_map->FindLastMatchMap(
- verbatim, descriptors, new_descriptors);
+ Handle<Map> split_map(root_map->FindLastMatchMap(
+ verbatim, descriptors, *new_descriptors));
int split_descriptors = split_map->NumberOfOwnDescriptors();
// This is shadowed by |updated_descriptors| being more general than
@@ -2645,29 +2767,20 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
int descriptor = split_descriptors;
split_map->DeprecateTarget(
- old_descriptors->GetKey(descriptor), new_descriptors);
+ old_descriptors->GetKey(descriptor), *new_descriptors);
if (FLAG_trace_generalization) {
- PrintGeneralization(
+ old_map->PrintGeneralization(
stdout, "", modify_index, descriptor, descriptors,
old_descriptors->GetDetails(modify_index).type() == CONSTANT &&
store_mode == FORCE_FIELD,
old_representation, updated_representation);
}
- Map* new_map = split_map;
// Add missing transitions.
+ Handle<Map> new_map = split_map;
for (; descriptor < descriptors; descriptor++) {
- MaybeObject* maybe_map = new_map->CopyInstallDescriptors(
- descriptor, new_descriptors);
- if (!maybe_map->To(&new_map)) {
- // Create a handle for the last created map to ensure it stays alive
- // during GC. Its descriptor array is too large, but it will be
- // overwritten during retry anyway.
- Handle<Map>(new_map);
- return maybe_map;
- }
- new_map->set_migration_target(true);
+ new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors);
}
new_map->set_owns_descriptors(true);
@@ -2675,122 +2788,122 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
}
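// Illustrative summary, not from this patch: GeneralizeRepresentation
// starts from the root map, locates the most specialized existing
// transition compatible with the change (FindUpdatedMap), merges the
// descriptor arrays, deprecates the old transition subtree at the split
// map, and then re-adds the missing transitions, now expressed entirely
// in handles rather than raw MaybeObject* results.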
-Map* Map::CurrentMapForDeprecated() {
- DisallowHeapAllocation no_allocation;
- if (!is_deprecated()) return this;
+// Generalize the representation of all FIELD descriptors.
+Handle<Map> Map::GeneralizeAllFieldRepresentations(
+ Handle<Map> map,
+ Representation new_representation) {
+ Handle<DescriptorArray> descriptors(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() == FIELD) {
+ map = GeneralizeRepresentation(map, i, new_representation, FORCE_FIELD);
+ }
+ }
+ return map;
+}
- DescriptorArray* old_descriptors = instance_descriptors();
- int descriptors = NumberOfOwnDescriptors();
- Map* root_map = FindRootMap();
+Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
+ Handle<Map> proto_map(map);
+ while (proto_map->prototype()->IsJSObject()) {
+ Handle<JSObject> holder(JSObject::cast(proto_map->prototype()));
+ if (holder->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(holder);
+ }
+ proto_map = Handle<Map>(holder->map());
+ }
+ return CurrentMapForDeprecatedInternal(map);
+}
+
+
+Handle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
+
+ DisallowHeapAllocation no_allocation;
+ DescriptorArray* old_descriptors = map->instance_descriptors();
+
+ int descriptors = map->NumberOfOwnDescriptors();
+ Map* root_map = map->FindRootMap();
// Check the state of the root map.
- if (!EquivalentToForTransition(root_map)) return NULL;
+ if (!map->EquivalentToForTransition(root_map)) return Handle<Map>();
int verbatim = root_map->NumberOfOwnDescriptors();
Map* updated = root_map->FindUpdatedMap(
verbatim, descriptors, old_descriptors);
- if (updated == NULL) return NULL;
+ if (updated == NULL) return Handle<Map>();
DescriptorArray* updated_descriptors = updated->instance_descriptors();
int valid = updated->NumberOfOwnDescriptors();
if (!updated_descriptors->IsMoreGeneralThan(
verbatim, valid, descriptors, old_descriptors)) {
- return NULL;
+ return Handle<Map>();
}
- return updated;
+ return handle(updated);
}
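// Illustrative note, not from this patch: throughout this handlification
// the raw failure values (NULL, Failure::Exception()) become empty
// handles, and callers test with is_null(). A hypothetical call site:
//
//   Handle<Map> updated = Map::CurrentMapForDeprecated(map);
//   if (updated.is_null()) {
//     // No compatible non-deprecated map exists; the caller has to
//     // generalize representations instead.
//   }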
-MaybeObject* JSObject::SetPropertyWithInterceptor(
- Name* name,
- Object* value,
+Handle<Object> JSObject::SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> this_handle(this);
- Handle<String> name_handle(String::cast(name));
- Handle<Object> value_handle(value, isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+ Isolate* isolate = object->GetIsolate();
+ Handle<String> name_string = Handle<String>::cast(name);
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", *object, *name));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
v8::NamedPropertySetterCallback setter =
v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
- Handle<Object> value_unhole(value->IsTheHole() ?
- isolate->heap()->undefined_value() :
- value,
- isolate);
+ Handle<Object> value_unhole = value->IsTheHole()
+ ? Handle<Object>(isolate->factory()->undefined_value()) : value;
v8::Handle<v8::Value> result = args.Call(setter,
- v8::Utils::ToLocal(name_handle),
+ v8::Utils::ToLocal(name_string),
v8::Utils::ToLocal(value_unhole));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!result.IsEmpty()) return value;
}
- MaybeObject* raw_result =
- this_handle->SetPropertyPostInterceptor(*name_handle,
- *value_handle,
- attributes,
- strict_mode);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> result =
+ SetPropertyPostInterceptor(object, name, value, attributes, strict_mode);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return result;
}
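// Illustrative note, not from this patch:
// RETURN_HANDLE_IF_SCHEDULED_EXCEPTION is the handle-returning analogue
// of the RETURN_IF_SCHEDULED_EXCEPTION it replaces; roughly it expands to
//
//   if (isolate->has_scheduled_exception()) {
//     isolate->PromoteScheduledException();
//     return Handle<Object>::null();  // empty handle signals failure
//   }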
Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
- Handle<Name> key,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode),
- Object);
-}
-
-
-MaybeObject* JSReceiver::SetPropertyOrFail(
- Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(
- object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode, store_mode));
-}
-
-
-MaybeObject* JSReceiver::SetProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- LookupResult result(GetIsolate());
- LocalLookup(name, &result, true);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ LookupResult result(object->GetIsolate());
+ object->LocalLookup(*name, &result, true);
if (!result.IsFound()) {
- map()->LookupTransition(JSObject::cast(this), name, &result);
+ object->map()->LookupTransition(JSObject::cast(*object), *name, &result);
}
- return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
+ return SetProperty(object, &result, name, value, attributes, strict_mode,
+ store_mode);
}
-MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
- Name* name,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -2798,26 +2911,27 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* obj = (callback->setter)(
- isolate, this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (obj->IsFailure()) return obj;
- return *value_handle;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_AND_RETRY_OR_DIE(isolate,
+ (callback->setter)(
+ isolate, *object, *value, callback->data),
+ break,
+ return Handle<Object>());
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- if (!data->IsCompatibleReceiver(this)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(this, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+ ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure);
+ if (!data->IsCompatibleReceiver(*object)) {
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
@@ -2825,32 +2939,33 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
v8::AccessorSetterCallback call_fun =
v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
- Handle<String> key(String::cast(name));
- LOG(isolate, ApiNamedPropertyAccess("store", this, name));
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *name));
PropertyCallbackArguments args(
- isolate, data->data(), this, JSObject::cast(holder));
+ isolate, data->data(), *object, JSObject::cast(*holder));
args.Call(call_fun,
v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Object* setter = AccessorPair::cast(structure)->setter();
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<Name> key(name);
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
@@ -2860,91 +2975,91 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
}
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
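// Illustrative note, not from this patch: the CALL_AND_RETRY_OR_DIE above
// keeps the Foreign-setter path on the raw MaybeObject* API. Its arms are:
// take `break` on success, collect garbage and retry on a retry-after-GC
// failure, die on out-of-memory, and take `return Handle<Object>()` when
// the call failed with a pending exception.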
-MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value) {
- Isolate* isolate = GetIsolate();
- Handle<Object> value_handle(value, isolate);
- Handle<JSReceiver> fun(setter, isolate);
- Handle<JSReceiver> self(this, isolate);
+Handle<Object> JSReceiver::SetPropertyWithDefinedSetter(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value) {
+ Isolate* isolate = object->GetIsolate();
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
+ if (debug->StepInActive() && setter->IsJSFunction()) {
debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+ Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false);
}
#endif
+
bool has_pending_exception;
- Handle<Object> argv[] = { value_handle };
+ Handle<Object> argv[] = { value };
Execution::Call(
- isolate, fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+ isolate, setter, object, ARRAY_SIZE(argv), argv, &has_pending_exception);
// Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *value_handle;
+ if (has_pending_exception) return Handle<Object>();
+ return value;
}
-MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
+Handle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
bool* found,
StrictModeFlag strict_mode) {
- Heap* heap = GetHeap();
- for (Object* pt = GetPrototype();
- pt != heap->null_value();
- pt = pt->GetPrototype(GetIsolate())) {
- if (pt->IsJSProxy()) {
- String* name;
- MaybeObject* maybe = heap->Uint32ToString(index);
- if (!maybe->To<String>(&name)) {
- *found = true; // Force abort
- return maybe;
- }
- return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler(
- this, name, value, NONE, strict_mode, found);
- }
- if (!JSObject::cast(pt)->HasDictionaryElements()) {
+  Isolate* isolate = object->GetIsolate();
+ for (Handle<Object> proto = handle(object->GetPrototype(), isolate);
+ !proto->IsNull();
+ proto = handle(proto->GetPrototype(isolate), isolate)) {
+ if (proto->IsJSProxy()) {
+ return JSProxy::SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy>::cast(proto),
+ object,
+ isolate->factory()->Uint32ToString(index), // name
+ value,
+ NONE,
+ strict_mode,
+ found);
+ }
+ Handle<JSObject> js_proto = Handle<JSObject>::cast(proto);
+ if (!js_proto->HasDictionaryElements()) {
continue;
}
- SeededNumberDictionary* dictionary =
- JSObject::cast(pt)->element_dictionary();
+ Handle<SeededNumberDictionary> dictionary(js_proto->element_dictionary());
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
- return SetElementWithCallback(dictionary->ValueAt(entry),
- index,
- value,
- JSObject::cast(pt),
+ Handle<Object> structure(dictionary->ValueAt(entry), isolate);
+ return SetElementWithCallback(object, structure, index, value, js_proto,
strict_mode);
}
}
}
*found = false;
- return heap->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
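// Illustrative note, not from this patch: the loop above is the recurring
// GC-safety idiom of this handlification. Every raw pointer read from the
// heap is re-wrapped in a handle before the next potentially allocating
// call, so the walk survives object motion:
//
//   for (Handle<Object> proto = handle(object->GetPrototype(), isolate);
//        !proto->IsNull();
//        proto = handle(proto->GetPrototype(isolate), isolate)) {
//     // Safe to allocate or to call back into JavaScript here.
//   }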
-MaybeObject* JSObject::SetPropertyViaPrototypes(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
+
+Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool* done) {
+ Isolate* isolate = object->GetIsolate();
*done = false;
// We could not find a local property so let's check whether there is an
// accessor that wants to handle the property, or whether the property is
// read-only on the prototype chain.
LookupResult result(isolate);
- LookupRealNamedPropertyInPrototypes(name, &result);
+ object->LookupRealNamedPropertyInPrototypes(*name, &result);
if (result.IsFound()) {
switch (result.type()) {
case NORMAL:
@@ -2955,19 +3070,21 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
case INTERCEPTOR: {
PropertyAttributes attr =
result.holder()->GetPropertyAttributeWithInterceptor(
- this, name, true);
+ *object, *name, true);
*done = !!(attr & READ_ONLY);
break;
}
case CALLBACKS: {
if (!FLAG_es5_readonly && result.IsReadOnly()) break;
*done = true;
- return SetPropertyWithCallback(result.GetCallbackObject(),
- name, value, result.holder(), strict_mode);
+ Handle<Object> callback_object(result.GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(result.holder()), strict_mode);
}
case HANDLER: {
- return result.proxy()->SetPropertyViaPrototypesWithHandler(
- this, name, value, attributes, strict_mode, done);
+ Handle<JSProxy> proxy(result.proxy());
+ return JSProxy::SetPropertyViaPrototypesWithHandler(
+ proxy, object, name, value, attributes, strict_mode, done);
}
case TRANSITION:
case NONEXISTENT:
@@ -2980,12 +3097,13 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
if (!FLAG_es5_readonly) *done = false;
if (*done) {
if (strict_mode == kNonStrictMode) return value;
- Handle<Object> args[] = { Handle<Object>(name, isolate),
- Handle<Object>(this, isolate)};
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return heap->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
@@ -3340,14 +3458,15 @@ void JSObject::LookupRealNamedPropertyInPrototypes(Name* name,
// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
+Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
LookupResult* result,
- Name* name,
- Object* value,
+ Handle<Name> name,
+ Handle<Object> value,
bool check_prototype,
StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
- LookupRealNamedPropertyInPrototypes(name, result);
+ object->LookupRealNamedPropertyInPrototypes(*name, result);
}
if (result->IsProperty()) {
@@ -3356,21 +3475,23 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
case CALLBACKS: {
Object* obj = result->GetCallbackObject();
if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info(AccessorInfo::cast(obj));
if (info->all_can_write()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
+ return SetPropertyWithCallback(object,
+ info,
name,
value,
- result->holder(),
+ handle(result->holder()),
strict_mode);
}
} else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
+ Handle<AccessorPair> pair(AccessorPair::cast(obj));
if (pair->all_can_read()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
+ return SetPropertyWithCallback(object,
+ pair,
name,
value,
- result->holder(),
+ handle(result->holder()),
strict_mode);
}
}
@@ -3379,10 +3500,11 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
case INTERCEPTOR: {
      // Try to lookup real named properties. Note that the only properties
      // that can be set this way are callbacks marked as ALL_CAN_WRITE on
      // the prototype chain.
- LookupResult r(GetIsolate());
- LookupRealNamedProperty(name, &r);
+ LookupResult r(object->GetIsolate());
+ object->LookupRealNamedProperty(*name, &r);
if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r,
+ return SetPropertyWithFailedAccessCheck(object,
+ &r,
name,
value,
check_prototype,
@@ -3397,42 +3519,38 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
}
}
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> value_handle(value, isolate);
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ Isolate* isolate = object->GetIsolate();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
-MaybeObject* JSReceiver::SetProperty(LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
+Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
if (result->IsHandler()) {
- return result->proxy()->SetPropertyWithHandler(
- this, key, value, attributes, strict_mode);
+ return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
+ object, key, value, attributes, strict_mode);
} else {
- return JSObject::cast(this)->SetPropertyForResult(
+ return JSObject::SetPropertyForResult(Handle<JSObject>::cast(object),
result, key, value, attributes, strict_mode, store_mode);
}
}
-bool JSProxy::HasPropertyWithHandler(Name* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> receiver(this, isolate);
- Handle<Object> name(name_raw, isolate);
+bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return false;
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return false;
@@ -3440,58 +3558,51 @@ bool JSProxy::HasPropertyWithHandler(Name* name_raw) {
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
- Handle<Object> value(value_raw, isolate);
+Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return *value;
+ if (name->IsSymbol()) return value;
Handle<Object> args[] = { receiver, name, value };
- CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ proxy->CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
+ if (isolate->has_pending_exception()) return Handle<Object>();
- return *value;
+ return value;
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw,
- Object* value_raw,
+Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done) {
- Isolate* isolate = GetIsolate();
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) {
*done = false;
- return isolate->heap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
*done = true; // except where redefined...
Handle<Object> args[] = { name };
Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ if (isolate->has_pending_exception()) return Handle<Object>();
if (result->IsUndefined()) {
*done = false;
- return isolate->heap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
// Emulate [[GetProperty]] semantics for proxies.
@@ -3500,7 +3611,7 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Object> desc = Execution::Call(
isolate, isolate->to_complete_property_descriptor(), result,
ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>();
// [[GetProperty]] requires to check that all properties are configurable.
Handle<String> configurable_name =
@@ -3517,7 +3628,8 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
ASSERT(configurable->IsTrue());
@@ -3538,12 +3650,13 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
ASSERT(!isolate->has_pending_exception());
ASSERT(writable->IsTrue() || writable->IsFalse());
*done = writable->IsFalse();
- if (!*done) return GetHeap()->the_hole_value();
- if (strict_mode == kNonStrictMode) return *value;
+ if (!*done) return isolate->factory()->the_hole_value();
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args[] = { name, receiver };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// We have an AccessorDescriptor.
@@ -3553,15 +3666,16 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
ASSERT(!isolate->has_pending_exception());
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return receiver->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
+ return SetPropertyWithDefinedSetter(
+ receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == kNonStrictMode) return *value;
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args2[] = { name, proxy };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
@@ -3682,7 +3796,7 @@ void JSProxy::Fix(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
// Save identity hash.
- Handle<Object> hash = JSProxy::GetIdentityHash(proxy, OMIT_CREATION);
+ Handle<Object> hash(proxy->GetIdentityHash(), isolate);
if (proxy->IsJSFunctionProxy()) {
isolate->factory()->BecomeJSFunction(proxy);
@@ -3694,7 +3808,8 @@ void JSProxy::Fix(Handle<JSProxy> proxy) {
// Inherit identity, if it was present.
if (hash->IsSmi()) {
- JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy), Smi::cast(*hash));
+ JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy),
+ Handle<Smi>::cast(hash));
}
}
@@ -3726,44 +3841,75 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
}
-void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->AllocateStorageForMap(*map));
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Map> MapAsElementsKind(Handle<Map> map, ElementsKind kind) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->AsElementsKind(kind), Map);
}
-void JSObject::MigrateInstance(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->MigrateInstance());
+void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
+ ASSERT(object->map()->inobject_properties() == map->inobject_properties());
+ ElementsKind obj_kind = object->map()->elements_kind();
+ ElementsKind map_kind = map->elements_kind();
+ if (map_kind != obj_kind) {
+ ElementsKind to_kind = map_kind;
+ if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
+ IsDictionaryElementsKind(obj_kind)) {
+ to_kind = obj_kind;
+ }
+ if (IsDictionaryElementsKind(to_kind)) {
+ NormalizeElements(object);
+ } else {
+ TransitionElementsKind(object, to_kind);
+ }
+ map = MapAsElementsKind(map, to_kind);
+ }
+ int total_size =
+ map->NumberOfOwnDescriptors() + map->unused_property_fields();
+ int out_of_object = total_size - map->inobject_properties();
+ if (out_of_object != object->properties()->length()) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray(
+ handle(object->properties()), out_of_object);
+ object->set_properties(*new_properties);
+ }
+ object->set_map(*map);
}
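// Worked example for the reconciliation above, with hypothetical kinds:
// if the object already holds FAST_ELEMENTS but `map` was created for
// FAST_SMI_ELEMENTS, the object's more general kind wins, so to_kind is
// FAST_ELEMENTS and `map` is converted via MapAsElementsKind(map, to_kind)
// before the out-of-object property array is resized and the map is
// installed.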
-Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->MigrateInstance(),
- Object);
+void JSObject::MigrateInstance(Handle<JSObject> object) {
+ // Converting any field to the most specific type will cause the
+ // GeneralizeFieldRepresentation algorithm to create the most general existing
+ // transition that matches the object. This achieves what is needed.
+ Handle<Map> original_map(object->map());
+ GeneralizeFieldRepresentation(
+ object, 0, Representation::None(), ALLOW_AS_CONSTANT);
+ object->map()->set_migration_target(true);
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
}
-Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
- int modify_index,
- Representation representation,
- StoreMode store_mode) {
- CALL_HEAP_FUNCTION(
- map->GetIsolate(),
- map->GeneralizeRepresentation(modify_index, representation, store_mode),
- Map);
+Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
+ Handle<Map> original_map(object->map());
+ Handle<Map> new_map = Map::CurrentMapForDeprecatedInternal(original_map);
+ if (new_map.is_null()) return Handle<Object>();
+ JSObject::MigrateToMap(object, new_map);
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
+ return object;
}
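// Hypothetical caller sketch, not from this patch: TryMigrateInstance is
// the non-throwing variant; an empty result handle means no up-to-date map
// was found and the object stays on its deprecated map:
//
//   if (object->map()->is_deprecated() &&
//       JSObject::TryMigrateInstance(object).is_null()) {
//     // Leave the object as-is; a later access can still force a full
//     // MigrateInstance().
//   }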
-static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Map* transition_map = lookup->GetTransitionTarget();
+Handle<Object> JSObject::SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<Map> transition_map(lookup->GetTransitionTarget());
int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
@@ -3773,8 +3919,8 @@ static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
// AddProperty will either normalize the object, or create a new fast copy
// of the map. If we get a fast copy of the map, all field representations
// will be tagged since the transition is omitted.
- return lookup->holder()->AddProperty(
- *name, *value, attributes, kNonStrictMode,
+ return JSObject::AddProperty(
+ object, name, value, attributes, kNonStrictMode,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
JSReceiver::OMIT_EXTENSIBILITY_CHECK,
JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
@@ -3785,45 +3931,41 @@ static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
// (value->IsUninitialized) as constant.
if (details.type() == CONSTANT &&
descriptors->GetValue(descriptor) == *value) {
- lookup->holder()->set_map(transition_map);
- return *value;
+ object->set_map(*transition_map);
+ return value;
}
Representation representation = details.representation();
if (!value->FitsRepresentation(representation) ||
details.type() == CONSTANT) {
- MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ transition_map = Map::GeneralizeRepresentation(transition_map,
descriptor, value->OptimalRepresentation(), FORCE_FIELD);
- if (!maybe_map->To(&transition_map)) return maybe_map;
Object* back = transition_map->GetBackPointer();
if (back->IsMap()) {
- MaybeObject* maybe_failure =
- lookup->holder()->MigrateToMap(Map::cast(back));
- if (maybe_failure->IsFailure()) return maybe_failure;
+ MigrateToMap(object, handle(Map::cast(back)));
}
descriptors = transition_map->instance_descriptors();
representation = descriptors->GetDetails(descriptor).representation();
}
int field_index = descriptors->GetFieldIndex(descriptor);
- return lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index, representation);
+ AddFastPropertyUsingMap(
+ object, transition_map, name, value, field_index, representation);
+ return value;
}
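// Illustrative note, not from this patch: this is the store-time slow path
// for representation changes. Hypothetical example: storing a heap number
// into a field whose transition target records a Smi representation fails
// FitsRepresentation(), so the transition map is generalized (FORCE_FIELD)
// and, if a back pointer exists, the object first migrates to that parent
// map before the fast property is added.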
-static MaybeObject* SetPropertyToField(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value) {
+static void SetPropertyToField(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value) {
Representation representation = lookup->representation();
if (!value->FitsRepresentation(representation) ||
lookup->type() == CONSTANT) {
- MaybeObject* maybe_failure =
- lookup->holder()->GeneralizeFieldRepresentation(
- lookup->GetDescriptorIndex(),
- value->OptimalRepresentation(),
- FORCE_FIELD);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()),
+ lookup->GetDescriptorIndex(),
+ value->OptimalRepresentation(),
+ FORCE_FIELD);
DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
int descriptor = lookup->GetDescriptorIndex();
representation = desc->GetDetails(descriptor).representation();
@@ -3833,222 +3975,182 @@ static MaybeObject* SetPropertyToField(LookupResult* lookup,
HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
lookup->GetFieldIndex().field_index()));
storage->set_value(value->Number());
- return *value;
+ return;
}
lookup->holder()->FastPropertyAtPut(
lookup->GetFieldIndex().field_index(), *value);
- return *value;
}
-static MaybeObject* ConvertAndSetLocalProperty(LookupResult* lookup,
- Name* name,
- Object* value,
- PropertyAttributes attributes) {
- JSObject* object = lookup->holder();
+static void ConvertAndSetLocalProperty(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<JSObject> object(lookup->holder());
if (object->TooManyFastProperties()) {
- MaybeObject* maybe_failure = object->NormalizeProperties(
- CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
}
if (!object->HasFastProperties()) {
- return object->ReplaceSlowProperty(name, value, attributes);
+ ReplaceSlowProperty(object, name, value, attributes);
+ return;
}
int descriptor_index = lookup->GetDescriptorIndex();
if (lookup->GetAttributes() == attributes) {
- MaybeObject* maybe_failure = object->GeneralizeFieldRepresentation(
- descriptor_index, Representation::Tagged(), FORCE_FIELD);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::GeneralizeFieldRepresentation(
+ object, descriptor_index, Representation::Tagged(), FORCE_FIELD);
} else {
- Map* map;
- MaybeObject* maybe_map = object->map()->CopyGeneralizeAllRepresentations(
+ Handle<Map> old_map(object->map());
+ Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map,
descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
- if (!maybe_map->To(&map)) return maybe_map;
- MaybeObject* maybe_failure = object->MigrateToMap(map);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::MigrateToMap(object, new_map);
}
DescriptorArray* descriptors = object->map()->instance_descriptors();
int index = descriptors->GetDetails(descriptor_index).field_index();
- object->FastPropertyAtPut(index, value);
- return value;
+ object->FastPropertyAtPut(index, *value);
}
-static MaybeObject* SetPropertyToFieldWithAttributes(
- LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
+static void SetPropertyToFieldWithAttributes(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
if (lookup->GetAttributes() == attributes) {
- if (value->IsUninitialized()) return *value;
- return SetPropertyToField(lookup, name, value);
+ if (value->IsUninitialized()) return;
+ SetPropertyToField(lookup, name, value);
} else {
- return ConvertAndSetLocalProperty(lookup, *name, *value, attributes);
+ ConvertAndSetLocalProperty(lookup, name, value, attributes);
}
}
-MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
+Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
+ AssertNoContextChange ncc(isolate);
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We internalize these short keys to avoid constantly
// reallocating them.
- if (name_raw->IsString() && !name_raw->IsInternalizedString() &&
- String::cast(name_raw)->length() <= 2) {
- Object* internalized_version;
- { MaybeObject* maybe_string_version =
- heap->InternalizeString(String::cast(name_raw));
- if (maybe_string_version->ToObject(&internalized_version)) {
- name_raw = String::cast(internalized_version);
- }
- }
+ if (name->IsString() && !name->IsInternalizedString() &&
+ Handle<String>::cast(name)->length() <= 2) {
+ name = isolate->factory()->InternalizeString(Handle<String>::cast(name));
}
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(
- lookup, name_raw, value_raw, true, strict_mode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
+ true, strict_mode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetPropertyForResult(
- lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
+ return SetPropertyForResult(Handle<JSObject>::cast(proto),
+ lookup, name, value, attributes, strict_mode, store_mode);
}
- ASSERT(!lookup->IsFound() || lookup->holder() == this ||
+ ASSERT(!lookup->IsFound() || lookup->holder() == *object ||
lookup->holder()->map()->is_hidden_prototype());
- // From this point on everything needs to be handlified, because
- // SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
+ if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) {
bool done = false;
- MaybeObject* result_object = self->SetPropertyViaPrototypes(
- *name, *value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
}
if (!lookup->IsFound()) {
// Neither properties nor transitions found.
- return self->AddProperty(
- *name, *value, attributes, strict_mode, store_mode);
+ return AddProperty(
+ object, name, value, attributes, strict_mode, store_mode);
}
if (lookup->IsProperty() && lookup->IsReadOnly()) {
if (strict_mode == kStrictMode) {
- Handle<Object> args[] = { name, self };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
} else {
- return *value;
+ return value;
}
}
- Handle<Object> old_value(heap->the_hole_value(), isolate);
- if (FLAG_harmony_observation &&
- map()->is_observed() && lookup->IsDataProperty()) {
- old_value = Object::GetProperty(self, name);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
+ if (is_observed && lookup->IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
}
// This is a real property that is not read-only, or it is a
// transition or null descriptor and there are no setters in the prototypes.
- MaybeObject* result = *value;
+ Handle<Object> result = value;
switch (lookup->type()) {
case NORMAL:
- result = lookup->holder()->SetNormalizedProperty(lookup, *value);
+ SetNormalizedProperty(handle(lookup->holder()), lookup, value);
break;
case FIELD:
- result = SetPropertyToField(lookup, name, value);
+ SetPropertyToField(lookup, name, value);
break;
case CONSTANT:
// Only replace the constant if necessary.
- if (*value == lookup->GetConstant()) return *value;
- result = SetPropertyToField(lookup, name, value);
+ if (*value == lookup->GetConstant()) return value;
+ SetPropertyToField(lookup, name, value);
break;
case CALLBACKS: {
- Object* callback_object = lookup->GetCallbackObject();
- return self->SetPropertyWithCallback(
- callback_object, *name, *value, lookup->holder(), strict_mode);
+ Handle<Object> callback_object(lookup->GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(lookup->holder()), strict_mode);
}
case INTERCEPTOR:
- result = lookup->holder()->SetPropertyWithInterceptor(
- *name, *value, attributes, strict_mode);
+ result = SetPropertyWithInterceptor(handle(lookup->holder()), name, value,
+ attributes, strict_mode);
break;
- case TRANSITION: {
- result = SetPropertyUsingTransition(lookup, name, value, attributes);
+ case TRANSITION:
+ result = SetPropertyUsingTransition(handle(lookup->holder()), lookup,
+ name, value, attributes);
break;
- }
case HANDLER:
case NONEXISTENT:
UNREACHABLE();
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
- if (FLAG_harmony_observation && self->map()->is_observed()) {
+ if (is_observed) {
if (lookup->IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "add", name, old_value);
} else {
LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
+ object->LocalLookup(*name, &new_lookup, true);
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(self, name);
+ Handle<Object> new_value = Object::GetProperty(object, name);
if (!new_value->SameValue(*old_value)) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "update", name, old_value);
}
}
}
}
- return *hresult;
-}
-
-
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- ValueType value_type,
- StoreMode mode,
- ExtensibilityCheck extensibility_check) {
- // TODO(mstarzinger): The trampoline is a giant hack, don't use it anywhere
- // else or handlification people will start hating you for all eternity.
- HandleScope scope(GetIsolate());
- IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
- return trampoline.CallWithReturnValue(
- &JSObject::SetLocalPropertyIgnoreAttributes,
- Handle<JSObject>(this),
- Handle<Name>(key),
- Handle<Object>(value, GetIsolate()),
- attributes,
- value_type,
- mode,
- extensibility_check);
+ return result;
}
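// Illustrative note, not itself part of the patch: besides the
// handlification, the hunks above rename the Object.observe change record
// types to the spec names, "new" -> "add" and "updated" -> "update"
// ("reconfigured" -> "reconfigure" follows below), and stores to the
// hidden_string property are no longer observed.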
@@ -4063,142 +4165,119 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline(
// doesn't handle function prototypes correctly.
Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
- Handle<Name> key,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type,
StoreMode mode,
ExtensibilityCheck extensibility_check) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(
- *key, *value, attributes, value_type, mode, extensibility_check),
- Object);
-}
-
+ Isolate* isolate = object->GetIsolate();
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- ValueType value_type,
- StoreMode mode,
- ExtensibilityCheck extensibility_check) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
- Isolate* isolate = GetIsolate();
+ AssertNoContextChange ncc(isolate);
+
LookupResult lookup(isolate);
- LocalLookup(name_raw, &lookup, true);
- if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup);
+ object->LocalLookup(*name, &lookup, true);
+ if (!lookup.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &lookup);
+ }
+
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&lookup,
- name_raw,
- value_raw,
- false,
- kNonStrictMode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
+ false, kNonStrictMode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name_raw,
- value_raw,
- attributes,
- value_type,
- mode,
- extensibility_check);
+ return SetLocalPropertyIgnoreAttributes(Handle<JSObject>::cast(proto),
+ name, value, attributes, value_type, mode, extensibility_check);
}
if (lookup.IsFound() &&
(lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
- LocalLookupRealNamedProperty(name_raw, &lookup);
+ object->LocalLookupRealNamedProperty(*name, &lookup);
}
// Check for accessor in prototype chain removed here in clone.
if (!lookup.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &lookup);
+ TransitionFlag flag = lookup.IsFound()
+ ? OMIT_TRANSITION : INSERT_TRANSITION;
// Neither properties nor transitions found.
- return AddProperty(
- name_raw, value_raw, attributes, kNonStrictMode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode);
+ return AddProperty(object, name, value, attributes, kNonStrictMode,
+ MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode, flag);
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsProperty()) {
if (lookup.IsDataProperty()) old_value =
- Object::GetProperty(self, name);
+ Object::GetProperty(object, name);
old_attributes = lookup.GetAttributes();
}
// Check of IsReadOnly removed from here in clone.
- MaybeObject* result = *value;
switch (lookup.type()) {
case NORMAL:
- result = self->ReplaceSlowProperty(*name, *value, attributes);
+ ReplaceSlowProperty(object, name, value, attributes);
break;
case FIELD:
- result = SetPropertyToFieldWithAttributes(
- &lookup, name, value, attributes);
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
break;
case CONSTANT:
// Only replace the constant if necessary.
if (lookup.GetAttributes() != attributes ||
*value != lookup.GetConstant()) {
- result = SetPropertyToFieldWithAttributes(
- &lookup, name, value, attributes);
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
}
break;
case CALLBACKS:
- result = ConvertAndSetLocalProperty(&lookup, *name, *value, attributes);
+ ConvertAndSetLocalProperty(&lookup, name, value, attributes);
break;
- case TRANSITION:
- result = SetPropertyUsingTransition(&lookup, name, value, attributes);
+ case TRANSITION: {
+ Handle<Object> result = SetPropertyUsingTransition(
+ handle(lookup.holder()), &lookup, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
break;
+ }
case NONEXISTENT:
case HANDLER:
case INTERCEPTOR:
UNREACHABLE();
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
if (is_observed) {
if (lookup.IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "add", name, old_value);
} else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
+ object->LocalLookup(*name, &new_lookup, true);
bool value_changed = false;
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(self, name);
+ Handle<Object> new_value = Object::GetProperty(object, name);
value_changed = !old_value->SameValue(*new_value);
}
if (new_lookup.GetAttributes() != old_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else if (value_changed) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "update", name, old_value);
}
}
}
- return *hresult;
+ return value;
}
@@ -4235,7 +4314,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
@@ -4370,7 +4449,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSReceiver> hreceiver(receiver);
@@ -4422,52 +4501,49 @@ PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
}
-MaybeObject* NormalizedMapCache::Get(JSObject* obj,
- PropertyNormalizationMode mode) {
- Isolate* isolate = obj->GetIsolate();
- Map* fast = obj->map();
- int index = fast->Hash() % kEntries;
- Object* result = get(index);
+Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> obj,
+ PropertyNormalizationMode mode) {
+ int index = obj->map()->Hash() % kEntries;
+ Handle<Object> result = handle(cache->get(index), cache->GetIsolate());
if (result->IsMap() &&
- Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
+ Handle<Map>::cast(result)->EquivalentToForNormalization(obj->map(),
+ mode)) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- Map::cast(result)->SharedMapVerify();
+ Handle<Map>::cast(result)->SharedMapVerify();
}
#endif
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
      // except for the code cache, which can contain some ICs that can be
// applied to the shared map.
- Object* fresh;
- MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- STATIC_ASSERT(Map::kDependentCodeOffset ==
- Map::kCodeCacheOffset + kPointerSize);
- int offset = Map::kDependentCodeOffset + kPointerSize;
- ASSERT(memcmp(Map::cast(fresh)->address() + offset,
- Map::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
+ Handle<Map> fresh = Map::CopyNormalized(handle(obj->map()), mode,
+ SHARED_NORMALIZED_MAP);
+
+ ASSERT(memcmp(fresh->address(),
+ Handle<Map>::cast(result)->address(),
+ Map::kCodeCacheOffset) == 0);
+ STATIC_ASSERT(Map::kDependentCodeOffset ==
+ Map::kCodeCacheOffset + kPointerSize);
+ int offset = Map::kDependentCodeOffset + kPointerSize;
+ ASSERT(memcmp(fresh->address() + offset,
+ Handle<Map>::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
#endif
- return result;
+ return Handle<Map>::cast(result);
}
- { MaybeObject* maybe_result =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- ASSERT(Map::cast(result)->is_dictionary_map());
- set(index, result);
+ Isolate* isolate = cache->GetIsolate();
+ Handle<Map> map = Map::CopyNormalized(handle(obj->map()), mode,
+ SHARED_NORMALIZED_MAP);
+ ASSERT(map->is_dictionary_map());
+ cache->set(index, *map);
isolate->counters()->normalized_maps()->Increment();
- return result;
+ return map;
}
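// Illustrative note, not from this patch: NormalizedMapCache is a
// direct-mapped cache of kEntries slots keyed by the fast map's hash; a
// miss simply overwrites the slot. In essence the lookup above is:
//
//   int index = obj->map()->Hash() % kEntries;
//   Handle<Object> entry(cache->get(index), cache->GetIsolate());
//   // Hit only if `entry` is a map equivalent to obj->map() under `mode`;
//   // otherwise a fresh normalized copy replaces the slot contents.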
@@ -4483,16 +4559,6 @@ void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
Handle<Name> name,
Handle<Code> code) {
Handle<Map> map(object->map());
- if (map->is_shared()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- // Fast case maps are never marked as shared.
- ASSERT(!receiver->HasFastProperties());
- // Replace the map with an identical copy that can be safely modified.
- map = Map::CopyNormalized(map, KEEP_INOBJECT_PROPERTIES,
- UNIQUE_NORMALIZED_MAP);
- receiver->GetIsolate()->counters()->normalized_maps()->Increment();
- receiver->set_map(*map);
- }
Map::UpdateCodeCache(map, name, code);
}
@@ -4500,65 +4566,55 @@ void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
void JSObject::NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeProperties(
- mode, expected_additional_properties));
-}
-
-
-MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
- int expected_additional_properties) {
- if (!HasFastProperties()) return this;
+ if (!object->HasFastProperties()) return;
// The global object is always normalized.
- ASSERT(!IsGlobalObject());
+ ASSERT(!object->IsGlobalObject());
// JSGlobalProxy must never be normalized
- ASSERT(!IsJSGlobalProxy());
+ ASSERT(!object->IsJSGlobalProxy());
- Map* map_of_this = map();
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Map> map(object->map());
// Allocate new content.
- int real_size = map_of_this->NumberOfOwnDescriptors();
+ int real_size = map->NumberOfOwnDescriptors();
int property_count = real_size;
if (expected_additional_properties > 0) {
property_count += expected_additional_properties;
} else {
property_count += 2; // Make space for two more properties.
}
- NameDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- NameDictionary::Allocate(GetHeap(), property_count);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ Handle<NameDictionary> dictionary =
+ isolate->factory()->NewNameDictionary(property_count);
- DescriptorArray* descs = map_of_this->instance_descriptors();
+ Handle<DescriptorArray> descs(map->instance_descriptors());
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case CONSTANT: {
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetConstant(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), NORMAL, i + 1);
- Object* value = descs->GetConstant(i);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case FIELD: {
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(
+ object->RawFastPropertyAt(descs->GetFieldIndex(i)), isolate);
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, i + 1);
- Object* value = RawFastPropertyAt(descs->GetFieldIndex(i));
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case CALLBACKS: {
- Object* value = descs->GetCallbacksObject(i);
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), CALLBACKS, i + 1);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case INTERCEPTOR:
@@ -4572,62 +4628,52 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
}
}
- Heap* current_heap = GetHeap();
-
// Copy the next enumeration index from instance descriptor.
dictionary->SetNextEnumerationIndex(real_size + 1);
- Map* new_map;
- MaybeObject* maybe_map =
- current_heap->isolate()->context()->native_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_map->To(&new_map)) return maybe_map;
+ Handle<NormalizedMapCache> cache(
+ isolate->context()->native_context()->normalized_map_cache());
+ Handle<Map> new_map = NormalizedMapCache::Get(cache, object, mode);
ASSERT(new_map->is_dictionary_map());
- // We have now successfully allocated all the necessary objects.
- // Changes can now be made with the guarantee that all of them take effect.
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
- int instance_size_delta = map_of_this->instance_size() - new_instance_size;
+ int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+ isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(*object))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(object->address(),
                                               -instance_size_delta);
}
- set_map(new_map);
- map_of_this->NotifyLeafMapLayoutChange();
+ object->set_map(*new_map);
+ map->NotifyLeafMapLayoutChange();
- set_properties(dictionary);
+ object->set_properties(*dictionary);
- current_heap->isolate()->counters()->props_to_dictionary()->Increment();
+ isolate->counters()->props_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object properties have been normalized:\n");
- Print();
+ object->Print();
}
#endif
- return this;
}
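
Normalization, as rewritten above, copies each fast (descriptor-based) property into a NameDictionary while preserving enumeration order through the `i + 1` enumeration index, then swaps in a dictionary-mode map under DisallowHeapAllocation. A toy standalone sketch of the copy step only, using std containers and invented names (not V8 types):

#include <map>
#include <string>
#include <utility>
#include <vector>

// Each dictionary entry keeps the value plus an explicit enumeration
// index, mirroring the PropertyDetails(..., NORMAL, i + 1) above.
struct DictEntry { std::string value; int enum_index; };

std::map<std::string, DictEntry> Normalize(
    const std::vector<std::pair<std::string, std::string>>& fast_props) {
  std::map<std::string, DictEntry> dict;
  for (size_t i = 0; i < fast_props.size(); ++i) {
    dict[fast_props[i].first] = {fast_props[i].second,
                                 static_cast<int>(i) + 1};  // i + 1, as above
  }
  return dict;
}
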
void JSObject::TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
+ if (object->HasFastProperties()) return;
+ ASSERT(!object->IsGlobalObject());
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
- object->TransformToFastProperties(unused_property_fields));
-}
-
-
-MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
- if (HasFastProperties()) return this;
- ASSERT(!IsGlobalObject());
- return property_dictionary()->
- TransformPropertiesToFastFor(this, unused_property_fields);
+ object->property_dictionary()->TransformPropertiesToFastFor(
+ *object, unused_property_fields));
}
@@ -4667,6 +4713,18 @@ static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary(
}
+static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
+ Handle<FixedArrayBase> array,
+ int length,
+ Handle<SeededNumberDictionary> dict) {
+ Isolate* isolate = array->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ CopyFastElementsToDictionary(
+ isolate, *array, length, *dict),
+ SeededNumberDictionary);
+}
+
+
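
The wrapper just added is the handlification pattern this patch applies throughout: a raw, GC-unsafe MaybeObject* allocation is funneled through CALL_HEAP_FUNCTION, which retries after a garbage collection and hands back a Handle. A rough standalone model of the retry idiom, with invented names and no real V8 types (the real macro lives in src/handles.h and actually drives the GC):

#include <functional>
#include <optional>
#include <stdexcept>

template <typename T>
T CallHeapFunction(const std::function<std::optional<T>()>& allocate,
                   int max_retries = 2) {
  for (int attempt = 0; attempt <= max_retries; ++attempt) {
    if (std::optional<T> result = allocate()) return *result;
    // A failed allocation would trigger a GC here; the retry then runs
    // with freed space available.
  }
  throw std::runtime_error("allocation failed after GC retries");
}
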
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
@@ -4753,52 +4811,52 @@ Smi* JSReceiver::GenerateIdentityHash() {
}
-void JSObject::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->SetHiddenProperty(
- object->GetHeap()->identity_hash_string(), hash));
+void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) {
+ Isolate* isolate = object->GetIsolate();
+ SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash);
}
-int JSObject::GetIdentityHash(Handle<JSObject> object) {
- CALL_AND_RETRY_OR_DIE(object->GetIsolate(),
- object->GetIdentityHash(ALLOW_CREATION),
- return Smi::cast(__object__)->value(),
- return 0);
+Object* JSObject::GetIdentityHash() {
+ Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
+ return stored_value->IsSmi() ? stored_value : GetHeap()->undefined_value();
}
-MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
- Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
- if (stored_value->IsSmi()) return stored_value;
+Handle<Object> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
+ Handle<Object> hash(object->GetIdentityHash(), object->GetIsolate());
+ if (hash->IsSmi())
+ return hash;
+
+ Isolate* isolate = object->GetIsolate();
- // Do not generate permanent identity hash code if not requested.
- if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
+ hash = handle(object->GenerateIdentityHash(), isolate);
+ Handle<Object> result = SetHiddenProperty(object,
+ isolate->factory()->identity_hash_string(), hash);
- Smi* hash = GenerateIdentityHash();
- MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_string(),
- hash);
- if (result->IsFailure()) return result;
- if (result->ToObjectUnchecked()->IsUndefined()) {
+ if (result->IsUndefined()) {
// Trying to get hash of detached proxy.
- return Smi::FromInt(0);
+ return handle(Smi::FromInt(0), isolate);
}
+
return hash;
}
-Handle<Object> JSProxy::GetIdentityHash(Handle<JSProxy> proxy,
- CreationFlag flag) {
- CALL_HEAP_FUNCTION(proxy->GetIsolate(), proxy->GetIdentityHash(flag), Object);
+Object* JSProxy::GetIdentityHash() {
+ return this->hash();
}
-MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
- Object* hash = this->hash();
- if (!hash->IsSmi() && flag == ALLOW_CREATION) {
- hash = GenerateIdentityHash();
- set_hash(hash);
- }
+Handle<Object> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+
+ Handle<Object> hash(proxy->GetIdentityHash(), isolate);
+ if (hash->IsSmi())
+ return hash;
+
+ hash = handle(proxy->GenerateIdentityHash(), isolate);
+ proxy->set_hash(*hash);
return hash;
}
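
Both GetOrCreateIdentityHash variants above follow the same lazy pattern: return the cached hash if one exists, otherwise generate, store, and return it, so repeated calls observe a stable value. A minimal model with a stand-in RNG where V8 calls GenerateIdentityHash:

#include <cstdint>
#include <optional>
#include <random>

struct ProxyLike {
  std::optional<uint32_t> hash;  // models JSProxy::hash()
};

uint32_t GetOrCreateIdentityHash(ProxyLike& p) {
  if (p.hash) return *p.hash;    // already computed: reuse the cached value
  static std::mt19937 rng{42};   // stand-in for GenerateIdentityHash
  p.hash = static_cast<uint32_t>(rng());
  return *p.hash;
}
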
@@ -4814,9 +4872,7 @@ Object* JSObject::GetHiddenProperty(Name* key) {
return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
}
ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
+ Object* inline_value = GetHiddenPropertiesHashTable();
if (inline_value->IsSmi()) {
// Handle inline-stored identity hash.
@@ -4835,53 +4891,45 @@ Object* JSObject::GetHiddenProperty(Name* key) {
}
-Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
+Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetHiddenProperty(*key, *value),
- Object);
-}
-
+ Isolate* isolate = object->GetIsolate();
-MaybeObject* JSObject::SetHiddenProperty(Name* key, Object* value) {
ASSERT(key->IsUniqueName());
- if (IsJSGlobalProxy()) {
+ if (object->IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
+ Handle<Object> proxy_parent(object->GetPrototype(), isolate);
// If the proxy is detached, return undefined.
- if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+ if (proxy_parent->IsNull()) return isolate->factory()->undefined_value();
ASSERT(proxy_parent->IsJSGlobalObject());
- return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
+ return SetHiddenProperty(Handle<JSObject>::cast(proxy_parent), key, value);
}
- ASSERT(!IsJSGlobalProxy());
- MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
+ ASSERT(!object->IsJSGlobalProxy());
+
+ Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
// If there is no backing store yet, store the identity hash inline.
if (value->IsSmi() &&
- key == GetHeap()->identity_hash_string() &&
+ *key == *isolate->factory()->identity_hash_string() &&
(inline_value->IsUndefined() || inline_value->IsSmi())) {
- return SetHiddenPropertiesHashTable(value);
+ return JSObject::SetHiddenPropertiesHashTable(object, value);
}
- hidden_lookup = GetHiddenPropertiesHashTable(CREATE_NEW_IF_ABSENT);
- ObjectHashTable* hashtable;
- if (!hidden_lookup->To(&hashtable)) return hidden_lookup;
+ Handle<ObjectHashTable> hashtable =
+ GetOrCreateHiddenPropertiesHashtable(object);
// If it was found, check if the key is already in the dictionary.
- MaybeObject* insert_result = hashtable->Put(key, value);
- ObjectHashTable* new_table;
- if (!insert_result->To(&new_table)) return insert_result;
- if (new_table != hashtable) {
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(hashtable, key,
+ value);
+ if (*new_table != *hashtable) {
// If adding the key expanded the dictionary (i.e., Add returned a new
// dictionary), store it back to the object.
- MaybeObject* store_result = SetHiddenPropertiesHashTable(new_table);
- if (store_result->IsFailure()) return store_result;
+ SetHiddenPropertiesHashTable(object, new_table);
}
+
// Return this to mark success.
- return this;
+ return object;
}
@@ -4896,16 +4944,14 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
return DeleteHiddenProperty(Handle<JSObject>::cast(proto), key);
}
- MaybeObject* hidden_lookup =
- object->GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- Object* inline_value = hidden_lookup->ToObjectUnchecked();
+ Object* inline_value = object->GetHiddenPropertiesHashTable();
// We never delete (inline-stored) identity hashes.
- ASSERT(*key != isolate->heap()->identity_hash_string());
+ ASSERT(*key != *isolate->factory()->identity_hash_string());
if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
- PutIntoObjectHashTable(hashtable, key, isolate->factory()->the_hole_value());
+ ObjectHashTable::Put(hashtable, key, isolate->factory()->the_hole_value());
}
@@ -4916,10 +4962,8 @@ bool JSObject::HasHiddenProperties() {
}
-MaybeObject* JSObject::GetHiddenPropertiesHashTable(
- InitializeHiddenProperties init_option) {
+Object* JSObject::GetHiddenPropertiesHashTable() {
ASSERT(!IsJSGlobalProxy());
- Object* inline_value;
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden string. Since the
@@ -4931,93 +4975,97 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- MaybeObject* maybe_value = this->FastPropertyAt(
- descriptors->GetDetails(sorted_index).representation(),
+ ASSERT(descriptors->GetDetails(sorted_index).representation().
+ IsCompatibleForLoad(Representation::Tagged()));
+ return this->RawFastPropertyAt(
descriptors->GetFieldIndex(sorted_index));
- if (!maybe_value->To(&inline_value)) return maybe_value;
} else {
- inline_value = GetHeap()->undefined_value();
+ return GetHeap()->undefined_value();
}
} else {
- inline_value = GetHeap()->undefined_value();
+ return GetHeap()->undefined_value();
}
} else {
PropertyAttributes attributes;
// You can't install a getter on a property indexed by the hidden string,
// so we can be sure that GetLocalPropertyPostInterceptor returns a real
// object.
- inline_value =
- GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_string(),
- &attributes)->ToObjectUnchecked();
+ return GetLocalPropertyPostInterceptor(this,
+ GetHeap()->hidden_string(),
+ &attributes)->ToObjectUnchecked();
}
+}
- if (init_option == ONLY_RETURN_INLINE_VALUE ||
- inline_value->IsHashTable()) {
- return inline_value;
- }
+Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
+ Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
- ObjectHashTable* hashtable;
static const int kInitialCapacity = 4;
- MaybeObject* maybe_obj =
- ObjectHashTable::Allocate(GetHeap(),
- kInitialCapacity,
- ObjectHashTable::USE_CUSTOM_MINIMUM_CAPACITY);
- if (!maybe_obj->To<ObjectHashTable>(&hashtable)) return maybe_obj;
+ Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
+ if (inline_value->IsHashTable()) {
+ return Handle<ObjectHashTable>::cast(inline_value);
+ }
+
+ Handle<ObjectHashTable> hashtable = isolate->factory()->NewObjectHashTable(
+ kInitialCapacity,
+ USE_CUSTOM_MINIMUM_CAPACITY);
if (inline_value->IsSmi()) {
// We were storing the identity hash inline and now allocated an actual
// dictionary. Put the identity hash into the new dictionary.
- MaybeObject* insert_result =
- hashtable->Put(GetHeap()->identity_hash_string(), inline_value);
- ObjectHashTable* new_table;
- if (!insert_result->To(&new_table)) return insert_result;
- // We expect no resizing for the first insert.
- ASSERT_EQ(hashtable, new_table);
+ hashtable = ObjectHashTable::Put(hashtable,
+ isolate->factory()->identity_hash_string(),
+ inline_value);
}
- MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
- GetHeap()->hidden_string(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ object,
+ isolate->factory()->hidden_string(),
hashtable,
DONT_ENUM,
OPTIMAL_REPRESENTATION,
ALLOW_AS_CONSTANT,
OMIT_EXTENSIBILITY_CHECK);
- if (store_result->IsFailure()) return store_result;
+
return hashtable;
}
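
The hidden-properties slot can hold undefined, an inline Smi identity hash, or a real ObjectHashTable; GetOrCreateHiddenPropertiesHashtable migrates the inline hash into a freshly allocated table before any other hidden property is stored. A sketch of that state machine using std::variant (names and key strings invented):

#include <cstdint>
#include <string>
#include <unordered_map>
#include <utility>
#include <variant>

using Table = std::unordered_map<std::string, int32_t>;
using HiddenSlot = std::variant<std::monostate,  // undefined
                                int32_t,         // inline identity hash (Smi)
                                Table>;          // full backing store

void SetHidden(HiddenSlot& slot, const std::string& key, int32_t value) {
  if (key == "identity_hash" && !std::holds_alternative<Table>(slot)) {
    slot = value;  // store the hash inline while there is no backing store
    return;
  }
  if (!std::holds_alternative<Table>(slot)) {
    Table table;
    if (auto* inline_hash = std::get_if<int32_t>(&slot))
      table["identity_hash"] = *inline_hash;  // migrate the inline hash
    slot = std::move(table);
  }
  std::get<Table>(slot)[key] = value;
}
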
-MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
- ASSERT(!IsJSGlobalProxy());
+Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
+ Handle<Object> value) {
+ ASSERT(!object->IsJSGlobalProxy());
+
+ Isolate* isolate = object->GetIsolate();
+
// We can store the identity hash inline iff there is no backing store
// for hidden properties yet.
- ASSERT(HasHiddenProperties() != value->IsSmi());
- if (HasFastProperties()) {
+ ASSERT(object->HasHiddenProperties() != value->IsSmi());
+ if (object->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
// in the descriptor array matches the hidden string. Since the
// hidden strings hash code is zero (and no other name has hash
// code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
+ DescriptorArray* descriptors = object->map()->instance_descriptors();
if (descriptors->number_of_descriptors() > 0) {
int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
- sorted_index < map()->NumberOfOwnDescriptors()) {
+ if (descriptors->GetKey(sorted_index) == isolate->heap()->hidden_string()
+ && sorted_index < object->map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index), value);
- return this;
+ object->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
+ *value);
+ return object;
}
}
}
- MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
- GetHeap()->hidden_string(),
- value,
- DONT_ENUM,
- OPTIMAL_REPRESENTATION,
- ALLOW_AS_CONSTANT,
- OMIT_EXTENSIBILITY_CHECK);
- if (store_result->IsFailure()) return store_result;
- return this;
+
+ SetLocalPropertyIgnoreAttributes(object,
+ isolate->factory()->hidden_string(),
+ value,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
+ return object;
}
@@ -5089,7 +5137,7 @@ Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
if (interceptor->deleter()->IsUndefined()) return factory->false_value();
@@ -5152,7 +5200,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
Handle<Object> old_value;
bool should_enqueue_change_record = false;
if (FLAG_harmony_observation && object->map()->is_observed()) {
- should_enqueue_change_record = object->HasLocalElement(index);
+ should_enqueue_change_record = HasLocalElement(object, index);
if (should_enqueue_change_record) {
old_value = object->GetLocalElementAccessorPair(index) != NULL
? Handle<Object>::cast(factory->the_hole_value())
@@ -5168,9 +5216,9 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
result = AccessorDelete(object, index, mode);
}
- if (should_enqueue_change_record && !object->HasLocalElement(index)) {
+ if (should_enqueue_change_record && !HasLocalElement(object, index)) {
Handle<String> name = factory->Uint32ToString(index);
- EnqueueChangeRecord(object, "deleted", name, old_value);
+ EnqueueChangeRecord(object, "delete", name, old_value);
}
return result;
@@ -5222,7 +5270,9 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && object->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
if (is_observed && lookup.IsDataProperty()) {
old_value = Object::GetProperty(object, name);
}
@@ -5243,8 +5293,8 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
result = DeleteNormalizedProperty(object, name, mode);
}
- if (is_observed && !object->HasLocalProperty(*name)) {
- EnqueueChangeRecord(object, "deleted", name, old_value);
+ if (is_observed && !HasLocalProperty(object, name)) {
+ EnqueueChangeRecord(object, "delete", name, old_value);
}
return result;
@@ -5405,59 +5455,58 @@ bool JSObject::ReferencesObject(Object* obj) {
Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
+ Isolate* isolate = object->GetIsolate();
+ if (!object->map()->is_extensible()) return object;
-MaybeObject* JSObject::PreventExtensions() {
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->false_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->PreventExtensions();
+ return PreventExtensions(Handle<JSObject>::cast(proto));
}
// It's not possible to seal objects with external array elements
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this, isolate);
+ if (object->HasExternalArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// If there are fast elements we normalize.
- SeededNumberDictionary* dictionary = NULL;
- { MaybeObject* maybe = NormalizeElements();
- if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
+
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
// Do a map transition, other objects with this map may still
// be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Map* new_map;
- MaybeObject* maybe = map()->Copy();
- if (!maybe->To(&new_map)) return maybe;
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->set_is_extensible(false);
- set_map(new_map);
- ASSERT(!map()->is_extensible());
- return new_map;
+ object->set_map(*new_map);
+ ASSERT(!object->map()->is_extensible());
+
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
+ EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
+ isolate->factory()->the_hole_value());
+ }
+ return object;
}
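
PreventExtensions flips extensibility on a private copy of the map, since other objects may still share the original map (hence the TODO about extending the NormalizedMapCache). A minimal model of the copy-then-flip step, with invented types:

#include <memory>
#include <utility>

struct MapLike {
  bool is_extensible = true;
};

struct ObjectLike {
  std::shared_ptr<const MapLike> map;  // maps are shared between objects
};

void PreventExtensions(ObjectLike& o) {
  if (!o.map->is_extensible) return;                 // already done
  auto new_map = std::make_shared<MapLike>(*o.map);  // models Map::Copy
  new_map->is_extensible = false;
  o.map = std::move(new_map);  // only this object's map changes
}
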
@@ -5482,296 +5531,374 @@ static void FreezeDictionary(Dictionary* dictionary) {
}
-MUST_USE_RESULT MaybeObject* JSObject::Freeze(Isolate* isolate) {
+Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
// Freezing non-strict arguments should be handled elsewhere.
- ASSERT(!HasNonStrictArgumentsElements());
-
- Heap* heap = isolate->heap();
+ ASSERT(!object->HasNonStrictArgumentsElements());
+ ASSERT(!object->map()->is_observed());
- if (map()->is_frozen()) return this;
+ if (object->map()->is_frozen()) return object;
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
- heap->undefined_value(),
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
+ isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->false_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->Freeze(isolate);
+ return Freeze(Handle<JSObject>::cast(proto));
}
// It's not possible to freeze objects with external array elements
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this, isolate);
+ if (object->HasExternalArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- SeededNumberDictionary* new_element_dictionary = NULL;
- if (!elements()->IsDictionary()) {
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : elements()->length();
+ Handle<SeededNumberDictionary> new_element_dictionary;
+ if (!object->elements()->IsDictionary()) {
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
+ : object->elements()->length();
if (length > 0) {
int capacity = 0;
int used = 0;
- GetElementsCapacityAndUsage(&capacity, &used);
- MaybeObject* maybe_dict = SeededNumberDictionary::Allocate(heap, used);
- if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+ object->GetElementsCapacityAndUsage(&capacity, &used);
+ new_element_dictionary =
+ isolate->factory()->NewSeededNumberDictionary(used);
// Move elements to a dictionary; avoid calling NormalizeElements to avoid
// unnecessary transitions.
- maybe_dict = CopyFastElementsToDictionary(isolate, elements(), length,
- new_element_dictionary);
- if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+ new_element_dictionary = CopyFastElementsToDictionary(
+ handle(object->elements()), length, new_element_dictionary);
} else {
// No existing elements, use a pre-allocated empty backing store
- new_element_dictionary = heap->empty_slow_element_dictionary();
+ new_element_dictionary =
+ isolate->factory()->empty_slow_element_dictionary();
}
}
LookupResult result(isolate);
- map()->LookupTransition(this, heap->frozen_symbol(), &result);
+ Handle<Map> old_map(object->map());
+ old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result);
if (result.IsTransition()) {
Map* transition_map = result.GetTransitionTarget();
ASSERT(transition_map->has_dictionary_elements());
ASSERT(transition_map->is_frozen());
ASSERT(!transition_map->is_extensible());
- set_map(transition_map);
- } else if (HasFastProperties() && map()->CanHaveMoreTransitions()) {
+ object->set_map(transition_map);
+ } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
// Create a new descriptor array with fully-frozen properties
- int num_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors =
- map()->instance_descriptors()->CopyUpToAddAttributes(num_descriptors,
- FROZEN);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyReplaceDescriptors(
- new_descriptors, INSERT_TRANSITION, heap->frozen_symbol());
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ int num_descriptors = old_map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpToAddAttributes(
+ handle(old_map->instance_descriptors()), num_descriptors, FROZEN);
+ Handle<Map> new_map = Map::CopyReplaceDescriptors(
+ old_map, new_descriptors, INSERT_TRANSITION,
+ isolate->factory()->frozen_symbol());
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- set_map(new_map);
+ object->set_map(*new_map);
} else {
// Slow path: need to normalize properties for safety
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe->IsFailure()) return maybe;
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// Create a new map, since other objects with this map may be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Map* new_map;
- MaybeObject* maybe_copy = map()->Copy();
- if (!maybe_copy->To(&new_map)) return maybe_copy;
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- set_map(new_map);
+ object->set_map(*new_map);
// Freeze dictionary-mode properties
- FreezeDictionary(property_dictionary());
+ FreezeDictionary(object->property_dictionary());
}
- ASSERT(map()->has_dictionary_elements());
- if (new_element_dictionary != NULL) {
- set_elements(new_element_dictionary);
+ ASSERT(object->map()->has_dictionary_elements());
+ if (!new_element_dictionary.is_null()) {
+ object->set_elements(*new_element_dictionary);
}
- if (elements() != heap->empty_slow_element_dictionary()) {
- SeededNumberDictionary* dictionary = element_dictionary();
+ if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
+ SeededNumberDictionary* dictionary = object->element_dictionary();
// Make sure we never go back to the fast case
dictionary->set_requires_slow_elements();
// Freeze all elements in the dictionary
FreezeDictionary(dictionary);
}
- return this;
+ return object;
}
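
The fast path above freezes by copying the descriptor array and OR-ing FROZEN into every property's attributes; in this version of V8, FROZEN combines READ_ONLY and DONT_DELETE. A small sketch of that attribute pass (bit values invented for illustration):

#include <vector>

enum Attr : unsigned {
  NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4,
  FROZEN = READ_ONLY | DONT_DELETE  // non-writable, non-configurable
};

std::vector<unsigned> CopyUpToAddAttributes(const std::vector<unsigned>& descs,
                                            unsigned extra) {
  std::vector<unsigned> out(descs);
  for (unsigned& a : out) a |= extra;  // every copied descriptor picks up
                                       // the extra attribute bits
  return out;
}
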
-MUST_USE_RESULT MaybeObject* JSObject::SetObserved(Isolate* isolate) {
- if (map()->is_observed())
- return isolate->heap()->undefined_value();
-
- Heap* heap = isolate->heap();
+void JSObject::SetObserved(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
- if (!HasExternalArrayElements()) {
- // Go to dictionary mode, so that we don't skip map checks.
- MaybeObject* maybe = NormalizeElements();
- if (maybe->IsFailure()) return maybe;
- ASSERT(!HasFastElements());
- }
+ if (object->map()->is_observed())
+ return;
LookupResult result(isolate);
- map()->LookupTransition(this, heap->observed_symbol(), &result);
+ object->map()->LookupTransition(*object,
+ isolate->heap()->observed_symbol(),
+ &result);
- Map* new_map;
+ Handle<Map> new_map;
if (result.IsTransition()) {
- new_map = result.GetTransitionTarget();
+ new_map = handle(result.GetTransitionTarget());
ASSERT(new_map->is_observed());
- } else if (map()->CanHaveMoreTransitions()) {
- MaybeObject* maybe_new_map = map()->CopyForObserved();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ } else if (object->map()->CanHaveMoreTransitions()) {
+ new_map = Map::CopyForObserved(handle(object->map()));
} else {
- MaybeObject* maybe_copy = map()->Copy();
- if (!maybe_copy->To(&new_map)) return maybe_copy;
- new_map->set_is_observed(true);
+ new_map = Map::Copy(handle(object->map()));
+ new_map->set_is_observed();
}
- set_map(new_map);
+ object->set_map(*new_map);
+}
- return heap->undefined_value();
+
+Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*object), JSObject);
}
-MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
- StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return isolate->StackOverflow();
+template<class ContextObject>
+class JSObjectWalkVisitor {
+ public:
+ JSObjectWalkVisitor(ContextObject* site_context, bool copying,
+ JSObject::DeepCopyHints hints)
+ : site_context_(site_context),
+ copying_(copying),
+ hints_(hints) {}
- if (map()->is_deprecated()) {
- MaybeObject* maybe_failure = MigrateInstance();
- if (maybe_failure->IsFailure()) return maybe_failure;
+ Handle<JSObject> StructureWalk(Handle<JSObject> object);
+
+ protected:
+ inline Handle<JSObject> VisitElementOrProperty(Handle<JSObject> object,
+ Handle<JSObject> value) {
+ Handle<AllocationSite> current_site = site_context()->EnterNewScope();
+ Handle<JSObject> copy_of_value = StructureWalk(value);
+ site_context()->ExitScope(current_site, value);
+ return copy_of_value;
}
- Heap* heap = isolate->heap();
- Object* result;
- { MaybeObject* maybe_result = heap->CopyJSObject(this);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ inline ContextObject* site_context() { return site_context_; }
+ inline Isolate* isolate() { return site_context()->isolate(); }
+
+ inline bool copying() const { return copying_; }
+
+ private:
+ ContextObject* site_context_;
+ const bool copying_;
+ const JSObject::DeepCopyHints hints_;
+};
+
+
+template <class ContextObject>
+Handle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
+ Handle<JSObject> object) {
+ Isolate* isolate = this->isolate();
+ bool copying = this->copying();
+ bool shallow = hints_ == JSObject::kObjectIsShallowArray;
+
+ if (!shallow) {
+ StackLimitCheck check(isolate);
+
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return Handle<JSObject>::null();
+ }
}
- JSObject* copy = JSObject::cast(result);
- // Deep copy local properties.
- if (copy->HasFastProperties()) {
- DescriptorArray* descriptors = copy->map()->instance_descriptors();
- int limit = copy->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != FIELD) continue;
- int index = descriptors->GetFieldIndex(i);
- Object* value = RawFastPropertyAt(index);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- MaybeObject* maybe_copy = js_object->DeepCopy(isolate);
- if (!maybe_copy->To(&value)) return maybe_copy;
- } else {
- Representation representation = details.representation();
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&value)) return maybe_storage;
- }
- copy->FastPropertyAtPut(index, value);
+ if (object->map()->is_deprecated()) {
+ JSObject::MigrateInstance(object);
+ }
+
+ Handle<JSObject> copy;
+ if (copying) {
+ Handle<AllocationSite> site_to_pass;
+ if (site_context()->ShouldCreateMemento(object)) {
+ site_to_pass = site_context()->current();
}
+ CALL_AND_RETRY_OR_DIE(isolate,
+ isolate->heap()->CopyJSObject(*object,
+ site_to_pass.is_null() ? NULL : *site_to_pass),
+ { copy = Handle<JSObject>(JSObject::cast(__object__),
+ isolate);
+ break;
+ },
+ return Handle<JSObject>());
} else {
- { MaybeObject* maybe_result =
- heap->AllocateFixedArray(copy->NumberOfLocalProperties());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* names = FixedArray::cast(result);
- copy->GetLocalPropertyNames(names, 0);
- for (int i = 0; i < names->length(); i++) {
- ASSERT(names->get(i)->IsString());
- String* key_string = String::cast(names->get(i));
- PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(key_string);
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- if (attributes != NONE) continue;
- Object* value =
- copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ copy = object;
+ }
+
+ ASSERT(copying || copy.is_identical_to(object));
+
+ ElementsKind kind = copy->GetElementsKind();
+ if (copying && IsFastSmiOrObjectElementsKind(kind) &&
+ FixedArray::cast(copy->elements())->map() ==
+ isolate->heap()->fixed_cow_array_map()) {
+ isolate->counters()->cow_arrays_created_runtime()->Increment();
+ }
+
+ if (!shallow) {
+ HandleScope scope(isolate);
+
+ // Deep copy local properties.
+ if (copy->HasFastProperties()) {
+ Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
+ int limit = copy->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
+ Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+ if (value->IsJSObject()) {
+ value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
+ } else {
+ Representation representation = details.representation();
+ value = NewStorageFor(isolate, value, representation);
}
- { MaybeObject* maybe_result =
- // Creating object copy for literals. No strict mode needed.
- copy->SetProperty(key_string, result, NONE, kNonStrictMode);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (copying) {
+ copy->FastPropertyAtPut(index, *value);
+ }
+ }
+ } else {
+ Handle<FixedArray> names =
+ isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
+ copy->GetLocalPropertyNames(*names, 0);
+ for (int i = 0; i < names->length(); i++) {
+ ASSERT(names->get(i)->IsString());
+ Handle<String> key_string(String::cast(names->get(i)));
+ PropertyAttributes attributes =
+ copy->GetLocalPropertyAttribute(*key_string);
+ // Only deep copy fields from the object literal expression.
+ // In particular, don't try to copy the length attribute of
+ // an array.
+ if (attributes != NONE) continue;
+ Handle<Object> value(
+ copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
+ isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ // Creating object copy for literals. No strict mode needed.
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty(
+ copy, key_string, result, NONE, kNonStrictMode));
+ }
}
}
}
- }
- // Deep copy local elements.
- // Pixel elements cannot be created using an object literal.
- ASSERT(!copy->HasExternalArrayElements());
- switch (copy->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == heap->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
+ // Deep copy local elements.
+ // Pixel elements cannot be created using an object literal.
+ ASSERT(!copy->HasExternalArrayElements());
+ switch (kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
+ if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
#ifdef DEBUG
- for (int i = 0; i < elements->length(); i++) {
- ASSERT(!elements->get(i)->IsJSObject());
- }
+ for (int i = 0; i < elements->length(); i++) {
+ ASSERT(!elements->get(i)->IsJSObject());
+ }
#endif
- } else {
- for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
- ASSERT(value->IsSmi() ||
- value->IsTheHole() ||
- (IsFastObjectElementsKind(copy->GetElementsKind())));
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ } else {
+ for (int i = 0; i < elements->length(); i++) {
+ Handle<Object> value(elements->get(i), isolate);
+ ASSERT(value->IsSmi() ||
+ value->IsTheHole() ||
+ (IsFastObjectElementsKind(copy->GetElementsKind())));
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ elements->set(i, *result);
+ }
}
- elements->set(i, result);
}
}
+ break;
}
- break;
- }
- case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* element_dictionary = copy->element_dictionary();
- int capacity = element_dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(k)) {
- Object* value = element_dictionary->ValueAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> element_dictionary(
+ copy->element_dictionary());
+ int capacity = element_dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = element_dictionary->KeyAt(i);
+ if (element_dictionary->IsKey(k)) {
+ Handle<Object> value(element_dictionary->ValueAt(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> result = VisitElementOrProperty(
+ copy, Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ if (copying) {
+ element_dictionary->ValueAtPut(i, *result);
+ }
}
- element_dictionary->ValueAtPut(i, result);
}
}
+ break;
}
- break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ // No contained objects, nothing to do.
+ break;
}
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // No contained objects, nothing to do.
- break;
}
+
+ return copy;
+}
+
+
+Handle<JSObject> JSObject::DeepWalk(
+ Handle<JSObject> object,
+ AllocationSiteCreationContext* site_context) {
+ JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
+ kNoHints);
+ Handle<JSObject> result = v.StructureWalk(object);
+ ASSERT(result.is_null() || result.is_identical_to(object));
+ return result;
+}
+
+
+Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object,
+ AllocationSiteUsageContext* site_context,
+ DeepCopyHints hints) {
+ JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
+ Handle<JSObject> copy = v.StructureWalk(object);
+ ASSERT(!copy.is_identical_to(object));
return copy;
}
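
DeepWalk and DeepCopy now share one templated StructureWalk: with copying == false it only visits sub-objects (letting the site context record allocation sites) and returns the input object, while with copying == true it mutates the freshly copied object. A toy model of the shared recursion, with invented types:

#include <map>
#include <memory>
#include <string>

struct Node {
  std::map<std::string, std::shared_ptr<Node>> props;
};

std::shared_ptr<Node> StructureWalk(const std::shared_ptr<Node>& node,
                                    bool copying) {
  // Walk mode aliases the input; copy mode starts from a shallow clone.
  std::shared_ptr<Node> copy = copying ? std::make_shared<Node>(*node) : node;
  for (auto& entry : copy->props) {
    std::shared_ptr<Node> child = StructureWalk(entry.second, copying);
    if (copying) entry.second = child;  // only the copy is ever mutated
  }
  return copy;
}
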
@@ -5789,7 +5916,7 @@ bool JSReceiver::IsSimpleEnum() {
if (!o->IsJSObject()) return false;
JSObject* curr = JSObject::cast(o);
int enum_length = curr->map()->EnumLength();
- if (enum_length == Map::kInvalidEnumCache) return false;
+ if (enum_length == kInvalidEnumCacheSentinel) return false;
ASSERT(!curr->HasNamedInterceptor());
ASSERT(!curr->HasIndexedInterceptor());
ASSERT(!curr->IsAccessCheckNeeded());
@@ -6043,8 +6170,7 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
bool only_attribute_changes = getter->IsNull() && setter->IsNull();
if (object->HasFastProperties() && !only_attribute_changes &&
access_control == v8::DEFAULT &&
- (object->map()->NumberOfOwnDescriptors() <
- DescriptorArray::kMaxNumberOfDescriptors)) {
+ (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors)) {
bool getterOk = getter->IsNull() ||
DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes);
bool setterOk = !getterOk || setter->IsNull() ||
@@ -6085,6 +6211,31 @@ bool JSObject::CanSetCallback(Name* name) {
}
+bool Map::DictionaryElementsInPrototypeChainOnly() {
+ Heap* heap = GetHeap();
+
+ if (IsDictionaryElementsKind(elements_kind())) {
+ return false;
+ }
+
+ for (Object* prototype = this->prototype();
+ prototype != heap->null_value();
+ prototype = prototype->GetPrototype(GetIsolate())) {
+ if (prototype->IsJSProxy()) {
+ // Be conservative, don't walk into proxies.
+ return true;
+ }
+
+ if (IsDictionaryElementsKind(
+ JSObject::cast(prototype)->map()->elements_kind())) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
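
The new predicate walks the prototype chain, returns false if the receiver's own map already has dictionary elements, and treats proxies conservatively because it cannot see through them. The same logic over a toy map chain:

// Invented stand-in for a map with a prototype link; nullptr models
// reaching heap->null_value().
struct ProtoMap {
  bool dictionary_elements = false;
  bool is_proxy = false;
  const ProtoMap* prototype = nullptr;
};

bool DictionaryElementsInPrototypeChainOnly(const ProtoMap& map) {
  if (map.dictionary_elements) return false;  // not "only" in the chain
  for (const ProtoMap* p = map.prototype; p != nullptr; p = p->prototype) {
    if (p->is_proxy) return true;             // be conservative on proxies
    if (p->dictionary_elements) return true;
  }
  return false;
}
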
void JSObject::SetElementCallback(Handle<JSObject> object,
uint32_t index,
Handle<Object> structure,
@@ -6093,10 +6244,10 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
// Normalize elements to make this operation simple.
+ bool had_dictionary_elements = object->HasDictionaryElements();
Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
ASSERT(object->HasDictionaryElements() ||
object->HasDictionaryArgumentsElements());
-
// Update the dictionary with the new CALLBACKS property.
dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
details);
@@ -6116,6 +6267,11 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
parameter_map->set(1, *dictionary);
} else {
object->set_elements(*dictionary);
+
+ if (!had_dictionary_elements) {
+ // KeyedStoreICs (at least the non-generic ones) need a reset.
+ heap->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ }
}
}
@@ -6175,7 +6331,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
+ AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(*name)->TryFlatten();
@@ -6186,11 +6342,13 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
bool is_element = name->AsArrayIndex(&index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && object->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation &&
+ object->map()->is_observed() &&
+ *name != isolate->heap()->hidden_string();
bool preexists = false;
if (is_observed) {
if (is_element) {
- preexists = object->HasLocalElement(index);
+ preexists = HasLocalElement(object, index);
if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
old_value = Object::GetElement(isolate, object, index);
}
@@ -6213,7 +6371,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
}
if (is_observed) {
- const char* type = preexists ? "reconfigured" : "new";
+ const char* type = preexists ? "reconfigure" : "add";
EnqueueChangeRecord(object, type, name, old_value);
}
}
@@ -6361,7 +6519,7 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
// Try to flatten before operating on the string.
if (name->IsString()) FlattenString(Handle<String>::cast(name));
@@ -6420,58 +6578,62 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
}
-MaybeObject* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
- Heap* heap = GetHeap();
+Handle<Object> JSObject::GetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
+ AssertNoContextChange ncc(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- RETURN_IF_SCHEDULED_EXCEPTION(heap->isolate());
- return heap->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
// Make the lookup and include prototypes.
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- if (obj->IsJSObject() && JSObject::cast(obj)->HasDictionaryElements()) {
- JSObject* js_object = JSObject::cast(obj);
+ for (Handle<Object> obj = object;
+ !obj->IsNull();
+ obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
+ if (obj->IsJSObject() && JSObject::cast(*obj)->HasDictionaryElements()) {
+ JSObject* js_object = JSObject::cast(*obj);
SeededNumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
element->IsAccessorPair()) {
- return AccessorPair::cast(element)->GetComponent(component);
+ return handle(AccessorPair::cast(element)->GetComponent(component),
+ isolate);
}
}
}
}
} else {
- for (Object* obj = this;
- obj != heap->null_value();
- obj = JSReceiver::cast(obj)->GetPrototype()) {
- LookupResult result(heap->isolate());
- JSReceiver::cast(obj)->LocalLookup(name, &result);
+ for (Handle<Object> obj = object;
+ !obj->IsNull();
+ obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
+ LookupResult result(isolate);
+ JSReceiver::cast(*obj)->LocalLookup(*name, &result);
if (result.IsFound()) {
- if (result.IsReadOnly()) return heap->undefined_value();
+ if (result.IsReadOnly()) return isolate->factory()->undefined_value();
if (result.IsPropertyCallbacks()) {
Object* obj = result.GetCallbackObject();
if (obj->IsAccessorPair()) {
- return AccessorPair::cast(obj)->GetComponent(component);
+ return handle(AccessorPair::cast(obj)->GetComponent(component),
+ isolate);
}
}
}
}
}
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
@@ -6504,6 +6666,14 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
+Handle<Map> Map::RawCopy(Handle<Map> map,
+ int instance_size) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->RawCopy(instance_size),
+ Map);
+}
+
+
MaybeObject* Map::RawCopy(int instance_size) {
Map* result;
MaybeObject* maybe_result =
@@ -6517,7 +6687,8 @@ MaybeObject* Map::RawCopy(int instance_size) {
int new_bit_field3 = bit_field3();
new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
- new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
+ new_bit_field3 = EnumLengthBits::update(new_bit_field3,
+ kInvalidEnumCacheSentinel);
new_bit_field3 = Deprecated::update(new_bit_field3, false);
new_bit_field3 = IsUnstable::update(new_bit_field3, false);
result->set_bit_field3(new_bit_field3);
@@ -6528,25 +6699,15 @@ MaybeObject* Map::RawCopy(int instance_size) {
Handle<Map> Map::CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing) {
- CALL_HEAP_FUNCTION(map->GetIsolate(),
- map->CopyNormalized(mode, sharing),
- Map);
-}
-
-
-MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing) {
- int new_instance_size = instance_size();
+ int new_instance_size = map->instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= inobject_properties() * kPointerSize;
+ new_instance_size -= map->inobject_properties() * kPointerSize;
}
- Map* result;
- MaybeObject* maybe_result = RawCopy(new_instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
+ Handle<Map> result = Map::RawCopy(map, new_instance_size);
if (mode != CLEAR_INOBJECT_PROPERTIES) {
- result->set_inobject_properties(inobject_properties());
+ result->set_inobject_properties(map->inobject_properties());
}
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
@@ -6660,6 +6821,16 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
}
+Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->CopyReplaceDescriptors(*descriptors, flag, *name),
+ Map);
+}
+
+
MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
TransitionFlag flag,
Name* name,
@@ -6688,20 +6859,19 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
// Since this method is used to rewrite an existing transition tree, it can
// always insert transitions without checking.
-MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
- DescriptorArray* descriptors) {
+Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors) {
ASSERT(descriptors->IsSortedNoDuplicates());
- Map* result;
- MaybeObject* maybe_result = CopyDropDescriptors();
- if (!maybe_result->To(&result)) return maybe_result;
+ Handle<Map> result = Map::CopyDropDescriptors(map);
- result->InitializeDescriptors(descriptors);
+ result->InitializeDescriptors(*descriptors);
result->SetNumberOfOwnDescriptors(new_descriptor + 1);
- int unused_property_fields = this->unused_property_fields();
+ int unused_property_fields = map->unused_property_fields();
if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
- unused_property_fields = this->unused_property_fields() - 1;
+ unused_property_fields = map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
}
@@ -6710,14 +6880,12 @@ MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
result->set_unused_property_fields(unused_property_fields);
result->set_owns_descriptors(false);
- Name* name = descriptors->GetKey(new_descriptor);
- TransitionArray* transitions;
- MaybeObject* maybe_transitions =
- AddTransition(name, result, SIMPLE_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+ Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
+ Handle<TransitionArray> transitions = Map::AddTransition(map, name, result,
+ SIMPLE_TRANSITION);
- set_transitions(transitions);
- result->SetBackPointer(this);
+ map->set_transitions(*transitions);
+ result->SetBackPointer(*map);
return result;
}
@@ -6775,35 +6943,34 @@ MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
}
-MaybeObject* Map::CopyForObserved() {
- ASSERT(!is_observed());
+Handle<Map> Map::CopyForObserved(Handle<Map> map) {
+ ASSERT(!map->is_observed());
+
+ Isolate* isolate = map->GetIsolate();
// In case the map owned its own descriptors, share the descriptors and
// transfer ownership to the new map.
- Map* new_map;
- MaybeObject* maybe_new_map;
- if (owns_descriptors()) {
- maybe_new_map = CopyDropDescriptors();
+ Handle<Map> new_map;
+ if (map->owns_descriptors()) {
+ new_map = Map::CopyDropDescriptors(map);
} else {
- maybe_new_map = Copy();
+ new_map = Map::Copy(map);
}
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- TransitionArray* transitions;
- MaybeObject* maybe_transitions = AddTransition(GetHeap()->observed_symbol(),
- new_map,
- FULL_TRANSITION);
- if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- set_transitions(transitions);
+ Handle<TransitionArray> transitions =
+ Map::AddTransition(map, isolate->factory()->observed_symbol(), new_map,
+ FULL_TRANSITION);
- new_map->set_is_observed(true);
+ map->set_transitions(*transitions);
- if (owns_descriptors()) {
- new_map->InitializeDescriptors(instance_descriptors());
- set_owns_descriptors(false);
+ new_map->set_is_observed();
+
+ if (map->owns_descriptors()) {
+ new_map->InitializeDescriptors(map->instance_descriptors());
+ map->set_owns_descriptors(false);
}
- new_map->SetBackPointer(this);
+ new_map->SetBackPointer(*map);
return new_map;
}
@@ -6904,6 +7071,16 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
}
+Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->CopyUpToAddAttributes(enumeration_index, attributes),
+ DescriptorArray);
+}
+
+
MaybeObject* DescriptorArray::CopyUpToAddAttributes(
int enumeration_index, PropertyAttributes attributes) {
if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
@@ -6992,8 +7169,6 @@ void Map::UpdateCodeCache(Handle<Map> map,
MaybeObject* Map::UpdateCodeCache(Name* name, Code* code) {
- ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache());
-
// Allocate the code cache if not present.
if (code_cache()->IsFixedArray()) {
Object* result;
@@ -7320,11 +7495,10 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) {
Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
- if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) {
- return LookupNormalTypeCache(name, flags);
- } else {
- return LookupDefaultCache(name, flags);
- }
+ flags = Code::RemoveTypeFromFlags(flags);
+ Object* result = LookupDefaultCache(name, flags);
+ if (result->IsCode()) return result;
+ return LookupNormalTypeCache(name, flags);
}
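
Lookup now masks the type bits out of the flags and probes the default cache before the normal-type cache, so one call can match code cached under either type. A sketch of the masking step (the bit layout here is invented, not Code::Flags' real encoding):

#include <cstdint>

constexpr uint32_t kTypeMask = 0xF0;  // hypothetical type bit-field

constexpr uint32_t RemoveTypeFromFlags(uint32_t flags) {
  return flags & ~kTypeMask;  // compare everything except the type
}

static_assert(RemoveTypeFromFlags(0x3A) == RemoveTypeFromFlags(0x7A),
              "entries differing only in type bits compare equal");
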
@@ -7338,7 +7512,7 @@ Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) {
if (key->IsUndefined()) return key;
if (name->Equals(Name::cast(key))) {
Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
- if (code->flags() == flags) {
+ if (Code::RemoveTypeFromFlags(code->flags()) == flags) {
return code;
}
}
@@ -7402,9 +7576,7 @@ class CodeCacheHashTableKey : public HashTableKey {
: name_(name), flags_(flags), code_(NULL) { }
CodeCacheHashTableKey(Name* name, Code* code)
- : name_(name),
- flags_(code->flags()),
- code_(code) { }
+ : name_(name), flags_(code->flags()), code_(code) { }
bool IsMatch(Object* other) {
@@ -7676,7 +7848,7 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
accessor->AddElementsToFixedArray(array, array, this);
FixedArray* result;
if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -7694,7 +7866,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
accessor->AddElementsToFixedArray(NULL, NULL, this, other);
FixedArray* result;
if (!maybe_result->To(&result)) return maybe_result;
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < result->length(); i++) {
Object* current = result->get(i);
@@ -7706,11 +7878,11 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
}
-MaybeObject* FixedArray::CopySize(int new_length) {
+MaybeObject* FixedArray::CopySize(int new_length, PretenureFlag pretenure) {
Heap* heap = GetHeap();
if (new_length == 0) return heap->empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length, pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* result = FixedArray::cast(obj);
@@ -7798,6 +7970,20 @@ void DescriptorArray::CopyFrom(int dst_index,
}
+Handle<DescriptorArray> DescriptorArray::Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->Merge(verbatim, valid, new_size, modify_index,
+ store_mode, *other),
+ DescriptorArray);
+}
+
+
// Generalize the |other| descriptor array by merging it into the (at least
// partly) updated |this| descriptor array.
// The method merges two descriptor arrays in three parts. Both descriptor arrays
@@ -8145,11 +8331,6 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
}
-const uc16* String::GetTwoByteData() {
- return GetTwoByteData(0);
-}
-
-
const uc16* String::GetTwoByteData(unsigned start) {
ASSERT(!IsOneByteRepresentationUnderneath());
switch (StringShape(this).representation_tag()) {
@@ -8735,7 +8916,7 @@ bool String::SlowEquals(String* other) {
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
-#ifdef DEBUG
+#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
if (Hash() != other->Hash()) {
bool found_difference = false;
@@ -8990,7 +9171,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
if (newspace->Contains(start_of_string) &&
newspace->top() == start_of_string + old_size) {
// Last allocated object in new space. Simply lower allocation top.
- *(newspace->allocation_top_address()) = start_of_string + new_size;
+ newspace->set_top(start_of_string + new_size);
} else {
// Sizes are pointer size aligned, so that we can use filler objects
// that are a multiple of pointer size.
@@ -9006,17 +9187,23 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
}
-AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
+AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
+ bool in_GC) {
// Currently, AllocationMemento objects are only allocated immediately
- // after JSArrays in NewSpace, and detecting whether a JSArray has one
- // involves carefully checking the object immediately after the JSArray
- // (if there is one) to see if it's an AllocationMemento.
+ // after JSArrays and some JSObjects in NewSpace. Detecting whether a
+ // memento is present involves carefully checking the object immediately
+ // after the current object (if there is one) to see if it's an
+ // AllocationMemento.
if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
- ASSERT(object->GetHeap()->InToSpace(object));
Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
object->Size();
- if ((ptr_end + AllocationMemento::kSize) <=
- object->GetHeap()->NewSpaceTop()) {
+ Address top;
+ if (in_GC) {
+ top = object->GetHeap()->new_space()->FromSpacePageHigh();
+ } else {
+ top = object->GetHeap()->NewSpaceTop();
+ }
+ if ((ptr_end + AllocationMemento::kSize) <= top) {
// There is room in newspace for allocation info. Do we have some?
Map** possible_allocation_memento_map =
reinterpret_cast<Map**>(ptr_end);
@@ -9024,7 +9211,9 @@ AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
object->GetHeap()->allocation_memento_map()) {
AllocationMemento* memento = AllocationMemento::cast(
reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
- return memento;
+ if (memento->IsValid()) {
+ return memento;
+ }
}
}
}
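// Layout assumed by the probe above (a sketch): the object's payload ends
// at ptr_end, and if the word stored there is the one-of-a-kind
// allocation_memento_map, an AllocationMemento tagged as
// ptr_end + kHeapObjectTag immediately follows -- provided the memento
// still fits below the relevant new-space top (FromSpacePageHigh() during
// GC, NewSpaceTop() otherwise) and passes the IsValid() check.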
@@ -9130,7 +9319,7 @@ void String::PrintOn(FILE* file) {
static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
- if (live_enum == Map::kInvalidEnumCache) {
+ if (live_enum == kInvalidEnumCacheSentinel) {
live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
}
if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -9221,6 +9410,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
if (number_of_own_descriptors > 0) {
TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ set_owns_descriptors(true);
} else {
ASSERT(descriptors == GetHeap()->empty_descriptor_array());
}
@@ -9277,6 +9467,16 @@ bool Map::EquivalentToForNormalization(Map* other,
}
+void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
+ int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
+ int last_ptr_offset =
+ OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries());
+ v->VisitPointers(
+ HeapObject::RawField(this, first_ptr_offset),
+ HeapObject::RawField(this, last_ptr_offset));
+}
+
+
void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
// Iterate over all fields in the body but take care in dealing with
// the code entry.
@@ -9303,7 +9503,7 @@ void JSFunction::MarkForConcurrentRecompilation() {
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(!shared()->is_generator());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
PrintName();
@@ -9321,7 +9521,7 @@ void JSFunction::MarkInRecompileQueue() {
ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queueing ");
PrintName();
@@ -9537,20 +9737,6 @@ bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
}
-bool JSFunction::IsInlineable() {
- if (IsBuiltin()) return false;
- SharedFunctionInfo* shared_info = shared();
- // Check that the function has a script associated with it.
- if (!shared_info->script()->IsScript()) return false;
- if (shared_info->optimization_disabled()) return false;
- Code* code = shared_info->code();
- if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
- // If we never ran this (unlikely) then lets try to optimize it.
- if (code->kind() != Code::FUNCTION) return true;
- return code->optimizable();
-}
-
-
void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
if (object->IsGlobalObject()) return;
@@ -9694,6 +9880,48 @@ void JSFunction::RemovePrototype() {
}
+void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
+ if (function->has_initial_map()) return;
+ Isolate* isolate = function->GetIsolate();
+
+ // First create a new map with the size and number of in-object properties
+ // suggested by the function.
+ InstanceType instance_type;
+ int instance_size;
+ int in_object_properties;
+ if (function->shared()->is_generator()) {
+ instance_type = JS_GENERATOR_OBJECT_TYPE;
+ instance_size = JSGeneratorObject::kSize;
+ in_object_properties = 0;
+ } else {
+ instance_type = JS_OBJECT_TYPE;
+ instance_size = function->shared()->CalculateInstanceSize();
+ in_object_properties = function->shared()->CalculateInObjectProperties();
+ }
+ Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
+
+ // Fetch or allocate prototype.
+ Handle<Object> prototype;
+ if (function->has_instance_prototype()) {
+ prototype = handle(function->instance_prototype(), isolate);
+ } else {
+ prototype = isolate->factory()->NewFunctionPrototype(function);
+ }
+ map->set_inobject_properties(in_object_properties);
+ map->set_unused_property_fields(in_object_properties);
+ map->set_prototype(*prototype);
+ ASSERT(map->has_fast_object_elements());
+
+ if (!function->shared()->is_generator()) {
+ function->shared()->StartInobjectSlackTracking(*map);
+ }
+
+ // Finally link initial map and constructor function.
+ function->set_initial_map(*map);
+ map->set_constructor(*function);
+}
+
+
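// Usage sketch for the new helper (names mirror the code above):
//
//   JSFunction::EnsureHasInitialMap(function);
//   Handle<Map> initial_map(function->initial_map(), isolate);
//
// Generators receive a fixed-size JS_GENERATOR_OBJECT_TYPE map and skip
// in-object slack tracking; ordinary functions size the map from their
// SharedFunctionInfo and start slack tracking as before.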
void JSFunction::SetInstanceClassName(String* name) {
shared()->set_instance_class_name(name);
}
@@ -9722,9 +9950,13 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
String* name = shared()->DebugName();
Vector<const char> filter = CStrVector(raw_filter);
if (filter.length() == 0) return name->length() == 0;
- if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
- if (filter[0] == '-' &&
- !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ if (filter[0] == '-') {
+ if (filter.length() == 1) {
+ return (name->length() != 0);
+ } else if (!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return true;
+ }
+ } else if (name->IsUtf8EqualTo(filter)) {
return true;
}
if (filter[filter.length() - 1] == '*' &&
@@ -9768,7 +10000,18 @@ bool SharedFunctionInfo::HasSourceCode() {
Handle<Object> SharedFunctionInfo::GetSourceCode() {
if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
Handle<String> source(String::cast(Script::cast(script())->source()));
- return SubString(source, start_position(), end_position());
+ return GetIsolate()->factory()->NewSubString(
+ source, start_position(), end_position());
+}
+
+
+bool SharedFunctionInfo::IsInlineable() {
+ // Check that the function has a script associated with it.
+ if (!script()->IsScript()) return false;
+ if (optimization_disabled()) return false;
+  // If we never ran this (unlikely), then let's try to optimize it.
+ if (code()->kind() != Code::FUNCTION) return true;
+ return code()->optimizable();
}
@@ -10122,13 +10365,14 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- VisitPointer(rinfo->target_object_address());
+ Object* p = rinfo->target_object();
+ VisitPointer(&p);
}
void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
- Address* p = rinfo->target_reference_address();
- VisitExternalReferences(p, p + 1);
+ Address p = rinfo->target_reference();
+ VisitExternalReference(&p);
}
@@ -10137,6 +10381,18 @@ void Code::InvalidateRelocation() {
}
+void Code::InvalidateEmbeddedObjects() {
+ Object* undefined = GetHeap()->undefined_value();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+ }
+ }
+}
+
+
void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
@@ -10185,6 +10441,10 @@ void Code::CopyFrom(const CodeDesc& desc) {
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER);
+ } else if (mode == RelocInfo::CODE_AGE_SEQUENCE) {
+ Handle<Object> p = it.rinfo()->code_age_stub_handle(origin);
+ Code* code = Code::cast(*p);
+ it.rinfo()->set_code_age_stub(code);
} else {
it.rinfo()->apply(delta);
}
@@ -10281,7 +10541,7 @@ Map* Code::FindFirstMap() {
void Code::ReplaceNthObject(int n,
Map* match_map,
Object* replace_with) {
- ASSERT(is_inline_cache_stub());
+ ASSERT(is_inline_cache_stub() || is_handler());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
@@ -10307,7 +10567,23 @@ void Code::FindAllMaps(MapHandleList* maps) {
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Object* object = info->target_object();
- if (object->IsMap()) maps->Add(Handle<Map>(Map::cast(object)));
+ if (object->IsMap()) maps->Add(handle(Map::cast(object)));
+ }
+}
+
+
+void Code::FindAllTypes(TypeHandleList* types) {
+ ASSERT(is_inline_cache_stub());
+ DisallowHeapAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ Isolate* isolate = GetIsolate();
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsMap()) {
+ Handle<Map> map(Map::cast(object));
+ types->Add(handle(IC::MapToType(map), isolate));
+ }
}
}
@@ -10317,31 +10593,35 @@ void Code::ReplaceFirstMap(Map* replace_with) {
}
-Code* Code::FindFirstCode() {
+Code* Code::FindFirstHandler() {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
- return Code::GetCodeFromTargetAddress(info->target_address());
+ Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+ if (code->kind() == Code::HANDLER) return code;
}
return NULL;
}
-void Code::FindAllCode(CodeHandleList* code_list, int length) {
+bool Code::FindHandlers(CodeHandleList* code_list, int length) {
ASSERT(is_inline_cache_stub());
DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
int i = 0;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
- if (i++ == length) return;
+ if (i == length) return true;
RelocInfo* info = it.rinfo();
Code* code = Code::GetCodeFromTargetAddress(info->target_address());
- ASSERT(code->kind() == Code::STUB);
+ // IC stubs with handlers never contain non-handler code objects before
+ // handler targets.
+ if (code->kind() != Code::HANDLER) break;
code_list->Add(Handle<Code>(code));
+ i++;
}
- UNREACHABLE();
+ return i == length;
}
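// Call-site sketch for the new bool contract (names are illustrative):
//
//   CodeHandleList handlers;
//   if (!stub->FindHandlers(&handlers, expected)) {
//     // fewer than |expected| handler targets were found before the first
//     // non-handler code object; treat the IC as not fully resolvable
//   }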
@@ -10374,6 +10654,16 @@ void Code::ReplaceNthCell(int n, Cell* replace_with) {
void Code::ClearInlineCaches() {
+ ClearInlineCaches(NULL);
+}
+
+
+void Code::ClearInlineCaches(Code::Kind kind) {
+ ClearInlineCaches(&kind);
+}
+
+
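// The two public overloads funnel into the private pointer-taking helper
// below: NULL clears every inline cache, a non-NULL kind clears only the
// matching ones. Usage sketch, mirroring the KeyedStoreIC flush added in
// SetPrototype further down in this patch:
//
//   code->ClearInlineCaches();                      // clear all ICs
//   code->ClearInlineCaches(Code::KEYED_STORE_IC);  // clear one kind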
+void Code::ClearInlineCaches(Code::Kind* kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
@@ -10382,7 +10672,9 @@ void Code::ClearInlineCaches() {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
- IC::Clear(this->GetIsolate(), info->pc());
+ if (kind == NULL || *kind == target->kind()) {
+ IC::Clear(this->GetIsolate(), info->pc());
+ }
}
}
}
@@ -10409,24 +10701,34 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
ASSERT(kind() == FUNCTION);
- for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc);
- !it.Done();
- it.Next()) {
- if (it.pc_offset() == pc_offset) return it.ast_id();
+ BackEdgeTable back_edges(this, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
}
return BailoutId::None();
}
-bool Code::allowed_in_shared_map_code_cache() {
- return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() &&
- ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECT);
+void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
+ PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
+}
+
+
+void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
+ PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge,
+ NO_MARKING_PARITY);
}
-void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
- PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
+static Code::Age EffectiveAge(Code::Age age) {
+ if (age == Code::kNotExecutedCodeAge) {
+    // Treat code that's never been executed as old immediately.
+ age = Code::kIsOldCodeAge;
+ } else if (age == Code::kExecutedOnceCodeAge) {
+ // Pre-age code that has only been executed once.
+ age = Code::kPreAgedCodeAge;
+ }
+ return age;
}
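// The mapping implemented above, in table form (a sketch):
//
//   kNotExecutedCodeAge   -> kIsOldCodeAge    (never-run code is old at once)
//   kExecutedOnceCodeAge  -> kPreAgedCodeAge  (run-once code starts pre-aged)
//   any other age         -> returned unchanged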
@@ -10436,8 +10738,11 @@ void Code::MakeOlder(MarkingParity current_parity) {
Age age;
MarkingParity code_parity;
GetCodeAgeAndParity(sequence, &age, &code_parity);
+ age = EffectiveAge(age);
if (age != kLastCodeAge && code_parity != current_parity) {
- PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
+ PatchPlatformCodeAge(GetIsolate(),
+ sequence,
+ static_cast<Age>(age + 1),
current_parity);
}
}
@@ -10445,18 +10750,13 @@ void Code::MakeOlder(MarkingParity current_parity) {
bool Code::IsOld() {
- byte* sequence = FindCodeAgeSequence();
- if (sequence == NULL) return false;
- Age age;
- MarkingParity parity;
- GetCodeAgeAndParity(sequence, &age, &parity);
- return age >= kSexagenarianCodeAge;
+ return GetAge() >= kIsOldCodeAge;
}
byte* Code::FindCodeAgeSequence() {
return FLAG_age_code &&
- prologue_offset() != kPrologueOffsetNotSet &&
+ prologue_offset() != Code::kPrologueOffsetNotSet &&
(kind() == OPTIMIZED_FUNCTION ||
(kind() == FUNCTION && !has_debug_break_slots()))
? instruction_start() + prologue_offset()
@@ -10464,10 +10764,15 @@ byte* Code::FindCodeAgeSequence() {
}
-int Code::GetAge() {
+Code::Age Code::GetAge() {
+ return EffectiveAge(GetRawAge());
+}
+
+
+Code::Age Code::GetRawAge() {
byte* sequence = FindCodeAgeSequence();
if (sequence == NULL) {
- return Code::kNoAge;
+ return kNoAgeCodeAge;
}
Age age;
MarkingParity parity;
@@ -10496,12 +10801,23 @@ void Code::GetCodeAgeAndParity(Code* code, Age* age,
}
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
+ stub = *builtins->MarkCodeAsExecutedOnce();
+ if (code == stub) {
+ *age = kNotExecutedCodeAge;
+ *parity = NO_MARKING_PARITY;
+ return;
+ }
+ stub = *builtins->MarkCodeAsExecutedTwice();
+ if (code == stub) {
+ *age = kExecutedOnceCodeAge;
+ *parity = NO_MARKING_PARITY;
+ return;
+ }
UNREACHABLE();
}
-Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
- Isolate* isolate = Isolate::Current();
+Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
Builtins* builtins = isolate->builtins();
switch (age) {
#define HANDLE_CODE_AGE(AGE) \
@@ -10513,6 +10829,14 @@ Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
}
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
+ case kNotExecutedCodeAge: {
+ ASSERT(parity == NO_MARKING_PARITY);
+ return *builtins->MarkCodeAsExecutedOnce();
+ }
+ case kExecutedOnceCodeAge: {
+ ASSERT(parity == NO_MARKING_PARITY);
+ return *builtins->MarkCodeAsExecutedTwice();
+ }
default:
UNREACHABLE();
break;
@@ -10521,7 +10845,7 @@ Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
}
-void Code::PrintDeoptLocation(int bailout_id) {
+void Code::PrintDeoptLocation(FILE* out, int bailout_id) {
const char* last_comment = NULL;
int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
@@ -10535,7 +10859,7 @@ void Code::PrintDeoptLocation(int bailout_id) {
(bailout_id == Deoptimizer::GetDeoptimizationId(
GetIsolate(), info->target_address(), Deoptimizer::SOFT))) {
CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
- PrintF(" %s\n", last_comment);
+ PrintF(out, " %s\n", last_comment);
return;
}
}
@@ -10737,10 +11061,10 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
this->DeoptPoints());
if (this->DeoptPoints() == 0) return;
- PrintF("%6s %8s %s\n", "ast id", "pc", "state");
+ PrintF(out, "%6s %8s %s\n", "ast id", "pc", "state");
for (int i = 0; i < this->DeoptPoints(); i++) {
int pc_and_state = this->PcAndState(i)->value();
- PrintF("%6d %8d %s\n",
+ PrintF(out, "%6d %8d %s\n",
this->AstId(i).ToInt(),
FullCodeGenerator::PcField::decode(pc_and_state),
FullCodeGenerator::State2String(
@@ -10768,12 +11092,7 @@ const char* Code::ICState2String(InlineCacheState state) {
const char* Code::StubType2String(StubType type) {
switch (type) {
case NORMAL: return "NORMAL";
- case FIELD: return "FIELD";
- case CONSTANT: return "CONSTANT";
- case CALLBACKS: return "CALLBACKS";
- case INTERCEPTOR: return "INTERCEPTOR";
- case MAP_TRANSITION: return "MAP_TRANSITION";
- case NONEXISTENT: return "NONEXISTENT";
+ case FAST: return "FAST";
}
UNREACHABLE(); // keep the compiler happy
return NULL;
@@ -10808,6 +11127,10 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "kind = %s\n", Kind2String(kind()));
+ if (has_major_key()) {
+ PrintF(out, "major_key = %s\n",
+ CodeStub::MajorName(CodeStub::GetMajorKey(this), true));
+ }
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
PrintExtraICState(out, kind(), needs_extended_extra_ic_state(kind()) ?
@@ -10851,7 +11174,7 @@ void Code::Disassemble(const char* name, FILE* out) {
DeoptimizationInputData::cast(this->deoptimization_data());
data->DeoptimizationInputDataPrint(out);
}
- PrintF("\n");
+ PrintF(out, "\n");
if (is_crankshafted()) {
SafepointTable table(this);
@@ -10859,7 +11182,7 @@ void Code::Disassemble(const char* name, FILE* out) {
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
PrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset);
- table.PrintEntry(i);
+ table.PrintEntry(i, out);
PrintF(out, " (sp -> fp)");
SafepointEntry entry = table.GetEntry(i);
if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
@@ -10879,15 +11202,15 @@ void Code::Disassemble(const char* name, FILE* out) {
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
DisallowHeapAllocation no_gc;
- FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc);
+ BackEdgeTable back_edges(this, &no_gc);
- PrintF(out, "Back edges (size = %u)\n", back_edges.table_length());
+ PrintF(out, "Back edges (size = %u)\n", back_edges.length());
PrintF(out, "ast_id pc_offset loop_depth\n");
- for ( ; !back_edges.Done(); back_edges.Next()) {
- PrintF(out, "%6d %9u %10u\n", back_edges.ast_id().ToInt(),
- back_edges.pc_offset(),
- back_edges.loop_depth());
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ PrintF(out, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(),
+ back_edges.pc_offset(i),
+ back_edges.loop_depth(i));
}
PrintF(out, "\n");
@@ -10900,7 +11223,7 @@ void Code::Disassemble(const char* name, FILE* out) {
#endif
}
- PrintF("RelocInfo (size = %d)\n", relocation_size());
+ PrintF(out, "RelocInfo (size = %d)\n", relocation_size());
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Print(GetIsolate(), out);
}
@@ -10909,6 +11232,18 @@ void Code::Disassemble(const char* name, FILE* out) {
#endif // ENABLE_DISASSEMBLER
+Handle<FixedArray> JSObject::SetFastElementsCapacityAndLength(
+ Handle<JSObject> object,
+ int capacity,
+ int length,
+ SetFastElementsCapacitySmiMode smi_mode) {
+ CALL_HEAP_FUNCTION(
+ object->GetIsolate(),
+ object->SetFastElementsCapacityAndLength(capacity, length, smi_mode),
+ FixedArray);
+}
+
+
MaybeObject* JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
@@ -10916,7 +11251,6 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
- ASSERT(!map()->is_observed());
// Allocate a new fast elements backing store.
FixedArray* new_elements;
@@ -10958,6 +11292,10 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
ValidateElements();
set_map_and_elements(new_map, new_elements);
+
+ // Transition through the allocation site as well if present.
+ maybe_obj = UpdateAllocationSite(new_elements_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
parameter_map->set(1, new_elements);
@@ -10975,13 +11313,38 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
+bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
+ if (kind != Code::OPTIMIZED_FUNCTION) return false;
+
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition() &&
+ FLAG_collect_maps &&
+ FLAG_weak_embedded_maps_in_optimized_code;
+ }
+
+ if (object->IsJSObject()) {
+ return FLAG_weak_embedded_objects_in_optimized_code;
+ }
+
+ return false;
+}
+
+
+void JSObject::SetFastDoubleElementsCapacityAndLength(Handle<JSObject> object,
+ int capacity,
+ int length) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->SetFastDoubleElementsCapacityAndLength(capacity, length));
+}
+
+
MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
int capacity,
int length) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
- ASSERT(!map()->is_observed());
FixedArrayBase* elems;
{ MaybeObject* maybe_obj =
@@ -11130,10 +11493,6 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
if (!new_length_handle->ToArrayIndex(&new_length))
return Failure::InternalError();
- // Observed arrays should always be in dictionary mode;
- // if they were in fast mode, the below is slower than necessary
- // as it iterates over the array backing store multiple times.
- ASSERT(self->HasDictionaryElements());
static const PropertyAttributes kNoAttrFilter = NONE;
int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
if (num_elements > 0) {
@@ -11144,6 +11503,8 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
}
} else {
// For sparse arrays, only iterate over existing elements.
+ // TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
+ // the to-be-removed indices twice.
Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
self->GetLocalElementKeys(*keys, kNoAttrFilter);
while (num_elements-- > 0) {
@@ -11166,11 +11527,11 @@ MaybeObject* JSArray::SetElementsLength(Object* len) {
for (int i = 0; i < indices.length(); ++i) {
JSObject::EnqueueChangeRecord(
- self, "deleted", isolate->factory()->Uint32ToString(indices[i]),
+ self, "delete", isolate->factory()->Uint32ToString(indices[i]),
old_values[i]);
}
JSObject::EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_string(),
+ self, "update", isolate->factory()->length_string(),
old_length_handle);
EndPerformSplice(self);
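// Note: the record types above now use the Object.observe spelling
// ("add", "update", "delete") rather than the earlier past-tense names;
// the element and property paths below are renamed the same way.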
@@ -11248,7 +11609,7 @@ Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
cache->set(entry + kProtoTransitionPrototypeOffset, *prototype);
cache->set(entry + kProtoTransitionMapOffset, *target_map);
- map->SetNumberOfProtoTransitions(transitions);
+ map->SetNumberOfProtoTransitions(last + 1);
return map;
}
@@ -11312,6 +11673,9 @@ DependentCode* DependentCode::ForObject(Handle<HeapObject> object,
AllowDeferredHandleDereference dependencies_are_safe;
if (group == DependentCode::kPropertyCellChangedGroup) {
return Handle<PropertyCell>::cast(object)->dependent_code();
+ } else if (group == DependentCode::kAllocationSiteTenuringChangedGroup ||
+ group == DependentCode::kAllocationSiteTransitionChangedGroup) {
+ return Handle<AllocationSite>::cast(object)->dependent_code();
}
return Handle<Map>::cast(object)->dependent_code();
}
@@ -11333,7 +11697,7 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
int capacity = kCodesStartIndex + number_of_entries + 1;
if (capacity > 5) capacity = capacity * 5 / 4;
Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
- factory->CopySizeFixedArray(entries, capacity));
+ factory->CopySizeFixedArray(entries, capacity, TENURED));
// The number of codes can change after GC.
starts.Recompute(*entries);
start = starts.at(group);
@@ -11516,6 +11880,8 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
}
}
+ bool dictionary_elements_in_chain =
+ object->map()->DictionaryElementsInPrototypeChainOnly();
Handle<JSObject> real_receiver = object;
if (skip_hidden_prototypes) {
@@ -11548,6 +11914,14 @@ Handle<Object> JSObject::SetPrototype(Handle<JSObject> object,
ASSERT(new_map->prototype() == *value);
real_receiver->set_map(*new_map);
+ if (!dictionary_elements_in_chain &&
+ new_map->DictionaryElementsInPrototypeChainOnly()) {
+ // If the prototype chain didn't previously have element callbacks, then
+ // KeyedStoreICs need to be cleared to ensure any that involve this
+ // map go generic.
+ object->GetHeap()->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ }
+
heap->ClearInstanceofCache();
ASSERT(size == object->Size());
return value;
@@ -11567,22 +11941,6 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
}
-PropertyType JSObject::GetLocalPropertyType(Name* name) {
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return GetLocalElementType(index);
- }
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return lookup.type();
-}
-
-
-PropertyType JSObject::GetLocalElementType(uint32_t index) {
- return GetElementsAccessor()->GetType(this, this, index);
-}
-
-
AccessorPair* JSObject::GetLocalPropertyAccessorPair(Name* name) {
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
@@ -11615,42 +11973,38 @@ AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
}
-MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetElementWithInterceptor(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> this_handle(this);
- Handle<Object> value_handle(value, isolate);
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
v8::IndexedPropertySetterCallback setter =
v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ ApiIndexedPropertyAccess("interceptor-indexed-set", *object, index));
+ PropertyCallbackArguments args(isolate, interceptor->data(), *object,
+ *object);
v8::Handle<v8::Value> result =
- args.Call(setter, index, v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
- }
- MaybeObject* raw_result =
- this_handle->SetElementWithoutInterceptor(index,
- *value_handle,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ args.Call(setter, index, v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!result.IsEmpty()) return value;
+ }
+
+ return SetElementWithoutInterceptor(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
@@ -11707,18 +12061,17 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
}
-MaybeObject* JSObject::SetElementWithCallback(Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -11727,41 +12080,40 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<ExecutableAccessorInfo> data(
- ExecutableAccessorInfo::cast(structure));
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
Object* call_obj = data->setter();
v8::AccessorSetterCallback call_fun =
v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *key));
PropertyCallbackArguments
- args(isolate, data->data(), *self, *holder_handle);
+ args(isolate, data->data(), *object, *holder);
args.Call(call_fun,
v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(structure)->setter(), isolate);
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<Object> holder_handle(holder, isolate);
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { key, holder };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "no_setter_in_callback", HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
@@ -11769,7 +12121,7 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
if (structure->IsDeclaredAccessorInfo()) return value;
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
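// Error-signalling convention for the handlified setters above and below
// (a sketch): a thrown or scheduled exception surfaces as an empty
// Handle<Object>(), which callers must test before dereferencing:
//
//   Handle<Object> result = SetElementWithCallback(...);
//   RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());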
@@ -11800,41 +12152,39 @@ bool JSObject::HasDictionaryArgumentsElements() {
// Adding n elements in fast case is O(n*n).
// Note: revisit design to have dual undefined values to capture absent
// elements.
-MaybeObject* JSObject::SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype) {
- ASSERT(HasFastSmiOrObjectElements() ||
- HasFastArgumentsElements());
+Handle<Object> JSObject::SetFastElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode,
+ bool check_prototype) {
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastArgumentsElements());
+
+ Isolate* isolate = object->GetIsolate();
// Array optimizations rely on the prototype lookups of Array objects always
// returning undefined. If there is a store to the initial prototype object,
// make sure all of these optimizations are invalidated.
- Isolate* isolate(GetIsolate());
- if (isolate->is_initial_object_prototype(this) ||
- isolate->is_initial_array_prototype(this)) {
- HandleScope scope(GetIsolate());
- map()->dependent_code()->DeoptimizeDependentCodeGroup(
- GetIsolate(),
+ if (isolate->is_initial_object_prototype(*object) ||
+ isolate->is_initial_array_prototype(*object)) {
+ object->map()->dependent_code()->DeoptimizeDependentCodeGroup(isolate,
DependentCode::kElementsCantBeAddedGroup);
}
- FixedArray* backing_store = FixedArray::cast(elements());
- if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
- backing_store = FixedArray::cast(backing_store->get(1));
+ Handle<FixedArray> backing_store(FixedArray::cast(object->elements()));
+ if (backing_store->map() ==
+ isolate->heap()->non_strict_arguments_elements_map()) {
+ backing_store = handle(FixedArray::cast(backing_store->get(1)));
} else {
- MaybeObject* maybe = EnsureWritableFastElements();
- if (!maybe->To(&backing_store)) return maybe;
+ backing_store = EnsureWritableFastElements(object);
}
uint32_t capacity = static_cast<uint32_t>(backing_store->length());
if (check_prototype &&
(index >= capacity || backing_store->get(index)->IsTheHole())) {
bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
- value,
- &found,
- strict_mode);
+ Handle<Object> result = SetElementWithCallbackSetterInPrototypes(
+ object, index, value, &found, strict_mode);
if (found) return result;
}
@@ -11843,8 +12193,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
uint32_t array_length = 0;
bool must_update_array_length = false;
bool introduces_holes = true;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ if (object->IsJSArray()) {
+ CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&array_length));
introduces_holes = index > array_length;
if (index >= array_length) {
must_update_array_length = true;
@@ -11856,13 +12206,12 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
// If the array is growing, and it's not growth by a single element at the
// end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
if (introduces_holes &&
IsFastElementsKind(elements_kind) &&
!IsFastHoleyElementsKind(elements_kind)) {
ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
+ TransitionElementsKind(object, transitioned_kind);
}
// Check if the capacity of the backing store needs to be increased, or if
@@ -11872,104 +12221,91 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
if ((index - capacity) < kMaxGap) {
new_capacity = NewElementsCapacity(index + 1);
ASSERT(new_capacity > index);
- if (!ShouldConvertToSlowElements(new_capacity)) {
+ if (!object->ShouldConvertToSlowElements(new_capacity)) {
convert_to_slow = false;
}
}
if (convert_to_slow) {
- MaybeObject* result = NormalizeElements();
- if (result->IsFailure()) return result;
- return SetDictionaryElement(index, value, NONE, strict_mode,
+ NormalizeElements(object);
+ return SetDictionaryElement(object, index, value, NONE, strict_mode,
check_prototype);
}
}
// Convert to fast double elements if appropriate.
- if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
+ if (object->HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
// Consider fixing the boilerplate as well if we have one.
ElementsKind to_kind = IsHoleyElementsKind(elements_kind)
? FAST_HOLEY_DOUBLE_ELEMENTS
: FAST_DOUBLE_ELEMENTS;
- MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ UpdateAllocationSite(object, to_kind);
- MaybeObject* maybe =
- SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
- if (maybe->IsFailure()) return maybe;
- FixedDoubleArray::cast(elements())->set(index, value->Number());
- ValidateElements();
+ SetFastDoubleElementsCapacityAndLength(object, new_capacity, array_length);
+ FixedDoubleArray::cast(object->elements())->set(index, value->Number());
+ object->ValidateElements();
return value;
}
// Change elements kind from Smi-only to generic FAST if necessary.
- if (HasFastSmiElements() && !value->IsSmi()) {
- Map* new_map;
- ElementsKind kind = HasFastHoleyElements()
+ if (object->HasFastSmiElements() && !value->IsSmi()) {
+ ElementsKind kind = object->HasFastHoleyElements()
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
- MaybeObject* maybe_failure = UpdateAllocationSite(kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
- kind);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- set_map(new_map);
+ UpdateAllocationSite(object, kind);
+ Handle<Map> new_map = GetElementsTransitionMap(object, kind);
+ object->set_map(*new_map);
+ ASSERT(IsFastObjectElementsKind(object->GetElementsKind()));
}
// Increase backing store capacity if that's been decided previously.
if (new_capacity != capacity) {
- FixedArray* new_elements;
SetFastElementsCapacitySmiMode smi_mode =
- value->IsSmi() && HasFastSmiElements()
+ value->IsSmi() && object->HasFastSmiElements()
? kAllowSmiElements
: kDontAllowSmiElements;
- { MaybeObject* maybe =
- SetFastElementsCapacityAndLength(new_capacity,
- array_length,
- smi_mode);
- if (!maybe->To(&new_elements)) return maybe;
- }
- new_elements->set(index, value);
- ValidateElements();
+ Handle<FixedArray> new_elements =
+ SetFastElementsCapacityAndLength(object, new_capacity, array_length,
+ smi_mode);
+ new_elements->set(index, *value);
+ object->ValidateElements();
return value;
}
// Finally, set the new element and length.
- ASSERT(elements()->IsFixedArray());
- backing_store->set(index, value);
+ ASSERT(object->elements()->IsFixedArray());
+ backing_store->set(index, *value);
if (must_update_array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(array_length));
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(array_length));
}
return value;
}
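// Decision sketch for the fast-element store above (approximate order):
//
//   prototype has a callback setter for index  -> delegate up the chain
//   store introduces holes                     -> transition to a HOLEY kind
//   index >= capacity and gap >= kMaxGap,
//   or backing store would be too sparse       -> NormalizeElements + dictionary
//   non-Smi number into a SMI kind             -> transition to DOUBLE elements
//   other non-Smi into a SMI kind              -> transition to FAST elements
//   capacity must grow                         -> new backing store, then store
//   otherwise                                  -> in-place store + length update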
-MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw, isolate);
+Handle<Object> JSObject::SetDictionaryElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
+ Isolate* isolate = object->GetIsolate();
// Insert element in the dictionary.
- Handle<FixedArray> elements(FixedArray::cast(this->elements()));
+ Handle<FixedArray> elements(FixedArray::cast(object->elements()));
bool is_arguments =
- (elements->map() == heap->non_strict_arguments_elements_map());
+ (elements->map() == isolate->heap()->non_strict_arguments_elements_map());
Handle<SeededNumberDictionary> dictionary(is_arguments
? SeededNumberDictionary::cast(elements->get(1))
: SeededNumberDictionary::cast(*elements));
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
+ Handle<Object> element(dictionary->ValueAt(entry), isolate);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(element, index, *value, this, strict_mode);
+ return SetElementWithCallback(object, element, index, value, object,
+ strict_mode);
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
@@ -11981,26 +12317,27 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
} else {
- Handle<Object> holder(this, isolate);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { number, holder };
+ Handle<Object> args[2] = { number, object };
Handle<Object> error =
isolate->factory()->NewTypeError("strict_read_only_property",
HandleVector(args, 2));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
// Elements of the arguments object in slow mode might be slow aliases.
if (is_arguments && element->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
- Context* context = Context::cast(elements->get(0));
+ Handle<AliasedArgumentsEntry> entry =
+ Handle<AliasedArgumentsEntry>::cast(element);
+ Handle<Context> context(Context::cast(elements->get(0)));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
context->set(context_index, *value);
// For elements that are still writable we keep slow aliasing.
- if (!details.IsReadOnly()) value = handle(element, isolate);
+ if (!details.IsReadOnly()) value = element;
}
dictionary->ValueAtPut(entry, *value);
}
@@ -12009,15 +12346,16 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
// Can cause GC!
if (check_prototype) {
bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(
- index, *value, &found, strict_mode);
+ Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object,
+ index, value, &found, strict_mode);
if (found) return result;
}
+
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
- if (!self->map()->is_extensible()) {
+ if (!object->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
+ return isolate->factory()->undefined_value();
} else {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> name = isolate->factory()->NumberToString(number);
@@ -12025,36 +12363,36 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
Handle<Object> error =
isolate->factory()->NewTypeError("object_not_extensible",
HandleVector(args, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- FixedArrayBase* new_dictionary;
+
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
- if (!maybe->To(&new_dictionary)) return maybe;
- if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
+ Handle<SeededNumberDictionary> new_dictionary =
+ SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
+ details);
+ if (*dictionary != *new_dictionary) {
if (is_arguments) {
- elements->set(1, new_dictionary);
+ elements->set(1, *new_dictionary);
} else {
- self->set_elements(new_dictionary);
+ object->set_elements(*new_dictionary);
}
- dictionary =
- handle(SeededNumberDictionary::cast(new_dictionary), isolate);
+ dictionary = new_dictionary;
}
}
// Update the array length if this JSObject is an array.
- if (self->IsJSArray()) {
- MaybeObject* result =
- JSArray::cast(*self)->JSArrayUpdateLengthFromIndex(index, *value);
- if (result->IsFailure()) return result;
+ if (object->IsJSArray()) {
+ JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray>::cast(object), index,
+ value);
}
// Attempt to put this object back in fast case.
- if (self->ShouldConvertToFastElements()) {
+ if (object->ShouldConvertToFastElements()) {
uint32_t new_length = 0;
- if (self->IsJSArray()) {
- CHECK(JSArray::cast(*self)->length()->ToArrayIndex(&new_length));
+ if (object->IsJSArray()) {
+ CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&new_length));
} else {
new_length = dictionary->max_number_key() + 1;
}
@@ -12063,47 +12401,47 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
: kDontAllowSmiElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
- self->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
+ object->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
smi_mode = kForceSmiElements;
}
- MaybeObject* result = should_convert_to_fast_double_elements
- ? self->SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : self->SetFastElementsCapacityAndLength(
- new_length, new_length, smi_mode);
- self->ValidateElements();
- if (result->IsFailure()) return result;
+
+ if (should_convert_to_fast_double_elements) {
+ SetFastDoubleElementsCapacityAndLength(object, new_length, new_length);
+ } else {
+ SetFastElementsCapacityAndLength(object, new_length, new_length,
+ smi_mode);
+ }
+ object->ValidateElements();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object elements are fast case again:\n");
- Print();
+ object->Print();
}
#endif
}
- return *value;
+ return value;
}
-
-MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
+Handle<Object> JSObject::SetFastDoubleElement(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastDoubleElements());
+ ASSERT(object->HasFastDoubleElements());
- FixedArrayBase* base_elms = FixedArrayBase::cast(elements());
+ Handle<FixedArrayBase> base_elms(FixedArrayBase::cast(object->elements()));
uint32_t elms_length = static_cast<uint32_t>(base_elms->length());
// If storing to an element that isn't in the array, pass the store request
// up the prototype chain before storing in the receiver's elements.
if (check_prototype &&
(index >= elms_length ||
- FixedDoubleArray::cast(base_elms)->is_the_hole(index))) {
+ Handle<FixedDoubleArray>::cast(base_elms)->is_the_hole(index))) {
bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
- value,
- &found,
- strict_mode);
+ Handle<Object> result = SetElementWithCallbackSetterInPrototypes(object,
+ index, value, &found, strict_mode);
if (found) return result;
}
@@ -12112,48 +12450,47 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
bool value_is_smi = value->IsSmi();
bool introduces_holes = true;
uint32_t length = elms_length;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ if (object->IsJSArray()) {
+ CHECK(Handle<JSArray>::cast(object)->length()->ToArrayIndex(&length));
introduces_holes = index > length;
} else {
introduces_holes = index >= elms_length;
}
if (!value->IsNumber()) {
- MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
- elms_length,
- length,
- kDontAllowSmiElements);
- if (maybe_obj->IsFailure()) return maybe_obj;
- maybe_obj = SetFastElement(index, value, strict_mode, check_prototype);
- if (maybe_obj->IsFailure()) return maybe_obj;
- ValidateElements();
- return maybe_obj;
+ SetFastElementsCapacityAndLength(object, elms_length, length,
+ kDontAllowSmiElements);
+ Handle<Object> result = SetFastElement(object, index, value, strict_mode,
+ check_prototype);
+ RETURN_IF_EMPTY_HANDLE_VALUE(object->GetIsolate(), result,
+ Handle<Object>());
+ object->ValidateElements();
+ return result;
}
double double_value = value_is_smi
- ? static_cast<double>(Smi::cast(value)->value())
- : HeapNumber::cast(value)->value();
+ ? static_cast<double>(Handle<Smi>::cast(value)->value())
+ : Handle<HeapNumber>::cast(value)->value();
// If the array is growing, and it's not growth by a single element at the
// end, make sure that the ElementsKind is HOLEY.
- ElementsKind elements_kind = GetElementsKind();
+ ElementsKind elements_kind = object->GetElementsKind();
if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) {
ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
- if (maybe->IsFailure()) return maybe;
+ TransitionElementsKind(object, transitioned_kind);
}
// Check whether there is extra space in the fixed array.
if (index < elms_length) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ Handle<FixedDoubleArray> elms(FixedDoubleArray::cast(object->elements()));
elms->set(index, double_value);
- if (IsJSArray()) {
+ if (object->IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ CHECK(
+ Handle<JSArray>::cast(object)->length()->ToArrayIndex(&array_length));
if (index >= array_length) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+ Handle<JSArray>::cast(object)->set_length(Smi::FromInt(index + 1));
}
}
return value;
@@ -12163,27 +12500,23 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
if ((index - elms_length) < kMaxGap) {
// Try allocating extra space.
int new_capacity = NewElementsCapacity(index+1);
- if (!ShouldConvertToSlowElements(new_capacity)) {
+ if (!object->ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
- MaybeObject* maybe_obj =
- SetFastDoubleElementsCapacityAndLength(new_capacity, index + 1);
- if (maybe_obj->IsFailure()) return maybe_obj;
- FixedDoubleArray::cast(elements())->set(index, double_value);
- ValidateElements();
+ SetFastDoubleElementsCapacityAndLength(object, new_capacity, index + 1);
+ FixedDoubleArray::cast(object->elements())->set(index, double_value);
+ object->ValidateElements();
return value;
}
}
// Otherwise default to slow case.
- ASSERT(HasFastDoubleElements());
- ASSERT(map()->has_fast_double_elements());
- ASSERT(elements()->IsFixedDoubleArray());
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ASSERT(HasDictionaryElements());
- return SetElement(index, value, NONE, strict_mode, check_prototype);
+ ASSERT(object->HasFastDoubleElements());
+ ASSERT(object->map()->has_fast_double_elements());
+ ASSERT(object->elements()->IsFixedDoubleArray());
+
+ NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements());
+ return SetElement(object, index, value, NONE, strict_mode, check_prototype);
}
@@ -12206,328 +12539,381 @@ Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
Handle<Object> value,
StrictModeFlag strict_mode) {
ASSERT(!object->HasExternalArrayElements());
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetElement(index, *value, NONE, strict_mode, false),
- Object);
+ return JSObject::SetElement(object, index, value, NONE, strict_mode, false);
}
Handle<Object> JSObject::SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- PropertyAttributes attr,
+ PropertyAttributes attributes,
StrictModeFlag strict_mode,
+ bool check_prototype,
SetPropertyMode set_mode) {
+ Isolate* isolate = object->GetIsolate();
+
if (object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number =
- Execution::ToNumber(object->GetIsolate(), value, &has_exception);
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Handle<Object>();
value = number;
}
}
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetElement(index, *value, attr, strict_mode, true, set_mode),
- Object);
-}
-
-
-MaybeObject* JSObject::SetElement(uint32_t index,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- Isolate* isolate = GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return value_raw;
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetElement(index,
- value_raw,
- attributes,
- strict_mode,
- check_prototype,
- set_mode);
+ return SetElement(Handle<JSObject>::cast(proto), index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
// Don't allow element properties to be redefined for external arrays.
- if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+ if (object->HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { handle(this, isolate), number };
+ Handle<Object> args[] = { object, number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// Normalize the elements to enable attributes on the property.
if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
- SeededNumberDictionary* dictionary;
- MaybeObject* maybe_object = NormalizeElements();
- if (!maybe_object->To(&dictionary)) return maybe_object;
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
    // Make sure that we never go back to the fast case.
dictionary->set_requires_slow_elements();
}
- if (!(FLAG_harmony_observation && map()->is_observed())) {
- return HasIndexedInterceptor()
- ? SetElementWithInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode)
- : SetElementWithoutInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode);
+ if (!(FLAG_harmony_observation && object->map()->is_observed())) {
+ return object->HasIndexedInterceptor()
+ ? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
+ check_prototype,
+ set_mode)
+ : SetElementWithoutInterceptor(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
- // From here on, everything has to be handlified.
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw, isolate);
- PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
+ PropertyAttributes old_attributes = object->GetLocalElementAttribute(index);
Handle<Object> old_value = isolate->factory()->the_hole_value();
Handle<Object> old_length_handle;
Handle<Object> new_length_handle;
if (old_attributes != ABSENT) {
- if (self->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(isolate, self, index);
- } else if (self->IsJSArray()) {
+ if (object->GetLocalElementAccessorPair(index) == NULL)
+ old_value = Object::GetElement(isolate, object, index);
+ } else if (object->IsJSArray()) {
// Store old array length in case adding an element grows the array.
- old_length_handle = handle(Handle<JSArray>::cast(self)->length(), isolate);
+ old_length_handle = handle(Handle<JSArray>::cast(object)->length(),
+ isolate);
}
  // Check for a lookup interceptor.
- MaybeObject* result = self->HasIndexedInterceptor()
- ? self->SetElementWithInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode)
- : self->SetElementWithoutInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode);
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ Handle<Object> result = object->HasIndexedInterceptor()
+ ? SetElementWithInterceptor(object, index, value, attributes, strict_mode,
+ check_prototype,
+ set_mode)
+ : SetElementWithoutInterceptor(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
Handle<String> name = isolate->factory()->Uint32ToString(index);
- PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
+ PropertyAttributes new_attributes = object->GetLocalElementAttribute(index);
if (old_attributes == ABSENT) {
- if (self->IsJSArray() &&
- !old_length_handle->SameValue(Handle<JSArray>::cast(self)->length())) {
- new_length_handle = handle(Handle<JSArray>::cast(self)->length(),
+ if (object->IsJSArray() &&
+ !old_length_handle->SameValue(
+ Handle<JSArray>::cast(object)->length())) {
+ new_length_handle = handle(Handle<JSArray>::cast(object)->length(),
isolate);
uint32_t old_length = 0;
uint32_t new_length = 0;
CHECK(old_length_handle->ToArrayIndex(&old_length));
CHECK(new_length_handle->ToArrayIndex(&new_length));
- BeginPerformSplice(Handle<JSArray>::cast(self));
- EnqueueChangeRecord(self, "new", name, old_value);
- EnqueueChangeRecord(self, "updated", isolate->factory()->length_string(),
+ BeginPerformSplice(Handle<JSArray>::cast(object));
+ EnqueueChangeRecord(object, "add", name, old_value);
+ EnqueueChangeRecord(object, "update", isolate->factory()->length_string(),
old_length_handle);
- EndPerformSplice(Handle<JSArray>::cast(self));
+ EndPerformSplice(Handle<JSArray>::cast(object));
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- EnqueueSpliceRecord(Handle<JSArray>::cast(self), old_length, deleted,
+ EnqueueSpliceRecord(Handle<JSArray>::cast(object), old_length, deleted,
new_length - old_length);
} else {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "add", name, old_value);
}
} else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else {
- Handle<Object> new_value = Object::GetElement(isolate, self, index);
+ Handle<Object> new_value = Object::GetElement(isolate, object, index);
bool value_changed = !old_value->SameValue(*new_value);
if (old_attributes != new_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigure", name, old_value);
} else if (value_changed) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "update", name, old_value);
}
}
- return *hresult;
+ return result;
}
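
For observed objects, SetElement above emits "add"/"update"/"reconfigure" change records and wraps a length-growing array store in a begin/end splice pair. A minimal sketch of just that record-sequencing decision, with a hypothetical emit() in place of the real EnqueueChangeRecord machinery:

// Sketch only: mirrors the branching in SetElement above for observed
// objects. The types and emit() helper are hypothetical stand-ins.
// (The real code also reports "reconfigure" when the old value was the hole.)
#include <cstdint>
#include <cstdio>

enum Attrs { ABSENT, PRESENT };

static void emit(const char* type, const char* name) {
  std::printf("record: %s %s\n", type, name);
}

static void RecordElementChange(Attrs old_attrs, bool is_array,
                                uint32_t old_len, uint32_t new_len,
                                bool value_changed, bool attrs_changed) {
  if (old_attrs == ABSENT) {
    if (is_array && new_len != old_len) {
      // Growing an array past its length is reported as one splice that
      // covers both the element "add" and the length "update".
      emit("add", "index");
      emit("update", "length");
      emit("splice", "index..");  // deleted = [], added = new_len - old_len
    } else {
      emit("add", "index");
    }
  } else if (attrs_changed) {
    emit("reconfigure", "index");
  } else if (value_changed) {
    emit("update", "index");
  }
}

int main() {
  RecordElementChange(ABSENT, true, 3, 4, true, false);    // array grew
  RecordElementChange(PRESENT, false, 0, 0, true, false);  // plain update
}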
-MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
- Object* value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode) {
- ASSERT(HasDictionaryElements() ||
- HasDictionaryArgumentsElements() ||
- (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
- Isolate* isolate = GetIsolate();
+Handle<Object> JSObject::SetElementWithoutInterceptor(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode) {
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements() ||
+ (attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
+ Isolate* isolate = object->GetIsolate();
if (FLAG_trace_external_array_abuse &&
- IsExternalArrayElementsKind(GetElementsKind())) {
- CheckArrayAbuse(this, "external elements write", index);
+ IsExternalArrayElementsKind(object->GetElementsKind())) {
+ CheckArrayAbuse(*object, "external elements write", index);
}
if (FLAG_trace_js_array_abuse &&
- !IsExternalArrayElementsKind(GetElementsKind())) {
- if (IsJSArray()) {
- CheckArrayAbuse(this, "elements write", index, true);
+ !IsExternalArrayElementsKind(object->GetElementsKind())) {
+ if (object->IsJSArray()) {
+ CheckArrayAbuse(*object, "elements write", index, true);
}
}
- switch (GetElementsKind()) {
+ switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- return SetFastElement(index, value, strict_mode, check_prototype);
+ return SetFastElement(object, index, value, strict_mode, check_prototype);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- return SetFastDoubleElement(index, value, strict_mode, check_prototype);
+ return SetFastDoubleElement(object, index, value, strict_mode,
+ check_prototype);
case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return pixels->SetValue(index, value);
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(object->elements());
+ return handle(pixels->SetValue(index, *value), isolate);
}
case EXTERNAL_BYTE_ELEMENTS: {
- ExternalByteArray* array = ExternalByteArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalByteArray> array(
+ ExternalByteArray::cast(object->elements()));
+ return ExternalByteArray::SetValue(array, index, value);
}
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
- ExternalUnsignedByteArray* array =
- ExternalUnsignedByteArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalUnsignedByteArray> array(
+ ExternalUnsignedByteArray::cast(object->elements()));
+ return ExternalUnsignedByteArray::SetValue(array, index, value);
}
case EXTERNAL_SHORT_ELEMENTS: {
- ExternalShortArray* array = ExternalShortArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalShortArray> array(ExternalShortArray::cast(
+ object->elements()));
+ return ExternalShortArray::SetValue(array, index, value);
}
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
- ExternalUnsignedShortArray* array =
- ExternalUnsignedShortArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalUnsignedShortArray> array(
+ ExternalUnsignedShortArray::cast(object->elements()));
+ return ExternalUnsignedShortArray::SetValue(array, index, value);
}
case EXTERNAL_INT_ELEMENTS: {
- ExternalIntArray* array = ExternalIntArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalIntArray> array(
+ ExternalIntArray::cast(object->elements()));
+ return ExternalIntArray::SetValue(array, index, value);
}
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- ExternalUnsignedIntArray* array =
- ExternalUnsignedIntArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalUnsignedIntArray> array(
+ ExternalUnsignedIntArray::cast(object->elements()));
+ return ExternalUnsignedIntArray::SetValue(array, index, value);
}
case EXTERNAL_FLOAT_ELEMENTS: {
- ExternalFloatArray* array = ExternalFloatArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalFloatArray> array(
+ ExternalFloatArray::cast(object->elements()));
+ return ExternalFloatArray::SetValue(array, index, value);
}
case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
- return array->SetValue(index, value);
+ Handle<ExternalDoubleArray> array(
+ ExternalDoubleArray::cast(object->elements()));
+ return ExternalDoubleArray::SetValue(array, index, value);
}
case DICTIONARY_ELEMENTS:
- return SetDictionaryElement(index, value, attr, strict_mode,
- check_prototype, set_mode);
+ return SetDictionaryElement(object, index, value, attributes, strict_mode,
+ check_prototype,
+ set_mode);
case NON_STRICT_ARGUMENTS_ELEMENTS: {
- FixedArray* parameter_map = FixedArray::cast(elements());
+ Handle<FixedArray> parameter_map(FixedArray::cast(object->elements()));
uint32_t length = parameter_map->length();
- Object* probe =
- (index < length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe != NULL && !probe->IsTheHole()) {
- Context* context = Context::cast(parameter_map->get(0));
- int context_index = Smi::cast(probe)->value();
+ Handle<Object> probe = index < length - 2 ?
+ Handle<Object>(parameter_map->get(index + 2), isolate) :
+ Handle<Object>();
+ if (!probe.is_null() && !probe->IsTheHole()) {
+ Handle<Context> context(Context::cast(parameter_map->get(0)));
+ int context_index = Handle<Smi>::cast(probe)->value();
ASSERT(!context->get(context_index)->IsTheHole());
- context->set(context_index, value);
+ context->set(context_index, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
- if (set_mode == SET_PROPERTY || attr == NONE) return value;
+ if (set_mode == SET_PROPERTY || attributes == NONE) return value;
parameter_map->set_the_hole(index + 2);
      // For elements that are still writable, we re-establish slow aliasing.
- if ((attr & READ_ONLY) == 0) {
- MaybeObject* maybe_entry =
- isolate->heap()->AllocateAliasedArgumentsEntry(context_index);
- if (!maybe_entry->ToObject(&value)) return maybe_entry;
+ if ((attributes & READ_ONLY) == 0) {
+ value = Handle<Object>::cast(
+ isolate->factory()->NewAliasedArgumentsEntry(context_index));
}
}
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
if (arguments->IsDictionary()) {
- return SetDictionaryElement(index, value, attr, strict_mode,
- check_prototype, set_mode);
+ return SetDictionaryElement(object, index, value, attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
} else {
- return SetFastElement(index, value, strict_mode, check_prototype);
+ return SetFastElement(object, index, value, strict_mode,
+ check_prototype);
}
}
}
// All possible cases have been handled above. Add a return to avoid the
// complaints from the compiler.
UNREACHABLE();
- return isolate->heap()->null_value();
+ return isolate->factory()->null_value();
}
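
The NON_STRICT_ARGUMENTS_ELEMENTS case above probes a parameter map: while a formal is still aliased, the write lands in its context slot; once the alias is broken (the map entry is the hole), the write goes to the backing store. A small sketch of that probe-and-forward logic; the real map packs the context and backing store into slots 0 and 1, which this sketch splits into separate fields:

// Sketch only: aliased-arguments write. std::vector stands in for the real
// FixedArray parameter map and Context. parameter_map[i] holds the context
// slot for formal i, or -1 for "the hole" (alias broken).
#include <cassert>
#include <cstddef>
#include <vector>

struct ArgumentsObject {
  std::vector<int> parameter_map;  // -1 marks a hole
  std::vector<double>* context;    // aliased storage for formals
  std::vector<double> backing;     // unaliased elements
};

void SetArgument(ArgumentsObject* args, size_t index, double value) {
  if (index < args->parameter_map.size() &&
      args->parameter_map[index] != -1) {
    // Still aliased: the write goes straight into the context slot.
    (*args->context)[args->parameter_map[index]] = value;
    return;
  }
  if (index >= args->backing.size()) args->backing.resize(index + 1);
  args->backing[index] = value;
}

int main() {
  std::vector<double> ctx = {1.0, 2.0};
  ArgumentsObject args{{0, 1}, &ctx, {}};
  SetArgument(&args, 0, 42.0);   // aliased: updates ctx[0]
  assert(ctx[0] == 42.0);
  args.parameter_map[1] = -1;    // break the alias for formal 1
  SetArgument(&args, 1, 7.0);    // now lands in the backing store
  assert(args.backing[1] == 7.0);
}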
-Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->TransitionElementsKind(to_kind),
- Object);
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->TransitionElementsKind(to_kind));
}
-MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
- if (!FLAG_track_allocation_sites || !IsJSArray()) {
- return this;
- }
+const double AllocationSite::kPretenureRatio = 0.60;
- AllocationMemento* memento = AllocationMemento::FindForJSObject(this);
- if (memento == NULL || !memento->IsValid()) {
- return this;
+
+bool AllocationSite::IsNestedSite() {
+ ASSERT(FLAG_trace_track_allocation_sites);
+ Object* current = GetHeap()->allocation_sites_list();
+ while (current != NULL && current->IsAllocationSite()) {
+ AllocationSite* current_site = AllocationSite::cast(current);
+ if (current_site->nested_site() == this) {
+ return true;
+ }
+ current = current_site->weak_next();
}
+ return false;
+}
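
IsNestedSite above is a linear walk over the heap's weak list of allocation sites, asking whether any other site records this one as its nested_site(). The same traversal over a plain singly-linked list, with the field names borrowed from the code and everything else hypothetical:

// Sketch only: linear scan over a weak list of sites, checking whether any
// site on the list records `this` as its nested site.
#include <cassert>

struct Site {
  Site* nested_site = nullptr;  // literal nested inside another literal
  Site* weak_next = nullptr;    // next entry in the global sites list

  bool IsNestedSite(Site* list_head) const {
    for (Site* cur = list_head; cur != nullptr; cur = cur->weak_next) {
      if (cur->nested_site == this) return true;
    }
    return false;
  }
};

int main() {
  Site inner, outer;
  outer.nested_site = &inner;
  outer.weak_next = &inner;            // list: outer -> inner
  assert(inner.IsNestedSite(&outer));  // inner is referenced by outer
  assert(!outer.IsNestedSite(&outer));
}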
- // Walk through to the Allocation Site
- AllocationSite* site = memento->GetAllocationSite();
- if (site->IsLiteralSite()) {
- JSArray* transition_info = JSArray::cast(site->transition_info());
+
+MaybeObject* AllocationSite::DigestTransitionFeedback(ElementsKind to_kind) {
+ Isolate* isolate = GetIsolate();
+
+ if (SitePointsToLiteral() && transition_info()->IsJSArray()) {
+ JSArray* transition_info = JSArray::cast(this->transition_info());
ElementsKind kind = transition_info->GetElementsKind();
    // If kind is holey, ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
CHECK(transition_info->length()->ToArrayIndex(&length));
- if (length <= AllocationSite::kMaximumArrayBytesToPretransition) {
+ if (length <= kMaximumArrayBytesToPretransition) {
if (FLAG_trace_track_allocation_sites) {
+ bool is_nested = IsNestedSite();
PrintF(
- "AllocationSite: JSArray %p boilerplate updated %s->%s\n",
+ "AllocationSite: JSArray %p boilerplate %s updated %s->%s\n",
reinterpret_cast<void*>(this),
+ is_nested ? "(nested)" : "",
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- return transition_info->TransitionElementsKind(to_kind);
+ MaybeObject* result = transition_info->TransitionElementsKind(to_kind);
+ if (result->IsFailure()) return result;
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
} else {
- ElementsKind kind = site->GetElementsKind();
+ ElementsKind kind = GetElementsKind();
    // If kind is holey, ensure that to_kind is as well.
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
reinterpret_cast<void*>(this),
ElementsKindToString(kind),
ElementsKindToString(to_kind));
}
- site->set_transition_info(Smi::FromInt(to_kind));
+ SetElementsKind(to_kind);
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
}
}
return this;
}
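
DigestTransitionFeedback only records feedback when to_kind is strictly more general than the current kind, after first widening to_kind to a holey kind if the current kind is holey. A sketch of that check over a simplified total order SMI < DOUBLE < OBJECT; the real IsMoreGeneralElementsKindTransition covers more kinds and is not a simple enum comparison:

// Sketch only: simplified elements-kind lattice with a holey bit, mirroring
// the "holey stays holey, never narrow" rules above.
#include <cassert>

enum Kind { SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT };

bool IsHoley(Kind k) {
  return k == HOLEY_SMI || k == HOLEY_DOUBLE || k == HOLEY_OBJECT;
}
Kind ToHoley(Kind k) {
  switch (k) {
    case SMI:    return HOLEY_SMI;
    case DOUBLE: return HOLEY_DOUBLE;
    case OBJECT: return HOLEY_OBJECT;
    default:     return k;  // already holey
  }
}
int Generality(Kind k) { return static_cast<int>(k); }  // packed < holey

// Returns the kind the site should record, or the old kind if no update.
Kind Digest(Kind site_kind, Kind to_kind) {
  if (IsHoley(site_kind)) to_kind = ToHoley(to_kind);
  return Generality(to_kind) > Generality(site_kind) ? to_kind : site_kind;
}

int main() {
  assert(Digest(HOLEY_SMI, DOUBLE) == HOLEY_DOUBLE);  // holey is sticky
  assert(Digest(OBJECT, SMI) == OBJECT);              // never narrows
}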
+void AllocationSite::AddDependentCompilationInfo(Reason reason,
+ CompilationInfo* info) {
+ DependentCode::DependencyGroup group = ToDependencyGroup(reason);
+ Handle<DependentCode> dep(dependent_code());
+ Handle<DependentCode> codes =
+ DependentCode::Insert(dep, group, info->object_wrapper());
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+ info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone());
+}
+
+
+void AllocationSite::AddDependentCode(Reason reason, Handle<Code> code) {
+ DependentCode::DependencyGroup group = ToDependencyGroup(reason);
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()), group, code);
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+}
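
Both registration helpers above follow the same copy-on-grow idiom: DependentCode::Insert may return a new backing array, and the site only re-points its field when it actually did. A sketch of that idiom with shared_ptr<vector> standing in for the real DependentCode array:

// Sketch only: copy-on-grow registration, as in AddDependentCode above.
// Insert may reallocate; the owner re-points only when it actually did.
#include <cassert>
#include <memory>
#include <vector>

using Codes = std::vector<int>;

std::shared_ptr<Codes> Insert(std::shared_ptr<Codes> codes, int code) {
  if (codes->size() == codes->capacity()) {  // must grow: fresh array
    auto grown = std::make_shared<Codes>(*codes);
    grown->reserve(codes->capacity() == 0 ? 4 : codes->capacity() * 2);
    grown->push_back(code);
    return grown;
  }
  codes->push_back(code);                    // fits: insert in place
  return codes;
}

struct Site {
  std::shared_ptr<Codes> dependent_code = std::make_shared<Codes>();
  void AddDependentCode(int code) {
    auto codes = Insert(dependent_code, code);
    if (codes != dependent_code) dependent_code = codes;  // re-point on growth
  }
};

int main() {
  Site s;
  s.AddDependentCode(1);
  s.AddDependentCode(2);
  assert(s.dependent_code->size() == 2);
}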
+
+
+void JSObject::UpdateAllocationSite(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->UpdateAllocationSite(to_kind));
+}
+
+
+MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) {
+ if (!FLAG_track_allocation_sites || !IsJSArray()) {
+ return this;
+ }
+
+ AllocationMemento* memento = AllocationMemento::FindForJSObject(this);
+ if (memento == NULL || !memento->IsValid()) {
+ return this;
+ }
+
+ // Walk through to the Allocation Site
+ AllocationSite* site = memento->GetAllocationSite();
+ return site->DigestTransitionFeedback(to_kind);
+}
+
+
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
- ASSERT(!map()->is_observed());
ElementsKind from_kind = map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
@@ -12535,9 +12921,11 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
}
if (from_kind == to_kind) return this;
-
- MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ // Don't update the site if to_kind isn't fast
+ if (IsFastElementsKind(to_kind)) {
+ MaybeObject* maybe_failure = UpdateAllocationSite(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
Isolate* isolate = GetIsolate();
if (elements() == isolate->heap()->empty_fixed_array() ||
@@ -12613,6 +13001,14 @@ bool Map::IsValidElementsTransition(ElementsKind from_kind,
}
+void JSArray::JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION_VOID(array->GetIsolate(),
+ array->JSArrayUpdateLengthFromIndex(index, *value));
+}
+
+
MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
Object* value) {
uint32_t old_len = 0;
@@ -12638,7 +13034,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChange ncc(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
Handle<Object> this_handle(receiver, isolate);
@@ -12718,8 +13114,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
}
break;
case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(FixedArray::cast(elements()));
+ SeededNumberDictionary* dictionary = element_dictionary();
*capacity = dictionary->Capacity();
*used = dictionary->NumberOfElements();
break;
@@ -12818,8 +13213,7 @@ bool JSObject::ShouldConvertToFastDoubleElements(
*has_smi_only_elements = false;
if (FLAG_unbox_double_arrays) {
ASSERT(HasDictionaryElements());
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(elements());
+ SeededNumberDictionary* dictionary = element_dictionary();
bool found_double = false;
for (int i = 0; i < dictionary->Capacity(); i++) {
Object* key = dictionary->KeyAt(i);
@@ -12902,21 +13296,26 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() {
}
-MaybeObject* JSObject::GetPropertyPostInterceptor(
- Object* receiver,
- Name* name,
+Handle<Object> JSObject::GetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (result.IsFound()) {
- return GetProperty(receiver, &result, name, attributes);
+ Isolate* isolate = object->GetIsolate();
+ LookupResult lookup(isolate);
+ object->LocalLookupRealNamedProperty(*name, &lookup);
+ Handle<Object> result;
+ if (lookup.IsFound()) {
+ result = GetProperty(object, receiver, &lookup, name, attributes);
+ } else {
+ // Continue searching via the prototype chain.
+ Handle<Object> prototype(object->GetPrototype(), isolate);
+ *attributes = ABSENT;
+ if (prototype->IsNull()) return isolate->factory()->undefined_value();
+ result = GetPropertyWithReceiver(prototype, receiver, name, attributes);
}
- // Continue searching via the prototype chain.
- Object* pt = GetPrototype();
- *attributes = ABSENT;
- if (pt->IsNull()) return GetHeap()->undefined_value();
- return pt->GetPropertyWithReceiver(receiver, name, attributes);
+ return result;
}
@@ -12934,93 +13333,98 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
}
-MaybeObject* JSObject::GetPropertyWithInterceptor(
- Object* receiver,
- Name* name,
+Handle<Object> JSObject::GetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes) {
+ Isolate* isolate = object->GetIsolate();
+
// TODO(rossberg): Support symbols in the API.
- if (name->IsSymbol()) return GetHeap()->undefined_value();
+ if (name->IsSymbol()) return isolate->factory()->undefined_value();
- Isolate* isolate = GetIsolate();
- InterceptorInfo* interceptor = GetNamedInterceptor();
- HandleScope scope(isolate);
- Handle<Object> receiver_handle(receiver, isolate);
- Handle<JSObject> holder_handle(this);
- Handle<String> name_handle(String::cast(name));
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor(), isolate);
+ Handle<String> name_string = Handle<String>::cast(name);
if (!interceptor->getter()->IsUndefined()) {
v8::NamedPropertyGetterCallback getter =
v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
+ ApiNamedPropertyAccess("interceptor-named-get", *object, *name));
PropertyCallbackArguments
- args(isolate, interceptor->data(), receiver, this);
+ args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Value> result =
- args.Call(getter, v8::Utils::ToLocal(name_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ args.Call(getter, v8::Utils::ToLocal(name_string));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
*attributes = NONE;
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox handle to escape this scope.
+ return handle(*result_internal, isolate);
}
}
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attributes);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
+ return GetPropertyPostInterceptor(object, receiver, name, attributes);
}
-bool JSObject::HasRealNamedProperty(Isolate* isolate, Name* key) {
+bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
+ object->LocalLookupRealNamedProperty(*key, &result);
return result.IsFound() && !result.IsInterceptor();
}
-bool JSObject::HasRealElementProperty(Isolate* isolate, uint32_t index) {
+bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
+ if (object->IsJSGlobalProxy()) {
+ HandleScope scope(isolate);
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return false;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->HasRealElementProperty(isolate, index);
+ return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
}
- return GetElementAttributeWithoutInterceptor(this, index, false) != ABSENT;
+ return object->GetElementAttributeWithoutInterceptor(
+ *object, index, false) != ABSENT;
}
-bool JSObject::HasRealNamedCallbackProperty(Isolate* isolate, Name* key) {
+bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
+ Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
+ SealHandleScope shs(isolate);
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS);
return false;
}
}
LookupResult result(isolate);
- LocalLookupRealNamedProperty(key, &result);
+ object->LocalLookupRealNamedProperty(*key, &result);
return result.IsPropertyCallbacks();
}
@@ -13031,7 +13435,7 @@ int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
if (filter == NONE) return map->NumberOfOwnDescriptors();
if (filter & DONT_ENUM) {
int result = map->EnumLength();
- if (result != Map::kInvalidEnumCache) return result;
+ if (result != kInvalidEnumCacheSentinel) return result;
}
return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
}
@@ -13854,7 +14258,9 @@ void HashTable<Shape, Key>::Rehash(Key key) {
template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
+MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n,
+ Key key,
+ PretenureFlag pretenure) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
int nod = NumberOfDeletedElements();
@@ -13867,14 +14273,14 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
}
const int kMinCapacityForPretenure = 256;
- bool pretenure =
- (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
+ bool should_pretenure = pretenure == TENURED ||
+ ((capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this));
Object* obj;
{ MaybeObject* maybe_obj =
Allocate(GetHeap(),
nof * 2,
USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure ? TENURED : NOT_TENURED);
+ should_pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
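
EnsureCapacity now lets callers force tenuring; otherwise a table is pretenured only when it is already large and already out of new space. The predicate in isolation, with in_new_space standing in for Heap::InNewSpace(this):

// Sketch only: the pretenuring predicate from EnsureCapacity above.
// kMinCapacityForPretenure matches the constant in the code.
#include <cassert>

enum PretenureFlag { NOT_TENURED, TENURED };

bool ShouldPretenure(PretenureFlag requested, int capacity,
                     bool in_new_space) {
  const int kMinCapacityForPretenure = 256;
  return requested == TENURED ||
         (capacity > kMinCapacityForPretenure && !in_new_space);
}

int main() {
  assert(ShouldPretenure(TENURED, 8, true));  // caller forces tenuring
  assert(ShouldPretenure(NOT_TENURED, 512, false));
  assert(!ShouldPretenure(NOT_TENURED, 512, true));
}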
@@ -13942,6 +14348,8 @@ template class HashTable<ObjectHashTableShape<1>, Object*>;
template class HashTable<ObjectHashTableShape<2>, Object*>;
+template class HashTable<WeakHashTableShape<2>, Object*>;
+
template class Dictionary<NameDictionaryShape, Name*>;
template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
@@ -14042,6 +14450,14 @@ template
int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+Handle<Object> JSObject::PrepareSlowElementsForSort(
+ Handle<JSObject> object, uint32_t limit) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->PrepareSlowElementsForSort(limit),
+ Object);
+}
+
+
// Collates undefined and nonexistent elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
@@ -14144,74 +14560,57 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
// the start of the elements array.
// If the object is in dictionary mode, it is converted to fast elements
// mode.
-MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- Heap* heap = GetHeap();
+Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
+ uint32_t limit) {
+ Isolate* isolate = object->GetIsolate();
- ASSERT(!map()->is_observed());
- if (HasDictionaryElements()) {
+ ASSERT(!object->map()->is_observed());
+ if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
- SeededNumberDictionary* dict = element_dictionary();
- if (IsJSArray() || dict->requires_slow_elements() ||
+ Handle<SeededNumberDictionary> dict(object->element_dictionary());
+ if (object->IsJSArray() || dict->requires_slow_elements() ||
dict->max_number_key() >= limit) {
- return PrepareSlowElementsForSort(limit);
+ return JSObject::PrepareSlowElementsForSort(object, limit);
}
// Convert to fast elements.
- Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- FAST_HOLEY_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- Map* new_map = Map::cast(obj);
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(object, FAST_HOLEY_ELEMENTS);
- PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
- Object* new_array;
- { MaybeObject* maybe_new_array =
- heap->AllocateFixedArray(dict->NumberOfElements(), tenure);
- if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array;
- }
- FixedArray* fast_elements = FixedArray::cast(new_array);
- dict->CopyValuesTo(fast_elements);
- ValidateElements();
+ PretenureFlag tenure = isolate->heap()->InNewSpace(*object) ?
+        NOT_TENURED : TENURED;
+ Handle<FixedArray> fast_elements =
+ isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure);
+ dict->CopyValuesTo(*fast_elements);
+ object->ValidateElements();
- set_map_and_elements(new_map, fast_elements);
- } else if (HasExternalArrayElements()) {
+ object->set_map_and_elements(*new_map, *fast_elements);
+ } else if (object->HasExternalArrayElements()) {
// External arrays cannot have holes or undefined elements.
- return Smi::FromInt(ExternalArray::cast(elements())->length());
- } else if (!HasFastDoubleElements()) {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ return handle(Smi::FromInt(
+ ExternalArray::cast(object->elements())->length()), isolate);
+ } else if (!object->HasFastDoubleElements()) {
+ EnsureWritableFastElements(object);
}
- ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
+ ASSERT(object->HasFastSmiOrObjectElements() ||
+ object->HasFastDoubleElements());
  // Collect holes at the end, undefined before that, and the rest at the
// start, and return the number of non-hole, non-undefined values.
- FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements());
+ Handle<FixedArrayBase> elements_base(object->elements());
uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
if (limit > elements_length) {
    limit = elements_length;
}
if (limit == 0) {
- return Smi::FromInt(0);
- }
-
- HeapNumber* result_double = NULL;
- if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
- // Pessimistically allocate space for return value before
- // we start mutating the array.
- Object* new_double;
- { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
- if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
- }
- result_double = HeapNumber::cast(new_double);
+ return handle(Smi::FromInt(0), isolate);
}
uint32_t result = 0;
- if (elements_base->map() == heap->fixed_double_array_map()) {
- FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base);
+ if (elements_base->map() == isolate->heap()->fixed_double_array_map()) {
+ FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base);
// Split elements into defined and the_hole, in that order.
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
@@ -14238,7 +14637,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
holes++;
}
} else {
- FixedArray* elements = FixedArray::cast(elements_base);
+ FixedArray* elements = FixedArray::cast(*elements_base);
DisallowHeapAllocation no_gc;
// Split elements into defined, undefined and the_hole, in that order. Only
@@ -14283,12 +14682,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
}
- if (result <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Smi::FromInt(static_cast<int>(result));
- }
- ASSERT_NE(NULL, result_double);
- result_double->set_value(static_cast<double>(result));
- return result_double;
+ return isolate->factory()->NewNumberFromUint(result);
}
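
The fast path of PrepareElementsForSort partitions the first limit slots so defined values come first, then undefineds, then holes, and returns the defined count. A compact sketch of the same partitioning over a three-state slot enum rather than real tagged values:

// Sketch only: partition defined values before undefined before holes and
// return the number of defined values, as PrepareElementsForSort does.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

enum Slot { DEFINED, UNDEFINED_SLOT, HOLE };

uint32_t PrepareForSort(std::vector<Slot>& elems, uint32_t limit) {
  if (limit > elems.size()) limit = static_cast<uint32_t>(elems.size());
  auto begin = elems.begin(), end = elems.begin() + limit;
  // Defined values first, then undefined, then holes.
  auto mid = std::stable_partition(begin, end,
                                   [](Slot s) { return s == DEFINED; });
  std::stable_partition(mid, end,
                        [](Slot s) { return s == UNDEFINED_SLOT; });
  return static_cast<uint32_t>(mid - begin);
}

int main() {
  std::vector<Slot> v = {HOLE, DEFINED, UNDEFINED_SLOT, DEFINED, HOLE};
  assert(PrepareForSort(v, 5) == 2);
  assert(v[0] == DEFINED && v[1] == DEFINED && v[2] == UNDEFINED_SLOT);
}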
@@ -14404,12 +14798,31 @@ static MaybeObject* ExternalArrayIntSetter(Heap* heap,
}
+Handle<Object> ExternalByteArray::SetValue(Handle<ExternalByteArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalByteArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalByteArray, int8_t>
(GetHeap(), this, index, value);
}
+Handle<Object> ExternalUnsignedByteArray::SetValue(
+ Handle<ExternalUnsignedByteArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
@@ -14417,6 +14830,16 @@ MaybeObject* ExternalUnsignedByteArray::SetValue(uint32_t index,
}
+Handle<Object> ExternalShortArray::SetValue(
+ Handle<ExternalShortArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalShortArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalShortArray, int16_t>
@@ -14424,6 +14847,16 @@ MaybeObject* ExternalShortArray::SetValue(uint32_t index,
}
+Handle<Object> ExternalUnsignedShortArray::SetValue(
+ Handle<ExternalUnsignedShortArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
Object* value) {
return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
@@ -14431,12 +14864,31 @@ MaybeObject* ExternalUnsignedShortArray::SetValue(uint32_t index,
}
+Handle<Object> ExternalIntArray::SetValue(Handle<ExternalIntArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalIntArray::SetValue(uint32_t index, Object* value) {
return ExternalArrayIntSetter<ExternalIntArray, int32_t>
(GetHeap(), this, index, value);
}
+Handle<Object> ExternalUnsignedIntArray::SetValue(
+ Handle<ExternalUnsignedIntArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
uint32_t cast_value = 0;
Heap* heap = GetHeap();
@@ -14458,6 +14910,15 @@ MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
}
+Handle<Object> ExternalFloatArray::SetValue(Handle<ExternalFloatArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
float cast_value = static_cast<float>(OS::nan_value());
Heap* heap = GetHeap();
@@ -14479,6 +14940,15 @@ MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
}
+Handle<Object> ExternalDoubleArray::SetValue(Handle<ExternalDoubleArray> array,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(array->GetIsolate(),
+ array->SetValue(index, *value),
+ Object);
+}
+
+
MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
double double_value = OS::nan_value();
Heap* heap = GetHeap();
@@ -14506,17 +14976,6 @@ PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
- Handle<Name> name,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(dict->GetIsolate(),
- dict->Add(*name, *value, details),
- NameDictionary);
-}
-
-
Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
Handle<JSGlobalObject> global,
Handle<Name> name) {
@@ -14906,7 +15365,7 @@ MaybeObject* Dictionary<Shape, Key>::Allocate(Heap* heap,
HashTable<Shape, Key>::Allocate(
heap,
at_least_space_for,
- HashTable<Shape, Key>::USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_DEFAULT_MINIMUM_CAPACITY,
pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -15111,6 +15570,15 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
}
}
+Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+ dictionary->AddNumberEntry(key, *value, details),
+ SeededNumberDictionary);
+}
MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
Object* value,
@@ -15334,7 +15802,7 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
// Make sure we preserve dictionary representation if there are too many
// descriptors.
int number_of_elements = NumberOfElements();
- if (number_of_elements > DescriptorArray::kMaxNumberOfDescriptors) return obj;
+ if (number_of_elements > kMaxNumberOfDescriptors) return obj;
if (number_of_elements != NextEnumerationIndex()) {
MaybeObject* maybe_result = GenerateNewEnumerationIndices();
@@ -15472,61 +15940,99 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
}
+Handle<ObjectHashSet> ObjectHashSet::EnsureCapacity(
+ Handle<ObjectHashSet> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure) {
+ Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->EnsureCapacity(n, *key, pretenure),
+ ObjectHashSet);
+}
+
+
+Handle<ObjectHashSet> ObjectHashSet::Shrink(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
+ Handle<HashTable<ObjectHashTableShape<1>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->Shrink(*key),
+ ObjectHashSet);
+}
+
+
bool ObjectHashSet::Contains(Object* key) {
ASSERT(IsKey(key));
// If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
- }
+ Object* hash = key->GetHash();
+ if (hash->IsUndefined()) return false;
+
return (FindEntry(key) != kNotFound);
}
-MaybeObject* ObjectHashSet::Add(Object* key) {
- ASSERT(IsKey(key));
+Handle<ObjectHashSet> ObjectHashSet::Add(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
+ ASSERT(table->IsKey(*key));
// Make sure the key object has an identity hash code.
- int hash;
- { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
- if (maybe_hash->IsFailure()) return maybe_hash;
- ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
- hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
- }
- int entry = FindEntry(key);
+ Handle<Object> object_hash = Object::GetOrCreateHash(key,
+ table->GetIsolate());
+
+ int entry = table->FindEntry(*key);
// Check whether key is already present.
- if (entry != kNotFound) return this;
+ if (entry != kNotFound) return table;
// Check whether the hash set should be extended and add entry.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ObjectHashSet* table = ObjectHashSet::cast(obj);
- entry = table->FindInsertionEntry(hash);
- table->set(EntryToIndex(entry), key);
- table->ElementAdded();
- return table;
+ Handle<ObjectHashSet> new_table =
+ ObjectHashSet::EnsureCapacity(table, 1, key);
+ entry = new_table->FindInsertionEntry(Smi::cast(*object_hash)->value());
+ new_table->set(EntryToIndex(entry), *key);
+ new_table->ElementAdded();
+ return new_table;
}
-MaybeObject* ObjectHashSet::Remove(Object* key) {
- ASSERT(IsKey(key));
+Handle<ObjectHashSet> ObjectHashSet::Remove(Handle<ObjectHashSet> table,
+ Handle<Object> key) {
+ ASSERT(table->IsKey(*key));
// If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
- }
- int entry = FindEntry(key);
+ if (key->GetHash()->IsUndefined()) return table;
+
+ int entry = table->FindEntry(*key);
// Check whether key is actually present.
- if (entry == kNotFound) return this;
+ if (entry == kNotFound) return table;
// Remove entry and try to shrink this hash set.
- set_the_hole(EntryToIndex(entry));
- ElementRemoved();
- return Shrink(key);
+ table->set_the_hole(EntryToIndex(entry));
+ table->ElementRemoved();
+
+ return ObjectHashSet::Shrink(table, key);
+}
+
+
+Handle<ObjectHashTable> ObjectHashTable::EnsureCapacity(
+ Handle<ObjectHashTable> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure) {
+ Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->EnsureCapacity(n, *key, pretenure),
+ ObjectHashTable);
+}
+
+
+Handle<ObjectHashTable> ObjectHashTable::Shrink(
+ Handle<ObjectHashTable> table, Handle<Object> key) {
+ Handle<HashTable<ObjectHashTableShape<2>, Object*> > table_base = table;
+ CALL_HEAP_FUNCTION(table_base->GetIsolate(),
+ table_base->Shrink(*key),
+ ObjectHashTable);
}
@@ -15534,10 +16040,9 @@ Object* ObjectHashTable::Lookup(Object* key) {
ASSERT(IsKey(key));
// If the object does not have an identity hash, it was never used as a key.
- { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
- if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
- return GetHeap()->the_hole_value();
- }
+ Object* hash = key->GetHash();
+ if (hash->IsUndefined()) {
+ return GetHeap()->the_hole_value();
}
int entry = FindEntry(key);
if (entry == kNotFound) return GetHeap()->the_hole_value();
@@ -15545,38 +16050,36 @@ Object* ObjectHashTable::Lookup(Object* key) {
}
-MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
- ASSERT(IsKey(key));
+Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value) {
+ ASSERT(table->IsKey(*key));
+
+ Isolate* isolate = table->GetIsolate();
// Make sure the key object has an identity hash code.
- int hash;
- { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
- if (maybe_hash->IsFailure()) return maybe_hash;
- ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
- hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
- }
- int entry = FindEntry(key);
+ Handle<Object> hash = Object::GetOrCreateHash(key, isolate);
+
+ int entry = table->FindEntry(*key);
// Check whether to perform removal operation.
if (value->IsTheHole()) {
- if (entry == kNotFound) return this;
- RemoveEntry(entry);
- return Shrink(key);
+ if (entry == kNotFound) return table;
+ table->RemoveEntry(entry);
+ return Shrink(table, key);
}
// Key is already in table, just overwrite value.
if (entry != kNotFound) {
- set(EntryToIndex(entry) + 1, value);
- return this;
+ table->set(EntryToIndex(entry) + 1, *value);
+ return table;
}
// Check whether the hash table should be extended.
- Object* obj;
- { MaybeObject* maybe_obj = EnsureCapacity(1, key);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- ObjectHashTable* table = ObjectHashTable::cast(obj);
- table->AddEntry(table->FindInsertionEntry(hash), key, value);
+ table = EnsureCapacity(table, 1, key);
+ table->AddEntry(table->FindInsertionEntry(Handle<Smi>::cast(hash)->value()),
+ *key,
+ *value);
return table;
}
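
Put above folds three operations into one entry point: storing the hole deletes, storing over an existing key overwrites in place, and only a genuinely new key can grow the table. The same control flow sketched over std::unordered_map keyed by identity hash (not the real open-addressed layout):

// Sketch only: Put with "store the hole" meaning delete, mirroring
// ObjectHashTable::Put above. std::optional plays the hole.
#include <cassert>
#include <optional>
#include <unordered_map>

using Table = std::unordered_map<int, int>;  // key = identity hash

// value == nullopt stands in for storing the_hole.
void Put(Table& table, int key, std::optional<int> value) {
  auto it = table.find(key);
  if (!value.has_value()) {          // removal path
    if (it != table.end()) table.erase(it);
    return;
  }
  if (it != table.end()) {           // key present: overwrite in place
    it->second = *value;
    return;
  }
  table.emplace(key, *value);        // new key: insert (may grow)
}

int main() {
  Table t;
  Put(t, 1, 42);
  Put(t, 1, 43);
  assert(t.at(1) == 43);
  Put(t, 1, std::nullopt);           // the-hole store removes the entry
  assert(t.find(1) == t.end());
}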
@@ -15595,6 +16098,41 @@ void ObjectHashTable::RemoveEntry(int entry) {
}
+Object* WeakHashTable::Lookup(Object* key) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
+ return get(EntryToValueIndex(entry));
+}
+
+
+MaybeObject* WeakHashTable::Put(Object* key, Object* value) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ set(EntryToValueIndex(entry), value);
+ return this;
+ }
+
+ // Check whether the hash table should be extended.
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, key, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ WeakHashTable* table = WeakHashTable::cast(obj);
+ table->AddEntry(table->FindInsertionEntry(Hash(key)), key, value);
+ return table;
+}
+
+
+void WeakHashTable::AddEntry(int entry, Object* key, Object* value) {
+ set(EntryToIndex(entry), key);
+ set(EntryToValueIndex(entry), value);
+ ElementAdded();
+}
+
+
DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator(
DeclaredAccessorDescriptor* descriptor)
: array_(descriptor->serialized_data()->GetDataStartAddress()),
@@ -16070,8 +16608,8 @@ void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) {
}
-Type* PropertyCell::UpdateType(Handle<PropertyCell> cell,
- Handle<Object> value) {
+Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
Isolate* isolate = cell->GetIsolate();
Handle<Type> old_type(cell->type(), isolate);
// TODO(2803): Do not track ConsString as constant because they cannot be
@@ -16081,34 +16619,27 @@ Type* PropertyCell::UpdateType(Handle<PropertyCell> cell,
: Type::Constant(value, isolate), isolate);
if (new_type->Is(old_type)) {
- return *old_type;
+ return old_type;
}
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
if (old_type->Is(Type::None()) || old_type->Is(Type::Undefined())) {
- return *new_type;
+ return new_type;
}
- return Type::Any();
+ return handle(Type::Any(), isolate);
}
-MaybeObject* PropertyCell::SetValueInferType(Object* value,
- WriteBarrierMode ignored) {
- set_value(value, ignored);
- if (!Type::Any()->Is(type())) {
- IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
- MaybeObject* maybe_type = trampoline.CallWithReturnValue(
- &PropertyCell::UpdateType,
- Handle<PropertyCell>(this),
- Handle<Object>(value, GetIsolate()));
- Type* new_type = NULL;
- if (!maybe_type->To(&new_type)) return maybe_type;
- set_type(new_type);
+void PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value) {
+ cell->set_value(*value);
+ if (!Type::Any()->Is(cell->type())) {
+ Handle<Type> new_type = UpdatedType(cell, value);
+ cell->set_type(*new_type);
}
- return value;
}
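
UpdatedType implements a small monotone lattice: a cell's type only ever moves from None/Undefined to Constant(v) to Any, and every widening deoptimizes code that depended on the old type. A sketch of that update, with a stubbed deopt hook:

// Sketch only: the monotone cell-type update from UpdatedType above.
// None -> Constant(v) -> Any; each widening triggers a (stubbed) deopt.
#include <cassert>
#include <cstdio>

enum CellType { NONE, CONSTANT, ANY };

struct Cell {
  CellType type = NONE;
  int constant = 0;  // meaningful only when type == CONSTANT
};

void DeoptimizeDependents() { std::puts("deopt: property cell changed"); }

void SetValueInferType(Cell& cell, int value) {
  if (cell.type == ANY) return;                                  // at top
  if (cell.type == CONSTANT && cell.constant == value) return;   // no change
  DeoptimizeDependents();
  if (cell.type == NONE) {
    cell.type = CONSTANT;
    cell.constant = value;
  } else {
    cell.type = ANY;  // a second distinct value widens to Any for good
  }
}

int main() {
  Cell c;
  SetValueInferType(c, 1);  // None -> Constant(1), deopts
  SetValueInferType(c, 1);  // same constant: no change
  SetValueInferType(c, 2);  // Constant(1) -> Any, deopts
  assert(c.type == ANY);
}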
diff --git a/chromium/v8/src/objects.h b/chromium/v8/src/objects.h
index 12087eb00a6..a7f01d18569 100644
--- a/chromium/v8/src/objects.h
+++ b/chromium/v8/src/objects.h
@@ -179,6 +179,12 @@ enum KeyedAccessStoreMode {
};
+enum ContextualMode {
+ NOT_CONTEXTUAL,
+ CONTEXTUAL
+};
+
+
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
STANDARD_STORE;
STATIC_ASSERT(STANDARD_STORE == 0);
@@ -255,13 +261,6 @@ enum NormalizedMapSharingMode {
};
-// Indicates whether a get method should implicitly create the object looked up.
-enum CreationFlag {
- ALLOW_CREATION,
- OMIT_CREATION
-};
-
-
// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
INSERT_TRANSITION,
@@ -304,10 +303,15 @@ enum MarkingParity {
EVEN_MARKING_PARITY
};
+// ICs store extra state in a Code object. The default extra state is
+// kNoExtraICState.
+typedef int ExtraICState;
+static const ExtraICState kNoExtraICState = 0;
+
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
-const int kStubMajorKeyBits = 6;
+const int kStubMajorKeyBits = 7;
const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
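
Widening kStubMajorKeyBits from 6 to 7 takes one bit away from the minor key, since both fields share the Smi-tagged int. A quick check of the arithmetic, assuming 32-bit ints and a one-bit Smi tag as on 32-bit targets:

// Sketch only: stub-key bit budget under the stated 32-bit assumptions.
#include <cassert>

int main() {
  const int kBitsPerInt = 32;
  const int kSmiTagSize = 1;
  const int kStubMajorKeyBits = 7;  // was 6 before this change
  const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
  assert(kStubMinorKeyBits == 24);  // one bit fewer than before
}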
// All Maps have a field instance_type containing an InstanceType.
@@ -333,7 +337,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST_ALL(V) \
+#define INSTANCE_TYPE_LIST(V) \
V(STRING_TYPE) \
V(ASCII_STRING_TYPE) \
V(CONS_STRING_TYPE) \
@@ -359,6 +363,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
\
V(SYMBOL_TYPE) \
+ \
V(MAP_TYPE) \
V(CODE_TYPE) \
V(ODDBALL_TYPE) \
@@ -405,6 +410,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(CONSTANT_POOL_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
\
V(JS_MESSAGE_OBJECT_TYPE) \
@@ -431,18 +437,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
\
V(JS_FUNCTION_TYPE) \
V(JS_FUNCTION_PROXY_TYPE) \
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
-#else
-#define INSTANCE_TYPE_LIST_DEBUGGER(V)
-#endif
-
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_ALL(V) \
- INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
@@ -700,7 +696,7 @@ enum InstanceType {
| kNotInternalizedTag,
// Non-string names
- SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE
+ SYMBOL_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
// Objects allocated in their own spaces (never in new space).
MAP_TYPE,
@@ -725,6 +721,7 @@ enum InstanceType {
EXTERNAL_DOUBLE_ARRAY_TYPE,
EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
+ CONSTANT_POOL_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
@@ -873,24 +870,19 @@ enum CompareResult {
inline void set_##name(type* value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-
class AccessorPair;
+class AllocationSite;
+class AllocationSiteCreationContext;
+class AllocationSiteUsageContext;
class DictionaryElementsAccessor;
class ElementsAccessor;
class Failure;
class FixedArrayBase;
+class GlobalObject;
class ObjectVisitor;
class StringStream;
class Type;
-struct ValueInfo : public Malloced {
- ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
- InstanceType type;
- Object* ptr;
- const char* str;
- double number;
-};
-
// A template-ized version of the IsXXX functions.
template <class C> inline bool Is(Object* obj);
@@ -1010,6 +1002,7 @@ class MaybeObject BASE_EMBEDDED {
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
+ V(ConstantPoolArray) \
V(Context) \
V(NativeContext) \
V(ScopeInfo) \
@@ -1054,7 +1047,8 @@ class MaybeObject BASE_EMBEDDED {
V(AccessCheckNeeded) \
V(Cell) \
V(PropertyCell) \
- V(ObjectHashTable)
+ V(ObjectHashTable) \
+ V(WeakHashTable)
#define ERROR_MESSAGES_LIST(V) \
@@ -1206,6 +1200,7 @@ class MaybeObject BASE_EMBEDDED {
V(kModuleStatement, "Module statement") \
V(kModuleVariable, "Module variable") \
V(kModuleUrl, "Module url") \
+ V(kNativeFunctionLiteral, "Native function literal") \
V(kNoCasesLeft, "no cases left") \
V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
"No empty arrays here in EmitFastAsciiArrayJoin") \
@@ -1214,6 +1209,7 @@ class MaybeObject BASE_EMBEDDED {
V(kNonSmiIndex, "Non-smi index") \
V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
V(kNonSmiValue, "Non-smi value") \
+ V(kNonObject, "Non-object value") \
V(kNotEnoughVirtualRegistersForValues, \
"not enough virtual registers for values") \
V(kNotEnoughSpillSlotsForOsr, \
@@ -1249,7 +1245,6 @@ class MaybeObject BASE_EMBEDDED {
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "register was clobbered") \
V(kScopedBlock, "ScopedBlock") \
- V(kSharedFunctionInfoLiteral, "Shared function info literal") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kStackFrameTypesMustMatch, "stack frame types must match") \
@@ -1370,10 +1365,6 @@ class Object : public MaybeObject {
inline bool IsExternal();
inline bool IsAccessorInfo();
- // Returns true if this object is an instance of the specified
- // function template.
- inline bool IsInstanceOf(FunctionTemplateInfo* type);
-
inline bool IsStruct();
#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -1381,6 +1372,7 @@ class Object : public MaybeObject {
INLINE(bool IsSpecObject());
INLINE(bool IsSpecFunction());
+ bool IsCallable();
// Oddball testing.
INLINE(bool IsUndefined());
@@ -1440,8 +1432,7 @@ class Object : public MaybeObject {
}
inline MaybeObject* AllocateNewStorageFor(Heap* heap,
- Representation representation,
- PretenureFlag tenure = NOT_TENURED);
+ Representation representation);
  // Returns true if the object is of the correct type to be used as an
// implementation of a JSObject's elements.
@@ -1467,6 +1458,12 @@ class Object : public MaybeObject {
MUST_USE_RESULT inline MaybeObject* GetProperty(
Name* key,
PropertyAttributes* attributes);
+
+ // TODO(yangguo): this should eventually replace the non-handlified version.
+ static Handle<Object> GetPropertyWithReceiver(Handle<Object> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver(
Object* receiver,
Name* key,
@@ -1508,11 +1505,19 @@ class Object : public MaybeObject {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype(Isolate* isolate);
+ Map* GetMarkerMap(Isolate* isolate);
+
+ // Returns the permanent hash code associated with this object. May return
+ // undefined if not yet created.
+ Object* GetHash();
// Returns the permanent hash code associated with this object depending on
- // the actual object type. Might return a failure in case no hash was
- // created yet or GC was caused by creation.
- MUST_USE_RESULT MaybeObject* GetHash(CreationFlag flag);
+ // the actual object type. May create and store a hash code if needed and none
+ // exists.
+ // TODO(rafaelw): Remove isolate parameter when objects.cc is fully
+ // handlified.
+ static Handle<Object> GetOrCreateHash(Handle<Object> object,
+ Isolate* isolate);
// Checks whether this object has the same value as the given one. This
// function is implemented according to ES5, section 9.12 and can be used
@@ -1950,42 +1955,27 @@ class JSReceiver: public HeapObject {
// Casting.
static inline JSReceiver* cast(Object* obj);
+ // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
static Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode =
+ MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetElement(Handle<JSReceiver> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* SetPropertyOrFail(
- Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
-
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetProperty(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetProperty(
- LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value);
+ // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
+ static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
+ static inline bool HasLocalProperty(Handle<JSReceiver>, Handle<Name> name);
+ static inline bool HasElement(Handle<JSReceiver> object, uint32_t index);
+ static inline bool HasLocalElement(Handle<JSReceiver> object, uint32_t index);
+ // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
static Handle<Object> DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
DeleteMode mode = NORMAL_DELETION);
@@ -2011,12 +2001,6 @@ class JSReceiver: public HeapObject {
inline PropertyAttributes GetElementAttribute(uint32_t index);
inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
- // Can cause a GC.
- inline bool HasProperty(Name* name);
- inline bool HasLocalProperty(Name* name);
- inline bool HasElement(uint32_t index);
- inline bool HasLocalElement(uint32_t index);
-
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -2024,8 +2008,13 @@ class JSReceiver: public HeapObject {
inline Object* GetConstructor();
// Retrieves a permanent object identity hash code. The undefined value might
- // be returned in case no hash was created yet and OMIT_CREATION was used.
- inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ // be returned in case no hash was created yet.
+ inline Object* GetIdentityHash();
+
+ // Retrieves a permanent object identity hash code. May create and store a
+ // hash code if needed and none exists.
+ inline static Handle<Object> GetOrCreateIdentityHash(
+ Handle<JSReceiver> object);
// Lookup a property. If found, the result is valid and has
// detailed information.
@@ -2036,15 +2025,30 @@ class JSReceiver: public HeapObject {
protected:
Smi* GenerateIdentityHash();
+ static Handle<Object> SetPropertyWithDefinedSetter(Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value);
+
private:
PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
LookupResult* result,
Name* name,
bool continue_search);
+ static Handle<Object> SetProperty(Handle<JSReceiver> receiver,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
+// Forward declaration for JSObject::GetOrCreateHiddenPropertiesHashTable.
+class ObjectHashTable;
+
// The JSObject describes real heap allocated JavaScript objects with
// properties.
// Note that the map of JSObject changes during execution to enable inline
@@ -2121,50 +2125,49 @@ class JSObject: public JSReceiver {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Requires: HasFastElements().
+ static Handle<FixedArray> EnsureWritableFastElements(
+ Handle<JSObject> object);
MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
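// Illustrative sketch (not part of the patch): during handlification each
// raw MaybeObject* method keeps a handle-based twin; new call sites prefer
// the GC-safe static form, with `object` an assumed Handle<JSObject>.
Handle<FixedArray> elements = JSObject::EnsureWritableFastElements(object);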
// Collects elements starting at index 0.
// Undefined values are placed after non-undefined values.
// Returns the number of non-undefined values.
- MUST_USE_RESULT MaybeObject* PrepareElementsForSort(uint32_t limit);
+ static Handle<Object> PrepareElementsForSort(Handle<JSObject> object,
+ uint32_t limit);
// As PrepareElementsForSort, but only on objects where elements is
// a dictionary, and it will stay a dictionary.
+ static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
+ uint32_t limit);
MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
- MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
- Object* structure,
- Name* name);
+ static Handle<Object> GetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Object> structure,
+ Handle<Name> name);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
- LookupResult* result,
- Name* name,
- Object* value,
- bool check_prototype,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
- Object* structure,
- Name* name,
- Object* value,
- JSObject* holder,
+ static Handle<Object> SetPropertyWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
- Name* name,
- Object* value,
+
+ static Handle<Object> SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
- Name* name,
- Object* value,
+
+ static Handle<Object> SetPropertyForResult(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
@@ -2183,53 +2186,34 @@ class JSObject: public JSReceiver {
static inline Handle<Map> FindTransitionToField(Handle<Map> map,
Handle<Name> key);
- inline int LastAddedFieldIndex();
-
// Extend the receiver with the single fast property that appears first in
// the passed map. This also extends the property backing store if necessary.
static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AllocateStorageForMap(Map* map);
+ // Migrates the given object to a map whose field representations are the
+ // lowest upper bound of all known representations for that field.
static void MigrateInstance(Handle<JSObject> instance);
- inline MUST_USE_RESULT MaybeObject* MigrateInstance();
+ // Migrates the given object only if the target map is already available,
+ // or returns an empty handle if such a map is not yet available.
static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
- inline MUST_USE_RESULT MaybeObject* TryMigrateInstance();
-
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
Object* GetNormalizedProperty(LookupResult* result);
- // Sets the property value in a normalized object given (key, value).
- // Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
- Handle<Object> value);
-
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(LookupResult* result,
- Object* value);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(Name* name,
- Object* value,
- PropertyDetails details);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyDetails details);
static void OptimizeAsPrototype(Handle<JSObject> object);
@@ -2253,6 +2237,15 @@ class JSObject: public JSReceiver {
uint32_t index,
bool continue_search);
+ // Retrieves an AccessorPair property from the given object. Might return
+ // undefined if the property doesn't exist or is of a different kind.
+ static Handle<Object> GetAccessor(Handle<JSObject> object,
+ Handle<Name> name,
+ AccessorComponent component);
+
+ // Defines an AccessorPair property on the given object.
+ // TODO(mstarzinger): Rename to SetAccessor() and return empty handle on
+ // exception instead of letting callers check for scheduled exception.
static void DefineAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
@@ -2260,24 +2253,19 @@ class JSObject: public JSReceiver {
PropertyAttributes attributes,
v8::AccessControl access_control = v8::DEFAULT);
- MaybeObject* LookupAccessor(Name* name, AccessorComponent component);
-
+ // Defines an AccessorInfo property on the given object.
static Handle<Object> SetAccessor(Handle<JSObject> object,
Handle<AccessorInfo> info);
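// Illustrative sketch (not part of the patch): defining an accessor pair
// and reading one component back; `getter` and `setter` are assumed
// Handle<Object> values.
JSObject::DefineAccessor(object, name, getter, setter, NONE);
Handle<Object> g = JSObject::GetAccessor(object, name, ACCESSOR_GETTER);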
- // Used from Object::GetProperty().
- MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- Name* name,
- PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor(
- Object* receiver,
- Name* name,
+ static Handle<Object> GetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes);
- MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor(
- Object* receiver,
- Name* name,
+ static Handle<Object> GetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ Handle<Name> name,
PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
Object* receiver,
@@ -2303,11 +2291,9 @@ class JSObject: public JSReceiver {
// Sets a hidden property on this object. Returns this object if successful,
// undefined if called on a detached proxy.
- static Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+ static Handle<Object> SetHiddenProperty(Handle<JSObject> object,
Handle<Name> key,
Handle<Object> value);
- // Returns a failure if a GC is required.
- MUST_USE_RESULT MaybeObject* SetHiddenProperty(Name* key, Object* value);
// Gets the value of a hidden property with the given key. Returns the hole
// if the property doesn't exist (or if called on a detached proxy),
// otherwise returns the value set for the key.
@@ -2319,8 +2305,7 @@ class JSObject: public JSReceiver {
// Returns true if the object has a property with the hidden string as name.
bool HasHiddenProperties();
- static int GetIdentityHash(Handle<JSObject> object);
- static void SetIdentityHash(Handle<JSObject> object, Smi* hash);
+ static void SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash);
inline void ValidateElements();
@@ -2361,31 +2346,14 @@ class JSObject: public JSReceiver {
return old_capacity + (old_capacity >> 1) + 16;
}
- PropertyType GetLocalPropertyType(Name* name);
- PropertyType GetLocalElementType(uint32_t index);
-
// These methods do not perform access checks!
AccessorPair* GetLocalPropertyAccessorPair(Name* name);
AccessorPair* GetLocalElementAccessorPair(uint32_t index);
- MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype);
-
- MUST_USE_RESULT MaybeObject* SetDictionaryElement(
- uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype,
- SetPropertyMode set_mode = SET_PROPERTY);
-
- MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode,
- bool check_prototype = true);
+ static Handle<Object> SetFastElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode,
+ bool check_prototype);
static Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
@@ -2397,14 +2365,6 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode,
- SetPropertyMode set_mode = SET_PROPERTY);
-
- // A Failure object is returned if GC is needed.
- MUST_USE_RESULT MaybeObject* SetElement(
- uint32_t index,
- Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype = true,
@@ -2421,6 +2381,11 @@ class JSObject: public JSReceiver {
kDontAllowSmiElements
};
+ static Handle<FixedArray> SetFastElementsCapacityAndLength(
+ Handle<JSObject> object,
+ int capacity,
+ int length,
+ SetFastElementsCapacitySmiMode smi_mode);
// Replace the elements' backing store with fast elements of the given
// capacity. Update the length for JSArrays. Returns the new backing
// store.
@@ -2428,6 +2393,10 @@ class JSObject: public JSReceiver {
int capacity,
int length,
SetFastElementsCapacitySmiMode smi_mode);
+ static void SetFastDoubleElementsCapacityAndLength(
+ Handle<JSObject> object,
+ int capacity,
+ int length);
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
@@ -2438,9 +2407,11 @@ class JSObject: public JSReceiver {
inline bool HasIndexedInterceptor();
// Support functions for v8 api (needed for correct interceptor behavior).
- bool HasRealNamedProperty(Isolate* isolate, Name* key);
- bool HasRealElementProperty(Isolate* isolate, uint32_t index);
- bool HasRealNamedCallbackProperty(Isolate* isolate, Name* key);
+ static bool HasRealNamedProperty(Handle<JSObject> object,
+ Handle<Name> key);
+ static bool HasRealElementProperty(Handle<JSObject> object, uint32_t index);
+ static bool HasRealNamedCallbackProperty(Handle<JSObject> object,
+ Handle<Name> key);
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
@@ -2456,8 +2427,6 @@ class JSObject: public JSReceiver {
void LocalLookupRealNamedProperty(Name* name, LookupResult* result);
void LookupRealNamedProperty(Name* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result);
- MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
void LookupCallbackProperty(Name* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@@ -2483,32 +2452,6 @@ class JSObject: public JSReceiver {
// Returns the number of enumerable elements.
int GetEnumElementKeys(FixedArray* storage);
- // Add a property to a fast-case object using a map transition to
- // new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(
- Map* new_map,
- Name* name,
- Object* value,
- int field_index,
- Representation representation);
-
- // Add a constant function property to a fast-case object.
- // This leaves a CONSTANT_TRANSITION in the old map, and
- // if it is called on a second object with this map, a
- // normal property is added instead, with a map transition.
- // This avoids the creation of many maps with the same constant
- // function, all orphaned.
- MUST_USE_RESULT MaybeObject* AddConstantProperty(
- Name* name,
- Object* constant,
- PropertyAttributes attributes,
- TransitionFlag flag);
-
- MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes);
-
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
@@ -2519,43 +2462,17 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
ElementsKind elements_kind);
- static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind);
+ static void TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
- MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
-
- MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
- MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
- int modify_index,
- Representation new_representation,
- StoreMode store_mode);
- // Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- TransitionFlag flag = INSERT_TRANSITION);
-
- // Add a property to a slow-case object.
- MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Add a property to an object. May cause GC.
- MUST_USE_RESULT MaybeObject* AddProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- TransitionFlag flag = INSERT_TRANSITION);
+ // TODO(mstarzinger): Both public because of ConvertAndSetLocalProperty().
+ static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
+ static void GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2565,10 +2482,6 @@ class JSObject: public JSReceiver {
PropertyNormalizationMode mode,
int expected_additional_properties);
- MUST_USE_RESULT MaybeObject* NormalizeProperties(
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-
// Convert and update the elements backing store to be a
// SeededNumberDictionary dictionary. Returns the backing after conversion.
static Handle<SeededNumberDictionary> NormalizeElements(
@@ -2577,13 +2490,9 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* NormalizeElements();
// Transform slow named properties to fast variants.
- // Returns failure if allocation failed.
static void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
- MUST_USE_RESULT MaybeObject* TransformToFastProperties(
- int unused_property_fields);
-
// Access fast-case object properties at index.
MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
Representation representation,
@@ -2616,22 +2525,30 @@ class JSObject: public JSReceiver {
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- // Casting.
- static inline JSObject* cast(Object* obj);
-
// Disallow further properties to be added to the object.
static Handle<Object> PreventExtensions(Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* PreventExtensions();
// ES5 Object.freeze
- MUST_USE_RESULT MaybeObject* Freeze(Isolate* isolate);
-
+ static Handle<Object> Freeze(Handle<JSObject> object);
// Called the first time an object is observed with ES7 Object.observe.
- MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate);
+ static void SetObserved(Handle<JSObject> object);
+
+ // Copy object.
+ enum DeepCopyHints {
+ kNoHints = 0,
+ kObjectIsShallowArray = 1
+ };
+
+ static Handle<JSObject> Copy(Handle<JSObject> object);
+ static Handle<JSObject> DeepCopy(Handle<JSObject> object,
+ AllocationSiteUsageContext* site_context,
+ DeepCopyHints hints = kNoHints);
+ static Handle<JSObject> DeepWalk(Handle<JSObject> object,
+ AllocationSiteCreationContext* site_context);
- // Copy object
- MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
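// Illustrative sketch (not part of the patch): deep-copying a boilerplate
// while recording allocation-site usage; the AllocationSiteUsageContext
// constructor arguments shown here are assumptions.
AllocationSiteUsageContext usage_context(isolate, site, true);
Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context);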
+ // Casting.
+ static inline JSObject* cast(Object* obj);
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
@@ -2670,6 +2587,14 @@ class JSObject: public JSReceiver {
void IncrementSpillStatistics(SpillInformation* info);
#endif
+
+#ifdef VERIFY_HEAP
+ // If a GC was caused while constructing this object, the elements pointer
+ // may point to a one pointer filler map. The object won't be rooted, but
+ // our heap verification code could stumble across it.
+ bool ElementsAreSafeToExamine();
+#endif
+
Object* SlowReverseLookup(Object* value);
// Maximal number of fast properties for the JSObject. Used to
@@ -2727,21 +2652,22 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> old_value);
- // Deliver change records to observers. May cause GC.
- static void DeliverChangeRecords(Isolate* isolate);
-
private:
friend class DictionaryElementsAccessor;
friend class JSReceiver;
+ friend class Object;
- // TODO(mstarzinger): Soon to be handlified.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+ static void UpdateAllocationSite(Handle<JSObject> object,
+ ElementsKind to_kind);
+ MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
+
+ // Used from Object::GetProperty().
+ static Handle<Object> GetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ Handle<Object> receiver,
+ LookupResult* result,
+ Handle<Name> name,
+ PropertyAttributes* attributes);
MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
Object* structure,
@@ -2755,37 +2681,129 @@ class JSObject: public JSReceiver {
JSReceiver* receiver,
uint32_t index,
bool continue_search);
- MUST_USE_RESULT MaybeObject* SetElementWithCallback(
- Object* structure,
+ static Handle<Object> SetElementWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
uint32_t index,
- Object* value,
- JSObject* holder,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
+ static Handle<Object> SetElementWithInterceptor(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
+ static Handle<Object> SetElementWithoutInterceptor(
+ Handle<JSObject> object,
uint32_t index,
- Object* value,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
+ static Handle<Object> SetElementWithCallbackSetterInPrototypes(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ bool* found,
+ StrictModeFlag strict_mode);
+ static Handle<Object> SetDictionaryElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool check_prototype,
+ SetPropertyMode set_mode = SET_PROPERTY);
+ static Handle<Object> SetFastDoubleElement(
+ Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode,
+ bool check_prototype = true);
// Searches the prototype chain for property 'name'. If it is found and
// has a setter, invoke it and set '*done' to true. If it is found and is
// read-only, reject and set '*done' to true. Otherwise, set '*done' to
- // false. Can cause GC and can return a failure result with '*done==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes(
- Name* name,
- Object* value,
+ // false. Can throw and return an empty handle with '*done==true'.
+ static Handle<Object> SetPropertyViaPrototypes(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
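// Illustrative sketch (not part of the patch): the '*done' protocol
// described above; an empty result handle signals a pending exception.
bool done = false;
Handle<Object> result = SetPropertyViaPrototypes(
    object, name, value, attributes, strict_mode, &done);
if (done) return result;  // a prototype accessor handled (or rejected) it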
+ static Handle<Object> SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+ static Handle<Object> SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+ static Handle<Object> SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
+ bool check_prototype,
+ StrictModeFlag strict_mode);
+
+ // Add a property to an object.
+ static Handle<Object> AddProperty(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ TransitionFlag flag = INSERT_TRANSITION);
+
+ // Add a constant function property to a fast-case object.
+ // This leaves a CONSTANT_TRANSITION in the old map, and
+ // if it is called on a second object with this map, a
+ // normal property is added instead, with a map transition.
+ // This avoids the creation of many maps with the same constant
+ // function, all orphaned.
+ static void AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object.
+ static void AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object using a map transition to
+ // new_map.
+ static void AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation);
+
+ // Add a property to a slow-case object.
+ static void AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
static Handle<Object> DeleteProperty(Handle<JSObject> object,
Handle<Name> name,
@@ -2849,23 +2867,25 @@ class JSObject: public JSReceiver {
Handle<Object> accessor,
PropertyAttributes attributes);
- enum InitializeHiddenProperties {
- CREATE_NEW_IF_ABSENT,
- ONLY_RETURN_INLINE_VALUE
- };
- // If create_if_absent is true, return the hash table backing store
- // for hidden properties. If there is no backing store, allocate one.
- // If create_if_absent is false, return the hash table backing store
- // or the inline stored identity hash, whatever is found.
- MUST_USE_RESULT MaybeObject* GetHiddenPropertiesHashTable(
- InitializeHiddenProperties init_option);
+ // Return the hash table backing store or the inline stored identity hash,
+ // whatever is found.
+ MUST_USE_RESULT Object* GetHiddenPropertiesHashTable();
+
+ // Return the hash table backing store for hidden properties. If there is no
+ // backing store, allocate one.
+ static Handle<ObjectHashTable> GetOrCreateHiddenPropertiesHashtable(
+ Handle<JSObject> object);
+
// Set the hidden property backing store to either a hash table or
// the inline-stored identity hash.
- MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
- Object* value);
+ static Handle<Object> SetHiddenPropertiesHashTable(
+ Handle<JSObject> object,
+ Handle<Object> value);
+
+ MUST_USE_RESULT Object* GetIdentityHash();
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ static Handle<Object> GetOrCreateIdentityHash(Handle<JSObject> object);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2919,7 +2939,8 @@ class FixedArray: public FixedArrayBase {
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
- MUST_USE_RESULT MaybeObject* CopySize(int new_length);
+ MUST_USE_RESULT MaybeObject* CopySize(int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
// Add the elements of a JSArray to this FixedArray.
MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array);
@@ -3042,6 +3063,100 @@ class FixedDoubleArray: public FixedArrayBase {
};
+// ConstantPoolArray describes a fixed-size array containing constant pool
+// entries.
+// The format of the pool is:
+// [0]: Field holding the first index which is a pointer entry
+// [1]: Field holding the first index which is an int32 entry
+// [2] ... [first_ptr_index() - 1]: 64-bit entries
+// [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries
+// [first_int32_index()] ... [length - 1]: 32-bit entries
+class ConstantPoolArray: public FixedArrayBase {
+ public:
+ // Getters for the field storing the first index for different type entries.
+ inline int first_ptr_index();
+ inline int first_int64_index();
+ inline int first_int32_index();
+
+ // Getters for counts of different type entries.
+ inline int count_of_ptr_entries();
+ inline int count_of_int64_entries();
+ inline int count_of_int32_entries();
+
+ // Setter and getter for pool elements.
+ inline Object* get_ptr_entry(int index);
+ inline int64_t get_int64_entry(int index);
+ inline int32_t get_int32_entry(int index);
+ inline double get_int64_entry_as_double(int index);
+
+ inline void set(int index, Object* value);
+ inline void set(int index, int64_t value);
+ inline void set(int index, double value);
+ inline void set(int index, int32_t value);
+
+ // Set up initial state.
+ inline void SetEntryCounts(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries);
+
+ // Copy operations
+ MUST_USE_RESULT inline MaybeObject* Copy();
+
+ // Garbage collection support.
+ inline static int SizeFor(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ return RoundUp(OffsetAt(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries),
+ kPointerSize);
+ }
+
+ // Code Generation support.
+ inline int OffsetOfElementAt(int index) {
+ ASSERT(index < length());
+ if (index >= first_int32_index()) {
+ return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(),
+ index - first_int32_index());
+ } else if (index >= first_ptr_index()) {
+ return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0);
+ } else {
+ return OffsetAt(index, 0, 0);
+ }
+ }
+
+ // Casting.
+ static inline ConstantPoolArray* cast(Object* obj);
+
+ // Layout description.
+ static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize;
+ static const int kFirstInt32IndexOffset =
+ kFirstPointerIndexOffset + kPointerSize;
+ static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize;
+
+ // Dispatched behavior.
+ void ConstantPoolIterateBody(ObjectVisitor* v);
+
+ DECLARE_PRINTER(ConstantPoolArray)
+ DECLARE_VERIFIER(ConstantPoolArray)
+
+ private:
+ inline void set_first_ptr_index(int value);
+ inline void set_first_int32_index(int value);
+
+ inline static int OffsetAt(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ return kFirstOffset
+ + (number_of_int64_entries * kInt64Size)
+ + (number_of_ptr_entries * kPointerSize)
+ + (number_of_int32_entries * kInt32Size);
+ }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray);
+};
+
+
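// Illustrative layout example (not part of the patch): with two int64
// entries, three pointer entries and one int32 entry on an assumed 64-bit
// target (kPointerSize == 8, FixedArray::kHeaderSize == 16), kFirstOffset
// is 32, so OffsetAt(2, 3, 1) == 32 + 2*8 + 3*8 + 1*4 == 76 and SizeFor()
// rounds this up to 80 bytes.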
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of these objects is:
// [0]: Number of descriptors
@@ -3175,6 +3290,13 @@ class DescriptorArray: public FixedArray {
DescriptorArray* src,
int src_index,
const WhitenessWitness&);
+ static Handle<DescriptorArray> Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other);
MUST_USE_RESULT MaybeObject* Merge(int verbatim,
int valid,
int new_size,
@@ -3191,6 +3313,10 @@ class DescriptorArray: public FixedArray {
return CopyUpToAddAttributes(enumeration_index, NONE);
}
+ static Handle<DescriptorArray> CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes(
int enumeration_index,
PropertyAttributes attributes);
@@ -3256,10 +3382,6 @@ class DescriptorArray: public FixedArray {
bool IsEqualTo(DescriptorArray* other);
#endif
- // The maximum number of descriptors we want in a descriptor array (should
- // fit in a page).
- static const int kMaxNumberOfDescriptors = 1024 + 512;
-
// Returns the fixed array length required to hold number_of_descriptors
// descriptors.
static int LengthFor(int number_of_descriptors) {
@@ -3369,11 +3491,6 @@ class BaseShape {
template<typename Shape, typename Key>
class HashTable: public FixedArray {
public:
- enum MinimumCapacity {
- USE_DEFAULT_MINIMUM_CAPACITY,
- USE_CUSTOM_MINIMUM_CAPACITY
- };
-
// Wrapper methods
inline uint32_t Hash(Key key) {
if (Shape::UsesSeed) {
@@ -3484,6 +3601,9 @@ class HashTable: public FixedArray {
void Rehash(Key key);
protected:
+ friend class ObjectHashSet;
+ friend class ObjectHashTable;
+
// Find the entry at which to insert element with the given key that
// has the given hash value.
uint32_t FindInsertionEntry(uint32_t hash);
@@ -3543,7 +3663,10 @@ class HashTable: public FixedArray {
MUST_USE_RESULT MaybeObject* Shrink(Key key);
// Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
+ MUST_USE_RESULT MaybeObject* EnsureCapacity(
+ int n,
+ Key key,
+ PretenureFlag pretenure = NOT_TENURED);
};
@@ -3858,6 +3981,11 @@ class SeededNumberDictionary
// Type specific at put (default NONE attributes is used when adding).
MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
+ MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value,
+ PropertyDetails details);
MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key,
Object* value,
PropertyDetails details);
@@ -3944,11 +4072,23 @@ class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
// Looks up whether the given key is part of this hash set.
bool Contains(Object* key);
+ static Handle<ObjectHashSet> EnsureCapacity(
+ Handle<ObjectHashSet> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Attempt to shrink hash table after removal of key.
+ static Handle<ObjectHashSet> Shrink(Handle<ObjectHashSet> table,
+ Handle<Object> key);
+
// Adds the given key to this hash set.
- MUST_USE_RESULT MaybeObject* Add(Object* key);
+ static Handle<ObjectHashSet> Add(Handle<ObjectHashSet> table,
+ Handle<Object> key);
// Removes the given key from this hash set.
- MUST_USE_RESULT MaybeObject* Remove(Object* key);
+ static Handle<ObjectHashSet> Remove(Handle<ObjectHashSet> table,
+ Handle<Object> key);
};
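// Illustrative sketch (not part of the patch): Add and Remove may
// reallocate the set, so the returned handle replaces the old one;
// `set` and `key` are assumed pre-existing handles.
set = ObjectHashSet::Add(set, key);
if (set->Contains(*key)) set = ObjectHashSet::Remove(set, key);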
@@ -3961,13 +4101,25 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
return reinterpret_cast<ObjectHashTable*>(obj);
}
+ static Handle<ObjectHashTable> EnsureCapacity(
+ Handle<ObjectHashTable> table,
+ int n,
+ Handle<Object> key,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Attempt to shrink hash table after removal of key.
+ static Handle<ObjectHashTable> Shrink(Handle<ObjectHashTable> table,
+ Handle<Object> key);
+
// Looks up the value associated with the given key. The hole value is
// returned in case the key is not present.
Object* Lookup(Object* key);
// Adds (or overwrites) the value associated with the given key. Mapping a
// key to the hole value causes removal of the whole entry.
- MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+ static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
+ Handle<Object> key,
+ Handle<Object> value);
private:
friend class MarkCompactCollector;
@@ -3982,6 +4134,58 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
};
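// Illustrative sketch (not part of the patch): the same rewiring rule
// applies to ObjectHashTable::Put; Lookup returns the hole when absent.
table = ObjectHashTable::Put(table, key, value);
Object* found = table->Lookup(*key);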
+template <int entrysize>
+class WeakHashTableShape : public BaseShape<Object*> {
+ public:
+ static inline bool IsMatch(Object* key, Object* other);
+ static inline uint32_t Hash(Object* key);
+ static inline uint32_t HashForObject(Object* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ Object* key);
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = entrysize;
+};
+
+
+// WeakHashTable maps keys that are arbitrary objects to object values.
+// It is used for the global weak hash table that maps objects
+// embedded in optimized code to dependent code lists.
+class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> {
+ public:
+ static inline WeakHashTable* cast(Object* obj) {
+ ASSERT(obj->IsHashTable());
+ return reinterpret_cast<WeakHashTable*>(obj);
+ }
+
+ // Looks up the value associated with the given key. The hole value is
+ // returned in case the key is not present.
+ Object* Lookup(Object* key);
+
+ // Adds (or overwrites) the value associated with the given key. Mapping a
+ // key to the hole value causes removal of the whole entry.
+ MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+
+ // This function is called when heap verification is turned on.
+ void Zap(Object* value) {
+ int capacity = Capacity();
+ for (int i = 0; i < capacity; i++) {
+ set(EntryToIndex(i), value);
+ set(EntryToValueIndex(i), value);
+ }
+ }
+
+ private:
+ friend class MarkCompactCollector;
+
+ void AddEntry(int entry, Object* key, Object* value);
+
+ // Returns the index to the value of an entry.
+ static inline int EntryToValueIndex(int entry) {
+ return EntryToIndex(entry) + 1;
+ }
+};
+
+
// JSFunctionResultCache caches results of some JSFunction invocation.
// It is a fixed array with fixed structure:
// [0]: factory function
@@ -4120,9 +4324,9 @@ class ScopeInfo : public FixedArray {
// Copies all the context locals into an object used to materialize a scope.
- bool CopyContextLocalsToScopeObject(Isolate* isolate,
- Handle<Context> context,
- Handle<JSObject> scope_object);
+ static bool CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object);
static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
@@ -4233,8 +4437,9 @@ class NormalizedMapCache: public FixedArray {
public:
static const int kEntries = 64;
- MUST_USE_RESULT MaybeObject* Get(JSObject* object,
- PropertyNormalizationMode mode);
+ static Handle<Map> Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> object,
+ PropertyNormalizationMode mode);
void Clear();
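// Illustrative sketch (not part of the patch): querying the cache through
// the new static interface; KEEP_INOBJECT_PROPERTIES is one of the two
// PropertyNormalizationMode values.
Handle<Map> map =
    NormalizedMapCache::Get(cache, object, KEEP_INOBJECT_PROPERTIES);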
@@ -4405,6 +4610,10 @@ class ExternalByteArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, int8_t value);
+ static Handle<Object> SetValue(Handle<ExternalByteArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
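// Illustrative sketch (not part of the patch): each external array type
// below gains a static, handle-based SetValue like this one; it applies
// the Smi/HeapNumber/undefined conversion and returns the stored value.
Handle<Object> stored = ExternalByteArray::SetValue(array, 0, value);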
@@ -4428,6 +4637,10 @@ class ExternalUnsignedByteArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, uint8_t value);
+ static Handle<Object> SetValue(Handle<ExternalUnsignedByteArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4451,6 +4664,10 @@ class ExternalShortArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, int16_t value);
+ static Handle<Object> SetValue(Handle<ExternalShortArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4474,6 +4691,10 @@ class ExternalUnsignedShortArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, uint16_t value);
+ static Handle<Object> SetValue(Handle<ExternalUnsignedShortArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4497,6 +4718,10 @@ class ExternalIntArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, int32_t value);
+ static Handle<Object> SetValue(Handle<ExternalIntArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4520,6 +4745,10 @@ class ExternalUnsignedIntArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, uint32_t value);
+ static Handle<Object> SetValue(Handle<ExternalUnsignedIntArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4543,6 +4772,10 @@ class ExternalFloatArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, float value);
+ static Handle<Object> SetValue(Handle<ExternalFloatArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4566,6 +4799,10 @@ class ExternalDoubleArray: public ExternalArray {
MUST_USE_RESULT inline MaybeObject* get(int index);
inline void set(int index, double value);
+ static Handle<Object> SetValue(Handle<ExternalDoubleArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
@@ -4772,6 +5009,7 @@ class Code: public HeapObject {
V(FUNCTION) \
V(OPTIMIZED_FUNCTION) \
V(STUB) \
+ V(HANDLER) \
V(BUILTIN) \
V(REGEXP)
@@ -4807,22 +5045,10 @@ class Code: public HeapObject {
// Types of stubs.
enum StubType {
NORMAL,
- FIELD,
- CONSTANT,
- CALLBACKS,
- INTERCEPTOR,
- MAP_TRANSITION,
- NONEXISTENT
- };
-
- enum StubHolder {
- OWN_STUB,
- PROTOTYPE_STUB
+ FAST
};
- typedef int ExtraICState;
-
- static const ExtraICState kNoExtraICState = 0;
+ static const int kPrologueOffsetNotSet = -1;
#ifdef ENABLE_DISASSEMBLER
// Printing
@@ -4839,6 +5065,7 @@ class Code: public HeapObject {
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
void InvalidateRelocation();
+ void InvalidateEmbeddedObjects();
// [handler_table]: Fixed array containing offsets of exception handlers.
DECL_ACCESSORS(handler_table, FixedArray)
@@ -4846,13 +5073,15 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [type_feedback_info]: This field stores various things, depending on the
- // kind of the code object.
+ // [raw_type_feedback_info]: This field stores various things, depending on
+ // the kind of the code object.
// FUNCTION => type feedback information.
// STUB => various things, e.g. a SMI
// OPTIMIZED_FUNCTION => the next_code_link for optimized code list.
- DECL_ACCESSORS(type_feedback_info, Object)
- inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
+ DECL_ACCESSORS(raw_type_feedback_info, Object)
+ inline Object* type_feedback_info();
+ inline void set_type_feedback_info(
+ Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline int stub_info();
inline void set_stub_info(int info);
@@ -4886,6 +5115,9 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
+ inline Kind handler_kind() {
+ return static_cast<Kind>(arguments_count());
+ }
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
@@ -4895,7 +5127,8 @@ class Code: public HeapObject {
// TODO(danno): This is a bit of a hack right now since there are still
// clients of this API that pass "extra" values in for argc. These clients
// should be retrofitted to use ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
+ kind == BINARY_OP_IC;
}
inline StubType type(); // Only valid for monomorphic IC stubs.
@@ -4904,6 +5137,7 @@ class Code: public HeapObject {
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
inline bool is_debug_stub();
+ inline bool is_handler() { return kind() == HANDLER; }
inline bool is_load_stub() { return kind() == LOAD_IC; }
inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
inline bool is_store_stub() { return kind() == STORE_IC; }
@@ -4914,21 +5148,21 @@ class Code: public HeapObject {
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
+ inline bool is_keyed_stub();
+
+ inline void set_raw_kind_specific_flags1(int value);
+ inline void set_raw_kind_specific_flags2(int value);
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
inline int major_key();
inline void set_major_key(int value);
+ inline bool has_major_key();
// For kind STUB or ICs, tells whether or not a code object was generated by
// the optimizing compiler (but it may not be an optimized function).
bool is_crankshafted();
inline void set_is_crankshafted(bool value);
- // For stubs, tells whether they should always exist, so that they can be
- // called from other stubs.
- inline bool is_pregenerated();
- inline void set_is_pregenerated(bool value);
-
// [optimizable]: For FUNCTION kind, tells if it is optimizable.
inline bool optimizable();
inline void set_optimizable(bool value);
@@ -4997,8 +5231,6 @@ class Code: public HeapObject {
inline bool marked_for_deoptimization();
inline void set_marked_for_deoptimization(bool flag);
- bool allowed_in_shared_map_code_cache();
-
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -5009,42 +5241,29 @@ class Code: public HeapObject {
// Find the first map in an IC stub.
Map* FindFirstMap();
void FindAllMaps(MapHandleList* maps);
+ void FindAllTypes(TypeHandleList* types);
void ReplaceFirstMap(Map* replace);
- // Find the first code in an IC stub.
- Code* FindFirstCode();
- void FindAllCode(CodeHandleList* code_list, int length);
+ // Find the first handler in an IC stub.
+ Code* FindFirstHandler();
+
+ // Find |length| handlers and put them into |code_list|. Returns false if not
+ // enough handlers can be found.
+ bool FindHandlers(CodeHandleList* code_list, int length = -1);
// Find the first name in an IC stub.
Name* FindFirstName();
void ReplaceNthCell(int n, Cell* replace_with);
- class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
- class ExtraICStateKeyedAccessStoreMode:
- public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
-
- class ExtraICStateStubHolder: public BitField<StubHolder, 0, 1> {};
-
- static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
- return ExtraICStateStrictMode::decode(extra_ic_state);
- }
-
- static inline KeyedAccessStoreMode GetKeyedAccessStoreMode(
- ExtraICState extra_ic_state) {
- return ExtraICStateKeyedAccessStoreMode::decode(extra_ic_state);
- }
-
- static inline ExtraICState ComputeExtraICState(
- KeyedAccessStoreMode store_mode,
- StrictModeFlag strict_mode) {
- return ExtraICStateKeyedAccessStoreMode::encode(store_mode) |
- ExtraICStateStrictMode::encode(strict_mode);
- }
-
- static inline ExtraICState ComputeExtraICState(StubHolder stub_holder) {
- return ExtraICStateStubHolder::encode(stub_holder);
- }
+ // The entire code object including its header is copied verbatim to the
+ // snapshot so that it can be written in one fast memcpy during
+ // deserialization. The deserializer will overwrite some pointers, rather
+ // like a runtime linker, but the random allocation addresses used in the
+ // mksnapshot process would still be present in the unlinked snapshot data,
+ // which would make snapshot production non-reproducible. This method wipes
+ // out the to-be-overwritten header data for reproducible snapshots.
+ inline void WipeOutHeader();
// Flags operations.
static inline Flags ComputeFlags(
@@ -5058,9 +5277,9 @@ class Code: public HeapObject {
static inline Flags ComputeMonomorphicFlags(
Kind kind,
ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag holder = OWN_MAP,
StubType type = NORMAL,
- int argc = -1,
- InlineCacheHolderFlag holder = OWN_MAP);
+ int argc = -1);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline StubType ExtractTypeFromFlags(Flags flags);
@@ -5136,17 +5355,24 @@ class Code: public HeapObject {
DECLARE_VERIFIER(Code)
void ClearInlineCaches();
+ void ClearInlineCaches(Kind kind);
+
void ClearTypeFeedbackCells(Heap* heap);
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
- kNoAge = 0,
+ kNotExecutedCodeAge = -2,
+ kExecutedOnceCodeAge = -1,
+ kNoAgeCodeAge = 0,
CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
kAfterLastCodeAge,
+ kFirstCodeAge = kNotExecutedCodeAge,
kLastCodeAge = kAfterLastCodeAge - 1,
- kCodeAgeCount = kAfterLastCodeAge - 1
+ kCodeAgeCount = kAfterLastCodeAge - kNotExecutedCodeAge - 1,
+ kIsOldCodeAge = kSexagenarianCodeAge,
+ kPreAgedCodeAge = kIsOldCodeAge - 1
};
#undef DECLARE_CODE_AGE_ENUM
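// Illustrative note (not part of the patch): kAfterLastCodeAge is one past
// the last CODE_AGE_LIST entry, so the new
// kCodeAgeCount = kAfterLastCodeAge - kNotExecutedCodeAge - 1 widens the
// old count (the list entries alone) by the two negative pseudo-ages,
// while kNoAgeCodeAge itself stays excluded from the count.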
@@ -5154,19 +5380,28 @@ class Code: public HeapObject {
// being entered through the prologue. Used to determine when it is
// relatively safe to flush this code object and replace it with the lazy
// compilation stub.
- static void MakeCodeAgeSequenceYoung(byte* sequence);
+ static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
+ static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
void MakeOlder(MarkingParity);
static bool IsYoungSequence(byte* sequence);
bool IsOld();
- int GetAge();
+ Age GetAge();
+ // Gets the raw code age, including pseudo code-age values such as
+ // kNotExecutedCodeAge and kExecutedOnceCodeAge.
+ Age GetRawAge();
+ static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
+ return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
+ }
- void PrintDeoptLocation(int bailout_id);
+ void PrintDeoptLocation(FILE* out, int bailout_id);
bool CanDeoptAt(Address pc);
#ifdef VERIFY_HEAP
- void VerifyEmbeddedMapsDependency();
+ void VerifyEmbeddedObjectsDependency();
#endif
+ static bool IsWeakEmbeddedObject(Kind kind, Object* object);
+
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -5212,13 +5447,13 @@ class Code: public HeapObject {
// Flags layout. BitField<type, shift, size>.
class ICStateField: public BitField<InlineCacheState, 0, 3> {};
- class TypeField: public BitField<StubType, 3, 3> {};
- class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {};
- class KindField: public BitField<Kind, 7, 4> {};
- class IsPregeneratedField: public BitField<bool, 11, 1> {};
- class ExtraICStateField: public BitField<ExtraICState, 12, 5> {};
- class ExtendedExtraICStateField: public BitField<ExtraICState, 12,
- PlatformSmiTagging::kSmiValueSize - 12 + 1> {}; // NOLINT
+ class TypeField: public BitField<StubType, 3, 1> {};
+ class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {};
+ class KindField: public BitField<Kind, 6, 4> {};
+ // TODO(bmeurer): Bit 10 is available for free use. :-)
+ class ExtraICStateField: public BitField<ExtraICState, 11, 6> {};
+ class ExtendedExtraICStateField: public BitField<ExtraICState, 11,
+ PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift);
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
@@ -5253,7 +5488,7 @@ class Code: public HeapObject {
static const int kStubMajorKeyFirstBit = kIsCrankshaftedBit + 1;
static const int kSafepointTableOffsetFirstBit =
kStubMajorKeyFirstBit + kStubMajorKeyBits;
- static const int kSafepointTableOffsetBitCount = 25;
+ static const int kSafepointTableOffsetBitCount = 24;
STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
STATIC_ASSERT(kSafepointTableOffsetFirstBit +
@@ -5294,16 +5529,19 @@ class Code: public HeapObject {
private:
friend class RelocIterator;
+ void ClearInlineCaches(Kind* kind);
+
// Code aging
byte* FindCodeAgeSequence();
static void GetCodeAgeAndParity(Code* code, Age* age,
MarkingParity* parity);
static void GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity);
- static Code* GetCodeAgeStub(Age age, MarkingParity parity);
+ static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
// Code aging -- platform-specific
- static void PatchPlatformCodeAge(byte* sequence, Age age,
+ static void PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence, Age age,
MarkingParity parity);
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@@ -5351,7 +5589,13 @@ class DependentCode: public FixedArray {
// Group of code that depends on global property values in property cells
// not being changed.
kPropertyCellChangedGroup,
- kGroupCount = kPropertyCellChangedGroup + 1
+ // Group of code that depends on tenuring information in AllocationSites
+ // not being changed.
+ kAllocationSiteTenuringChangedGroup,
+ // Group of code that depends on element transition information in
+ // AllocationSites not being changed.
+ kAllocationSiteTransitionChangedGroup,
+ kGroupCount = kAllocationSiteTransitionChangedGroup + 1
};
// Array for holding the index of the first code object of each group.
@@ -5445,17 +5689,20 @@ class Map: public HeapObject {
inline uint32_t bit_field3();
inline void set_bit_field3(uint32_t bits);
- class EnumLengthBits: public BitField<int, 0, 11> {};
- class NumberOfOwnDescriptorsBits: public BitField<int, 11, 11> {};
- class IsShared: public BitField<bool, 22, 1> {};
- class FunctionWithPrototype: public BitField<bool, 23, 1> {};
- class DictionaryMap: public BitField<bool, 24, 1> {};
- class OwnsDescriptors: public BitField<bool, 25, 1> {};
- class IsObserved: public BitField<bool, 26, 1> {};
- class Deprecated: public BitField<bool, 27, 1> {};
- class IsFrozen: public BitField<bool, 28, 1> {};
- class IsUnstable: public BitField<bool, 29, 1> {};
- class IsMigrationTarget: public BitField<bool, 30, 1> {};
+ class EnumLengthBits: public BitField<int,
+ 0, kDescriptorIndexBitCount> {}; // NOLINT
+ class NumberOfOwnDescriptorsBits: public BitField<int,
+ kDescriptorIndexBitCount, kDescriptorIndexBitCount> {}; // NOLINT
+ STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
+ class IsShared: public BitField<bool, 20, 1> {};
+ class FunctionWithPrototype: public BitField<bool, 21, 1> {};
+ class DictionaryMap: public BitField<bool, 22, 1> {};
+ class OwnsDescriptors: public BitField<bool, 23, 1> {};
+ class HasInstanceCallHandler: public BitField<bool, 24, 1> {};
+ class Deprecated: public BitField<bool, 25, 1> {};
+ class IsFrozen: public BitField<bool, 26, 1> {};
+ class IsUnstable: public BitField<bool, 27, 1> {};
+ class IsMigrationTarget: public BitField<bool, 28, 1> {};
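// Illustrative layout note (not part of the patch): with the STATIC_ASSERT
// above fixing kDescriptorIndexBitCount at 10, bit_field3 now packs
// EnumLengthBits in bits 0-9, NumberOfOwnDescriptorsBits in bits 10-19,
// and the boolean flags IsShared through IsMigrationTarget in bits 20-28.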
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5514,12 +5761,12 @@ class Map: public HeapObject {
}
// Tells whether the instance has a call-as-function handler.
- inline void set_has_instance_call_handler() {
- set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
+ inline void set_is_observed() {
+ set_bit_field(bit_field() | (1 << kIsObserved));
}
- inline bool has_instance_call_handler() {
- return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
+ inline bool is_observed() {
+ return ((1 << kIsObserved) & bit_field()) != 0;
}
inline void set_is_extensible(bool value);
@@ -5528,10 +5775,6 @@ class Map: public HeapObject {
inline void set_elements_kind(ElementsKind elements_kind) {
ASSERT(elements_kind < kElementsKindCount);
ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
- ASSERT(!is_observed() ||
- elements_kind == DICTIONARY_ELEMENTS ||
- elements_kind == NON_STRICT_ARGUMENTS_ELEMENTS ||
- IsExternalArrayElementsKind(elements_kind));
set_bit_field2((bit_field2() & ~kElementsKindMask) |
(elements_kind << kElementsKindShift));
ASSERT(this->elements_kind() == elements_kind);
@@ -5584,6 +5827,10 @@ class Map: public HeapObject {
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
+ // Returns true if the current map doesn't have DICTIONARY_ELEMENTS but a
+ // map with DICTIONARY_ELEMENTS was found in the prototype chain.
+ bool DictionaryElementsInPrototypeChainOnly();
+
inline bool HasTransitionArray();
inline bool HasElementsTransition();
inline Map* elements_transition_map();
@@ -5591,6 +5838,12 @@ class Map: public HeapObject {
Map* transitioned_map);
inline void SetTransition(int transition_index, Map* target);
inline Map* GetTransition(int transition_index);
+
+ static Handle<TransitionArray> AddTransition(Handle<Map> map,
+ Handle<Name> key,
+ Handle<Map> target,
+ SimpleTransitionFlag flag);
+
MUST_USE_RESULT inline MaybeObject* AddTransition(Name* key,
Map* target,
SimpleTransitionFlag flag);
@@ -5611,16 +5864,16 @@ class Map: public HeapObject {
int target_number_of_fields,
int target_inobject,
int target_unused);
+ static Handle<Map> GeneralizeAllFieldRepresentations(
+ Handle<Map> map,
+ Representation new_representation);
static Handle<Map> GeneralizeRepresentation(
Handle<Map> map,
int modify_index,
Representation new_representation,
StoreMode store_mode);
- MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
- int modify_index,
- Representation representation,
- StoreMode store_mode);
- MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations(
+ static Handle<Map> CopyGeneralizeAllRepresentations(
+ Handle<Map> map,
int modify_index,
StoreMode store_mode,
PropertyAttributes attributes,
@@ -5737,6 +5990,8 @@ class Map: public HeapObject {
Name* name,
LookupResult* result);
+ inline PropertyDetails GetLastDescriptorDetails();
+
// The size of transition arrays is limited so they do not end up in large
// object space. Otherwise ClearNonLiveTransitions would leak memory while
// applying in-place right trimming.
@@ -5764,7 +6019,7 @@ class Map: public HeapObject {
}
void SetEnumLength(int length) {
- if (length != kInvalidEnumCache) {
+ if (length != kInvalidEnumCacheSentinel) {
ASSERT(length >= 0);
ASSERT(length == 0 || instance_descriptors()->HasEnumCache());
ASSERT(length <= NumberOfOwnDescriptors());
@@ -5774,8 +6029,8 @@ class Map: public HeapObject {
inline bool owns_descriptors();
inline void set_owns_descriptors(bool is_shared);
- inline bool is_observed();
- inline void set_is_observed(bool is_observed);
+ inline bool has_instance_call_handler();
+ inline void set_has_instance_call_handler();
inline void freeze();
inline bool is_frozen();
inline void mark_unstable();
@@ -5789,20 +6044,29 @@ class Map: public HeapObject {
// deprecated, it is directly returned. Otherwise, the non-deprecated version
// is found by re-transitioning from the root of the transition tree using the
// descriptor array of the map. Returns NULL if no updated map is found.
- Map* CurrentMapForDeprecated();
+ // This method also applies any pending migrations along the prototype chain.
+ static Handle<Map> CurrentMapForDeprecated(Handle<Map> map);
+ // Same as above, but does not touch the prototype chain.
+ static Handle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map);
+ static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
static Handle<Map> CopyDropDescriptors(Handle<Map> map);
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
+ static Handle<Map> CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name);
MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
DescriptorArray* descriptors,
TransitionFlag flag,
Name* name = NULL,
SimpleTransitionFlag simple_flag = FULL_TRANSITION);
- MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
+ static Handle<Map> CopyInstallDescriptors(
+ Handle<Map> map,
int new_descriptor,
- DescriptorArray* descriptors);
+ Handle<DescriptorArray> descriptors);
MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor);
MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
@@ -5818,13 +6082,12 @@ class Map: public HeapObject {
MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
TransitionFlag flag);
- MUST_USE_RESULT MaybeObject* CopyForObserved();
+
+ static Handle<Map> CopyForObserved(Handle<Map> map);
static Handle<Map> CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
- MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
- NormalizedMapSharingMode sharing);
inline void AppendDescriptor(Descriptor* desc,
const DescriptorArray::WhitenessWitness&);
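
Several Map methods above gain static Handle<Map> overloads beside the raw MUST_USE_RESULT MaybeObject* versions. The toy below sketches the general shape of that handlification pattern, where the static wrapper hides the allocation-failure/retry path from callers; all names and the retry mechanics are simplified stand-ins, not V8's real GC machinery:

#include <cstdio>

// Toy stand-ins for the dual raw/handlified API: the raw method can fail
// (NULL here plays the role of a retry-after-GC MaybeObject*), while the
// static handlified wrapper hides the retry from callers.
struct Map {
  int instance_size;
};

static int g_free_slots = 0;  // pretend heap space, refilled by "GC"

// Raw version: may fail, caller must check (MaybeObject* analogue).
Map* RawCopyUnsafe(Map* map, int instance_size) {
  if (g_free_slots == 0) return 0;
  --g_free_slots;
  Map* copy = new Map(*map);
  copy->instance_size = instance_size;
  return copy;
}

void CollectGarbage() { g_free_slots = 4; }

// Handlified version: static, retries once after "GC", so callers never
// see the failure path.
Map* RawCopy(Map* map, int instance_size) {
  Map* result = RawCopyUnsafe(map, instance_size);
  if (result == 0) {
    CollectGarbage();
    result = RawCopyUnsafe(map, instance_size);
  }
  return result;
}

int main() {
  Map m = { 16 };
  Map* copy = RawCopy(&m, 32);  // succeeds despite the initially full heap
  std::printf("copied instance_size = %d\n", copy->instance_size);
  delete copy;
  return 0;
}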
@@ -5929,6 +6192,16 @@ class Map: public HeapObject {
bool IsJSObjectMap() {
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
+ bool IsJSGlobalProxyMap() {
+ return instance_type() == JS_GLOBAL_PROXY_TYPE;
+ }
+ bool IsJSGlobalObjectMap() {
+ return instance_type() == JS_GLOBAL_OBJECT_TYPE;
+ }
+ bool IsGlobalObjectMap() {
+ const InstanceType type = instance_type();
+ return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
+ }
// Fires when the layout of an object with a leaf map changes.
// This includes adding transitions to the leaf map or changing
@@ -5977,9 +6250,6 @@ class Map: public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
- // Constant for denoting that the enum cache is not yet initialized.
- static const int kInvalidEnumCache = EnumLengthBits::kMax;
-
// Layout description.
static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
@@ -6029,7 +6299,7 @@ class Map: public HeapObject {
static const int kHasNamedInterceptor = 3;
static const int kHasIndexedInterceptor = 4;
static const int kIsUndetectable = 5;
- static const int kHasInstanceCallHandler = 6;
+ static const int kIsObserved = 6;
static const int kIsAccessCheckNeeded = 7;
// Bit positions for bit field 2
@@ -6221,9 +6491,6 @@ class Script: public Struct {
//
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
-//
-// NOTE: Order is important: math functions should be at the end of
-// the list and MathFloor should be the first math function.
#define FUNCTIONS_WITH_ID_LIST(V) \
V(Array.prototype, push, ArrayPush) \
V(Array.prototype, pop, ArrayPop) \
@@ -6245,7 +6512,6 @@ class Script: public Struct {
V(Math, exp, MathExp) \
V(Math, sqrt, MathSqrt) \
V(Math, pow, MathPow) \
- V(Math, random, MathRandom) \
V(Math, max, MathMax) \
V(Math, min, MathMin) \
V(Math, imul, MathImul)
@@ -6258,8 +6524,7 @@ enum BuiltinFunctionId {
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
- kMathPowHalf,
- kFirstMathFunctionId = kMathFloor
+ kMathPowHalf
};
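
FUNCTIONS_WITH_ID_LIST and DECLARE_FUNCTION_ID above are an X-macro pair: one list macro expanded with different per-entry macros. A self-contained sketch of the same technique, using a toy list rather than the real one:

#include <cstdio>

// Toy list in the same V(holder, name, id) shape as FUNCTIONS_WITH_ID_LIST.
#define MY_FUNCTIONS_WITH_ID_LIST(V) \
  V(Array.prototype, push, ArrayPush) \
  V(Array.prototype, pop, ArrayPop)   \
  V(Math, pow, MathPow)

// First expansion: build the enum of builtin ids.
#define DECLARE_FUNCTION_ID(ignored1, ignored2, id) k##id,
enum BuiltinFunctionId {
  MY_FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
  kFunctionIdCount
};
#undef DECLARE_FUNCTION_ID

// Second expansion over the same list: a printable name table.
#define FUNCTION_NAME_STRING(ignored1, name, ignored2) #name,
static const char* kFunctionNames[] = {
  MY_FUNCTIONS_WITH_ID_LIST(FUNCTION_NAME_STRING)
};
#undef FUNCTION_NAME_STRING

int main() {
  for (int i = 0; i < kFunctionIdCount; ++i)
    std::printf("%d -> %s\n", i, kFunctionNames[i]);
  return 0;
}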
@@ -6557,6 +6822,9 @@ class SharedFunctionInfo: public HeapObject {
// global object.
DECL_BOOLEAN_ACCESSORS(native)
+ // Indicate that this builtin needs to be inlined in crankshaft.
+ DECL_BOOLEAN_ACCESSORS(inline_builtin)
+
// Indicates that the function was created by the Function function.
// Though it's anonymous, toString should treat it as if it had the name
// "anonymous". We don't set the name itself so that the system does not
@@ -6646,6 +6914,9 @@ class SharedFunctionInfo: public HeapObject {
set_dont_optimize(reason != kNoReason);
}
+ // Check whether or not this function is inlineable.
+ bool IsInlineable();
+
// Source size of this function.
int SourceSize();
@@ -6796,6 +7067,7 @@ class SharedFunctionInfo: public HeapObject {
kUsesArguments,
kHasDuplicateParameters,
kNative,
+ kInlineBuiltin,
kBoundFunction,
kIsAnonymous,
kNameShouldPrintAsAnonymous,
@@ -7022,9 +7294,6 @@ class JSFunction: public JSObject {
// Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInRecompileQueue();
- // Check whether or not this function is inlineable.
- bool IsInlineable();
-
// [literals_or_bindings]: Fixed array holding either
// the materialized literals or the bindings of a bound function.
//
@@ -7051,6 +7320,7 @@ class JSFunction: public JSObject {
inline Map* initial_map();
inline void set_initial_map(Map* value);
inline bool has_initial_map();
+ static void EnsureHasInitialMap(Handle<JSFunction> function);
// Get and set the prototype property on a JSFunction. If the
// function has an initial map the prototype is set on the initial
@@ -7157,6 +7427,8 @@ class JSGlobalProxy : public JSObject {
// Casting.
static inline JSGlobalProxy* cast(Object* obj);
+ inline bool IsDetachedFrom(GlobalObject* global);
+
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalProxy)
DECLARE_VERIFIER(JSGlobalProxy)
@@ -7226,6 +7498,8 @@ class JSGlobalObject: public GlobalObject {
static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name);
+ inline bool IsDetached();
+
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalObject)
DECLARE_VERIFIER(JSGlobalObject)
@@ -7839,30 +8113,101 @@ enum AllocationSiteMode {
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+ static const double kPretenureRatio;
+ static const int kPretenureMinimumCreated = 100;
+
+ // Values for pretenure decision field.
+ enum {
+ kUndecided = 0,
+ kDontTenure = 1,
+ kTenure = 2,
+ kZombie = 3
+ };
DECL_ACCESSORS(transition_info, Object)
+ // nested_site threads a list of sites that represent nested literals
+ // walked in a particular order. So [[1, 2], 1, 2] will have one
+ // nested_site, but [[1, 2], 3, [4]] will have a list of two.
+ DECL_ACCESSORS(nested_site, Object)
+ DECL_ACCESSORS(memento_found_count, Smi)
+ DECL_ACCESSORS(memento_create_count, Smi)
+ // TODO(mvstanton): we don't need a whole integer to record pretenure
+ // decision. Consider sharing space with memento_found_count.
+ DECL_ACCESSORS(pretenure_decision, Smi)
+ DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
- void Initialize() {
- SetElementsKind(GetInitialFastElementsKind());
+ inline void Initialize();
+
+ // This method is expensive; it should only be called for reporting.
+ bool IsNestedSite();
+
+ class ElementsKindBits: public BitField<ElementsKind, 0, 15> {};
+ class UnusedBits: public BitField<int, 15, 14> {};
+ class DoNotInlineBit: public BitField<bool, 29, 1> {};
+
+ inline void IncrementMementoFoundCount();
+
+ inline void IncrementMementoCreateCount();
+
+ PretenureFlag GetPretenureMode() {
+ int mode = pretenure_decision()->value();
+ // Zombie objects "decide" to be untenured.
+ return (mode == kTenure) ? TENURED : NOT_TENURED;
}
+ // The pretenuring decision is made during gc, and the zombie state allows
+ // us to recognize when an allocation site is just being kept alive because
+ // a later traversal of new space may discover AllocationMementos that point
+ // to this AllocationSite.
+ bool IsZombie() {
+ return pretenure_decision()->value() == kZombie;
+ }
+
+ inline void MarkZombie();
+
+ inline bool DigestPretenuringFeedback();
+
ElementsKind GetElementsKind() {
- ASSERT(!IsLiteralSite());
- return static_cast<ElementsKind>(Smi::cast(transition_info())->value());
+ ASSERT(!SitePointsToLiteral());
+ int value = Smi::cast(transition_info())->value();
+ return ElementsKindBits::decode(value);
}
void SetElementsKind(ElementsKind kind) {
- set_transition_info(Smi::FromInt(static_cast<int>(kind)));
+ int value = Smi::cast(transition_info())->value();
+ set_transition_info(Smi::FromInt(ElementsKindBits::update(value, kind)),
+ SKIP_WRITE_BARRIER);
}
- bool IsLiteralSite() {
+ bool CanInlineCall() {
+ int value = Smi::cast(transition_info())->value();
+ return DoNotInlineBit::decode(value) == 0;
+ }
+
+ void SetDoNotInlineCall() {
+ int value = Smi::cast(transition_info())->value();
+ set_transition_info(Smi::FromInt(DoNotInlineBit::update(value, true)),
+ SKIP_WRITE_BARRIER);
+ }
+
+ bool SitePointsToLiteral() {
// If transition_info is a smi, then it represents an ElementsKind
// for a constructed array. Otherwise, it must be a boilerplate
- // for an array literal
- return transition_info()->IsJSArray();
+ // for an object or array literal.
+ return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}
+ MaybeObject* DigestTransitionFeedback(ElementsKind to_kind);
+
+ enum Reason {
+ TENURING,
+ TRANSITIONS
+ };
+
+ void AddDependentCompilationInfo(Reason reason, CompilationInfo* info);
+ void AddDependentCode(Reason reason, Handle<Code> code);
+
DECLARE_PRINTER(AllocationSite)
DECLARE_VERIFIER(AllocationSite)
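
The new memento counters and pretenure_decision states drive GC-time pretenuring. Below is a hedged sketch of how such a decision could be derived from the counters; the 0.85 ratio and the exact rule are assumptions for illustration, since the real policy lives in DigestPretenuringFeedback:

#include <cstdio>

// Sketch of the pretenuring decision, using the fields declared above:
// found/create memento counts plus the kUndecided/kDontTenure/kTenure
// states. The ratio value and rule here are illustrative assumptions.
enum PretenureDecision { kUndecided = 0, kDontTenure = 1, kTenure = 2, kZombie = 3 };

static const double kPretenureRatio = 0.85;       // assumed value
static const int kPretenureMinimumCreated = 100;  // matches the header above

PretenureDecision Digest(int memento_found_count, int memento_create_count) {
  if (memento_create_count < kPretenureMinimumCreated)
    return kUndecided;  // not enough samples yet
  double survival = static_cast<double>(memento_found_count) /
                    memento_create_count;
  return survival >= kPretenureRatio ? kTenure : kDontTenure;
}

int main() {
  std::printf("%d\n", Digest(90, 100));  // high survival -> kTenure (2)
  std::printf("%d\n", Digest(10, 100));  // low survival  -> kDontTenure (1)
  std::printf("%d\n", Digest(5, 10));    // too few created -> kUndecided (0)
  return 0;
}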
@@ -7873,14 +8218,33 @@ class AllocationSite: public Struct {
static inline bool CanTrack(InstanceType type);
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
- static const int kWeakNextOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kMementoFoundCountOffset = kNestedSiteOffset + kPointerSize;
+ static const int kMementoCreateCountOffset =
+ kMementoFoundCountOffset + kPointerSize;
+ static const int kPretenureDecisionOffset =
+ kMementoCreateCountOffset + kPointerSize;
+ static const int kDependentCodeOffset =
+ kPretenureDecisionOffset + kPointerSize;
+ static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakNextOffset + kPointerSize;
+ // During mark compact we need to take special care for the dependent code
+ // field.
+ static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
+ static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+ // For other visitors, use the fixed body descriptor below.
typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
- kTransitionInfoOffset + kPointerSize,
+ kDependentCodeOffset + kPointerSize,
kSize> BodyDescriptor;
private:
+ inline DependentCode::DependencyGroup ToDependencyGroup(Reason reason);
+ bool PretenuringDecisionMade() {
+ return pretenure_decision()->value() != kUndecided;
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
};
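
As the SitePointsToLiteral comment explains, transition_info does double duty: a boilerplate heap object for literal sites, or an immediate Smi carrying packed bits for constructed arrays. A toy tagged slot illustrating that dual use; note the toy marks immediates with a set low bit, the reverse of V8's real Smi tagging:

#include <cassert>
#include <cstdint>

struct Boilerplate { int dummy; };

// Toy tagged slot: a set low bit marks an immediate payload carrying the
// elements kind, a clear low bit marks a boilerplate pointer.
struct TransitionInfo {
  uintptr_t raw;
  static TransitionInfo FromBoilerplate(Boilerplate* b) {
    TransitionInfo t = { reinterpret_cast<uintptr_t>(b) };
    return t;
  }
  static TransitionInfo FromElementsKind(int kind) {
    TransitionInfo t = { (static_cast<uintptr_t>(kind) << 1) | 1u };
    return t;
  }
  bool SitePointsToLiteral() const { return (raw & 1u) == 0; }
  int GetElementsKind() const {
    assert(!SitePointsToLiteral());
    return static_cast<int>(raw >> 1);
  }
};

int main() {
  Boilerplate b = { 0 };
  TransitionInfo literal_site = TransitionInfo::FromBoilerplate(&b);
  TransitionInfo array_site = TransitionInfo::FromElementsKind(5);
  assert(literal_site.SitePointsToLiteral());
  assert(!array_site.SitePointsToLiteral());
  assert(array_site.GetElementsKind() == 5);
  return 0;
}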
@@ -7892,7 +8256,10 @@ class AllocationMemento: public Struct {
DECL_ACCESSORS(allocation_site, Object)
- bool IsValid() { return allocation_site()->IsAllocationSite(); }
+ bool IsValid() {
+ return allocation_site()->IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site())->IsZombie();
+ }
AllocationSite* GetAllocationSite() {
ASSERT(IsValid());
return AllocationSite::cast(allocation_site());
@@ -7902,7 +8269,8 @@ class AllocationMemento: public Struct {
DECLARE_VERIFIER(AllocationMemento)
// Returns NULL if no AllocationMemento is available for the object.
- static AllocationMemento* FindForJSObject(JSObject* object);
+ static AllocationMemento* FindForJSObject(JSObject* object,
+ bool in_GC = false);
static inline AllocationMemento* cast(Object* obj);
private:
@@ -8138,6 +8506,11 @@ class Symbol: public Name {
// [name]: the print name of a symbol, or undefined if none.
DECL_ACCESSORS(name, Object)
+ DECL_ACCESSORS(flags, Smi)
+
+ // [is_private]: whether this is a private symbol.
+ DECL_BOOLEAN_ACCESSORS(is_private)
+
// Casting.
static inline Symbol* cast(Object* obj);
@@ -8147,12 +8520,14 @@ class Symbol: public Name {
// Layout description.
static const int kNameOffset = Name::kSize;
- static const int kSize = kNameOffset + kPointerSize;
+ static const int kFlagsOffset = kNameOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
- typedef FixedBodyDescriptor<kNameOffset, kNameOffset + kPointerSize, kSize>
- BodyDescriptor;
+ typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
private:
+ static const int kPrivateBit = 0;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
};
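
The new flags field stores is_private at kPrivateBit. A hand-expanded sketch of what DECL_BOOLEAN_ACCESSORS-style accessors amount to, assuming the flags value is a plain integer ("Smi") field; purely illustrative:

#include <cassert>

class Symbol {
 public:
  Symbol() : flags_(0) {}
  bool is_private() const { return (flags_ >> kPrivateBit) & 1; }
  void set_is_private(bool value) {
    if (value) flags_ |= (1 << kPrivateBit);
    else flags_ &= ~(1 << kPrivateBit);
  }
 private:
  static const int kPrivateBit = 0;  // matches the layout above
  int flags_;
};

int main() {
  Symbol s;
  assert(!s.is_private());
  s.set_is_private(true);
  assert(s.is_private());
  return 0;
}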
@@ -8373,7 +8748,6 @@ class String: public Name {
static const int kMaxShortPrintLength = 1024;
// Support for regular expressions.
- const uc16* GetTwoByteData();
const uc16* GetTwoByteData(unsigned start);
// Helper function for flattening strings.
@@ -9018,9 +9392,17 @@ class PropertyCell: public Cell {
// of the cell's current type and the value's type. If the change causes
// a change of the type of the cell's contents, code dependent on the cell
// will be deoptimized.
- MUST_USE_RESULT MaybeObject* SetValueInferType(
- Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ static void SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value);
+
+ // Computes the new type of the cell's contents for the given value, but
+ // without actually modifying the 'type' field.
+ static Handle<Type> UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value);
+
+ void AddDependentCompilationInfo(CompilationInfo* info);
+
+ void AddDependentCode(Handle<Code> code);
// Casting.
static inline PropertyCell* cast(Object* obj);
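
SetValueInferType and UpdatedType above compute a possibly widened type for the cell's contents, and a widening deoptimizes dependent code. A toy three-point lattice conveying that idea; the real Type system is far richer, so this is only a sketch:

#include <cstdio>

// Toy lattice: None < Constant(v) < Any. Storing a value may widen the
// cell's type, and a widening is what would trigger deoptimization of
// dependent code.
enum TypeKind { kNone, kConstant, kAny };
struct CellType { TypeKind kind; int constant; };

CellType UpdatedType(CellType old_type, int value) {
  if (old_type.kind == kNone) {
    CellType t = { kConstant, value };  // first store: remember the constant
    return t;
  }
  if (old_type.kind == kConstant && old_type.constant == value)
    return old_type;                    // same value: type unchanged
  CellType t = { kAny, 0 };             // anything else widens to Any
  return t;
}

void SetValueInferType(CellType* type, int value) {
  CellType new_type = UpdatedType(*type, value);
  if (new_type.kind != type->kind)
    std::printf("type changed -> deoptimize dependent code\n");
  *type = new_type;
}

int main() {
  CellType t = { kNone, 0 };
  SetValueInferType(&t, 42);  // None -> Constant(42): deopt message
  SetValueInferType(&t, 42);  // unchanged
  SetValueInferType(&t, 7);   // Constant -> Any: deopt message
  return 0;
}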
@@ -9045,13 +9427,6 @@ class PropertyCell: public Cell {
kSize,
kSize> BodyDescriptor;
- void AddDependentCompilationInfo(CompilationInfo* info);
-
- void AddDependentCode(Handle<Code> code);
-
- static Type* UpdateType(Handle<PropertyCell> cell,
- Handle<Object> value);
-
private:
DECL_ACCESSORS(type_raw, Object)
DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
@@ -9070,9 +9445,6 @@ class JSProxy: public JSReceiver {
// Casting.
static inline JSProxy* cast(Object* obj);
- bool HasPropertyWithHandler(Name* name);
- bool HasElementWithHandler(uint32_t index);
-
MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
Object* receiver,
Name* name);
@@ -9080,21 +9452,15 @@ class JSProxy: public JSReceiver {
Object* receiver,
uint32_t index);
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
- JSReceiver* receiver,
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
// that is read-only, throw. In all these cases set '*done' to true,
// otherwise set it to false.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver,
- Name* name,
- Object* value,
+ static Handle<Object> SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
@@ -9142,12 +9508,21 @@ class JSProxy: public JSReceiver {
private:
friend class JSReceiver;
+ static Handle<Object> SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode);
+ static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
+ static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index);
+
static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name,
DeleteMode mode);
@@ -9155,9 +9530,9 @@ class JSProxy: public JSReceiver {
uint32_t index,
DeleteMode mode);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- static Handle<Object> GetIdentityHash(Handle<JSProxy> proxy,
- CreationFlag flag);
+ MUST_USE_RESULT Object* GetIdentityHash();
+
+ static Handle<Object> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -9300,6 +9675,9 @@ class JSArrayBuffer: public JSObject {
inline bool is_external();
inline void set_is_external(bool value);
+ inline bool should_be_freed();
+ inline void set_should_be_freed(bool value);
+
// [weak_next]: linked list of array buffers.
DECL_ACCESSORS(weak_next, Object)
@@ -9329,6 +9707,7 @@ class JSArrayBuffer: public JSObject {
private:
// Bit position in a flag
static const int kIsExternalBit = 0;
+ static const int kShouldBeFreed = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
};
@@ -9466,6 +9845,10 @@ class JSArray: public JSObject {
// is set to a smi. This matches the set function on FixedArray.
inline void set_length(Smi* length);
+ static void JSArrayUpdateLengthFromIndex(Handle<JSArray> array,
+ uint32_t index,
+ Handle<Object> value);
+
MUST_USE_RESULT MaybeObject* JSArrayUpdateLengthFromIndex(uint32_t index,
Object* value);
@@ -9940,6 +10323,10 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kLengthOffset = kFlagOffset + kPointerSize;
static const int kSize = kLengthOffset + kPointerSize;
+ // Returns true if |object| is an instance of this function template.
+ bool IsTemplateFor(Object* object);
+ bool IsTemplateFor(Map* map);
+
private:
// Bit position in the flag, from least significant bit position.
static const int kHiddenPrototypeBit = 0;
@@ -10157,6 +10544,9 @@ class ObjectVisitor BASE_EMBEDDED {
// [start, end). Any or all of the values may be modified on return.
virtual void VisitPointers(Object** start, Object** end) = 0;
+ // Handy shorthand for visiting a single pointer.
+ virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects.
@@ -10185,22 +10575,14 @@ class ObjectVisitor BASE_EMBEDDED {
// about the code's age.
virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
- // Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
-
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
- // Visits a contiguous arrays of external references (references to the C++
- // heap) in the half-open range [start, end). Any or all of the values
- // may be modified on return.
- virtual void VisitExternalReferences(Address* start, Address* end) {}
-
+ // Visits an external reference embedded into a code object.
virtual void VisitExternalReference(RelocInfo* rinfo);
- inline void VisitExternalReference(Address* p) {
- VisitExternalReferences(p, p + 1);
- }
+ // Visits an external reference. The value may be modified on return.
+ virtual void VisitExternalReference(Address* p) {}
// Visits a handle that has an embedder-assigned class ID.
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
diff --git a/chromium/v8/src/optimizing-compiler-thread.cc b/chromium/v8/src/optimizing-compiler-thread.cc
index 085143d9983..32a7f971401 100644
--- a/chromium/v8/src/optimizing-compiler-thread.cc
+++ b/chromium/v8/src/optimizing-compiler-thread.cc
@@ -29,6 +29,7 @@
#include "v8.h"
+#include "full-codegen.h"
#include "hydrogen.h"
#include "isolate.h"
#include "v8threads.h"
@@ -36,6 +37,19 @@
namespace v8 {
namespace internal {
+OptimizingCompilerThread::~OptimizingCompilerThread() {
+ ASSERT_EQ(0, input_queue_length_);
+ DeleteArray(input_queue_);
+ if (FLAG_concurrent_osr) {
+#ifdef DEBUG
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ CHECK_EQ(NULL, osr_buffer_[i]);
+ }
+#endif
+ DeleteArray(osr_buffer_);
+ }
+}
+
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
@@ -74,7 +88,6 @@ void OptimizingCompilerThread::Run() {
{ AllowHandleDereference allow_handle_dereference;
FlushInputQueue(true);
}
- Release_Store(&queue_length_, static_cast<AtomicWord>(0));
Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
stop_semaphore_.Signal();
// Return to start of consumer loop.
@@ -93,99 +106,125 @@ void OptimizingCompilerThread::Run() {
}
+RecompileJob* OptimizingCompilerThread::NextInput() {
+ LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
+ if (input_queue_length_ == 0) return NULL;
+ RecompileJob* job = input_queue_[InputQueueIndex(0)];
+ ASSERT_NE(NULL, job);
+ input_queue_shift_ = InputQueueIndex(1);
+ input_queue_length_--;
+ return job;
+}
+
+
void OptimizingCompilerThread::CompileNext() {
- OptimizingCompiler* optimizing_compiler = NULL;
- bool result = input_queue_.Dequeue(&optimizing_compiler);
- USE(result);
- ASSERT(result);
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+ RecompileJob* job = NextInput();
+ ASSERT_NE(NULL, job);
// The function may have already been optimized by OSR. Simply continue.
- OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+ RecompileJob::Status status = job->OptimizeGraph();
USE(status); // Prevent an unused-variable error in release mode.
- ASSERT(status != OptimizingCompiler::FAILED);
+ ASSERT(status != RecompileJob::FAILED);
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
- ASSERT(FLAG_concurrent_osr);
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- osr_candidates_.RemoveElement(optimizing_compiler);
- ready_for_osr_.Add(optimizing_compiler);
- } else {
- output_queue_.Enqueue(optimizing_compiler);
- isolate_->stack_guard()->RequestInstallCode();
+ output_queue_.Enqueue(job);
+ isolate_->stack_guard()->RequestInstallCode();
+}
+
+
+static void DisposeRecompileJob(RecompileJob* job,
+ bool restore_function_code) {
+ // The recompile job is allocated in the CompilationInfo's zone.
+ CompilationInfo* info = job->info();
+ if (restore_function_code) {
+ if (info->is_osr()) {
+ if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info);
+ } else {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
+ }
}
+ delete info;
}
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (input_queue_.Dequeue(&optimizing_compiler)) {
+ RecompileJob* job;
+ while ((job = NextInput())) {
// This should not block, since we have one signal on the input queue
// semaphore corresponding to each element in the input queue.
input_queue_semaphore_.Wait();
- CompilationInfo* info = optimizing_compiler->info();
- if (restore_function_code) {
- Handle<JSFunction> function = info->closure();
- function->ReplaceCode(function->shared()->code());
+ // OSR jobs are dealt with separately.
+ if (!job->info()->is_osr()) {
+ DisposeRecompileJob(job, restore_function_code);
}
- delete info;
}
}
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (output_queue_.Dequeue(&optimizing_compiler)) {
- CompilationInfo* info = optimizing_compiler->info();
- if (restore_function_code) {
- Handle<JSFunction> function = info->closure();
- function->ReplaceCode(function->shared()->code());
+ RecompileJob* job;
+ while (output_queue_.Dequeue(&job)) {
+ // OSR jobs are dealt with separately.
+ if (!job->info()->is_osr()) {
+ DisposeRecompileJob(job, restore_function_code);
}
- delete info;
}
+}
+
- osr_candidates_.Clear();
- RemoveStaleOSRCandidates(0);
+void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ if (osr_buffer_[i] != NULL) {
+ DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+ osr_buffer_[i] = NULL;
+ }
+ }
}
void OptimizingCompilerThread::Flush() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+ if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
FlushOutputQueue(true);
+ if (FLAG_concurrent_osr) FlushOsrBuffer(true);
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Flushed concurrent recompilation queues.\n");
+ }
}
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
+ if (FLAG_block_concurrent_recompilation) Unblock();
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
if (FLAG_concurrent_recompilation_delay != 0) {
- // Barrier when loading queue length is not necessary since the write
- // happens in CompileNext on the same thread.
- // This is used only for testing.
- while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
+ // At this point the optimizing compiler thread's event loop has stopped.
+ // There is no need for a mutex when reading input_queue_length_.
+ while (input_queue_length_ > 0) CompileNext();
InstallOptimizedFunctions();
} else {
FlushInputQueue(false);
FlushOutputQueue(false);
}
+ if (FLAG_concurrent_osr) FlushOsrBuffer(false);
+
if (FLAG_trace_concurrent_recompilation) {
double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
}
- if (FLAG_trace_osr && FLAG_concurrent_osr) {
+ if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
+ FLAG_concurrent_osr) {
PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
}
@@ -196,60 +235,96 @@ void OptimizingCompilerThread::Stop() {
void OptimizingCompilerThread::InstallOptimizedFunctions() {
ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
- OptimizingCompiler* compiler;
- while (true) {
- if (!output_queue_.Dequeue(&compiler)) return;
- Compiler::InstallOptimizedCode(compiler);
- }
- // Remove the oldest OSR candidates that are ready so that we
- // only have a limited number of them waiting.
- if (FLAG_concurrent_osr) RemoveStaleOSRCandidates();
+ RecompileJob* job;
+ while (output_queue_.Dequeue(&job)) {
+ CompilationInfo* info = job->info();
+ if (info->is_osr()) {
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - ");
+ info->closure()->PrintName();
+ PrintF(" is ready for install and entry at AST id %d]\n",
+ info->osr_ast_id().ToInt());
+ }
+ job->WaitForInstall();
+ BackEdgeTable::RemoveStackCheck(info);
+ } else {
+ Compiler::InstallOptimizedCode(job);
+ }
+ }
}
-void OptimizingCompilerThread::QueueForOptimization(
- OptimizingCompiler* optimizing_compiler) {
+void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
- Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
- if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
- optimizing_compiler->info()->closure()->MarkInRecompileQueue();
- } else {
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- osr_candidates_.Add(optimizing_compiler);
+ CompilationInfo* info = job->info();
+ if (info->is_osr()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Queueing ");
+ info->closure()->PrintName();
+ PrintF(" for concurrent on-stack replacement.\n");
+ }
osr_attempts_++;
+ BackEdgeTable::AddStackCheck(info);
+ AddToOsrBuffer(job);
+ // Add job to the front of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+ // Move shift_ back by one.
+ input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
+ input_queue_[InputQueueIndex(0)] = job;
+ input_queue_length_++;
+ } else {
+ info->closure()->MarkInRecompileQueue();
+ // Add job to the back of the input queue.
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ ASSERT_LT(input_queue_length_, input_queue_capacity_);
+ input_queue_[InputQueueIndex(input_queue_length_)] = job;
+ input_queue_length_++;
+ }
+ if (FLAG_block_concurrent_recompilation) {
+ blocked_jobs_++;
+ } else {
+ input_queue_semaphore_.Signal();
}
- input_queue_.Enqueue(optimizing_compiler);
- input_queue_semaphore_.Signal();
}
-OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
+void OptimizingCompilerThread::Unblock() {
+ ASSERT(!IsOptimizerThread());
+ while (blocked_jobs_ > 0) {
+ input_queue_semaphore_.Signal();
+ blocked_jobs_--;
+ }
+}
+
+
+RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
Handle<JSFunction> function, uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
- OptimizingCompiler* result = NULL;
- { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- for (int i = 0; i < ready_for_osr_.length(); i++) {
- if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
- osr_hits_++;
- result = ready_for_osr_.Remove(i);
- break;
- }
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->IsWaitingForInstall() &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ osr_hits_++;
+ osr_buffer_[i] = NULL;
+ return current;
}
}
- RemoveStaleOSRCandidates();
- return result;
+ return NULL;
}
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
uint32_t osr_pc_offset) {
ASSERT(!IsOptimizerThread());
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- for (int i = 0; i < osr_candidates_.length(); i++) {
- if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
- return true;
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL &&
+ current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ return !current->IsWaitingForInstall();
}
}
return false;
@@ -258,36 +333,50 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
ASSERT(!IsOptimizerThread());
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- for (int i = 0; i < osr_candidates_.length(); i++) {
- if (*osr_candidates_[i]->info()->closure() == function) {
- return true;
+ for (int i = 0; i < osr_buffer_capacity_; i++) {
+ RecompileJob* current = osr_buffer_[i];
+ if (current != NULL && *current->info()->closure() == function) {
+ return !current->IsWaitingForInstall();
}
}
return false;
}
-void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
+void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
ASSERT(!IsOptimizerThread());
- LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
- while (ready_for_osr_.length() > limit) {
- OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
- CompilationInfo* throw_away = compiler->info();
+ // Find the next slot that is empty or has a stale job.
+ RecompileJob* stale = NULL;
+ while (true) {
+ stale = osr_buffer_[osr_buffer_cursor_];
+ if (stale == NULL || stale->IsWaitingForInstall()) break;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
+ }
+
+ // Add to found slot and dispose the evicted job.
+ if (stale != NULL) {
+ ASSERT(stale->IsWaitingForInstall());
+ CompilationInfo* info = stale->info();
if (FLAG_trace_osr) {
PrintF("[COSR - Discarded ");
- throw_away->closure()->PrintName();
- PrintF(", AST id %d]\n",
- throw_away->osr_ast_id().ToInt());
+ info->closure()->PrintName();
+ PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
}
- delete throw_away;
+ DisposeRecompileJob(stale, false);
}
+ osr_buffer_[osr_buffer_cursor_] = job;
+ osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
+ return isolate->concurrent_recompilation_enabled() &&
+ isolate->optimizing_compiler_thread()->IsOptimizerThread();
+}
+
+
bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_concurrent_recompilation) return false;
LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
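
AddToOsrBuffer above scans from the cursor for an empty slot or one holding a stale (waiting-for-install) job, disposes any evicted job, and stores the new one. A toy version of that cyclic buffer; note the real buffer's capacity exceeds the input queue length by four, which guarantees the scan terminates:

// Toy version of the cyclic OSR buffer: advance the cursor to an empty or
// evictable slot, dispose any evicted job, store the new one, and bump the
// cursor. A job is evictable once it is waiting for install.
struct Job {
  bool waiting_for_install;
};

class OsrBuffer {
 public:
  explicit OsrBuffer(int capacity)
      : capacity_(capacity), cursor_(0), slots_(new Job*[capacity]) {
    for (int i = 0; i < capacity_; i++) slots_[i] = nullptr;
  }
  ~OsrBuffer() {
    for (int i = 0; i < capacity_; i++) delete slots_[i];
    delete[] slots_;
  }

  // Passing nullptr just ages the buffer, like AgeBufferedOsrJobs().
  void Add(Job* job) {
    Job* stale;
    for (;;) {
      stale = slots_[cursor_];
      if (stale == nullptr || stale->waiting_for_install) break;
      cursor_ = (cursor_ + 1) % capacity_;
    }
    delete stale;  // dispose the evicted job, if any
    slots_[cursor_] = job;
    cursor_ = (cursor_ + 1) % capacity_;
  }

 private:
  int capacity_;
  int cursor_;
  Job** slots_;
};

int main() {
  OsrBuffer buffer(2);
  buffer.Add(new Job{true});   // slot 0
  buffer.Add(new Job{true});   // slot 1
  buffer.Add(new Job{false});  // evicts the stale job in slot 0
  buffer.Add(nullptr);         // aging pass: frees the stale job in slot 1
  return 0;
}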
diff --git a/chromium/v8/src/optimizing-compiler-thread.h b/chromium/v8/src/optimizing-compiler-thread.h
index d1ed6a2c59f..795fa65588a 100644
--- a/chromium/v8/src/optimizing-compiler-thread.h
+++ b/chromium/v8/src/optimizing-compiler-thread.h
@@ -40,7 +40,7 @@ namespace v8 {
namespace internal {
class HOptimizedGraphBuilder;
-class OptimizingCompiler;
+class RecompileJob;
class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
@@ -53,54 +53,77 @@ class OptimizingCompilerThread : public Thread {
isolate_(isolate),
stop_semaphore_(0),
input_queue_semaphore_(0),
- osr_candidates_(2),
- ready_for_osr_(2),
+ input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
+ input_queue_length_(0),
+ input_queue_shift_(0),
+ osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
+ osr_buffer_cursor_(0),
osr_hits_(0),
- osr_attempts_(0) {
+ osr_attempts_(0),
+ blocked_jobs_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
- NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+ input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
+ if (FLAG_concurrent_osr) {
+ // Allocate and mark OSR buffer slots as empty.
+ osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+ for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
+ }
}
- ~OptimizingCompilerThread() {}
+
+ ~OptimizingCompilerThread();
void Run();
void Stop();
void Flush();
- void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+ void QueueForOptimization(RecompileJob* optimizing_compiler);
+ void Unblock();
void InstallOptimizedFunctions();
- OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function,
- uint32_t osr_pc_offset);
+ RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+ uint32_t osr_pc_offset);
bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
- // We don't need a barrier since we have a data dependency right
- // after.
- Atomic32 current_length = NoBarrier_Load(&queue_length_);
-
- // This can be queried only from the execution thread.
- ASSERT(!IsOptimizerThread());
- // Since only the execution thread increments queue_length_ and
- // only one thread can run inside an Isolate at one time, a direct
- // read doesn't introduce a race -- queue_length_ may decrease in the
- // meantime, but not increase.
- return (current_length < FLAG_concurrent_recompilation_queue_length);
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+ return input_queue_length_ < input_queue_capacity_;
+ }
+
+ inline void AgeBufferedOsrJobs() {
+ // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
+ // Dispose said OSR job in the latter case. Calling this on every GC
+ // should make sure that we do not hold onto stale jobs indefinitely.
+ AddToOsrBuffer(NULL);
+ }
+
+ static bool Enabled(int max_available) {
+ return (FLAG_concurrent_recompilation && max_available > 1);
}
#ifdef DEBUG
+ static bool IsOptimizerThread(Isolate* isolate);
bool IsOptimizerThread();
#endif
private:
enum StopFlag { CONTINUE, STOP, FLUSH };
- // Remove the oldest OSR candidates that are ready so that we
- // only have |limit| left waiting.
- void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
-
void FlushInputQueue(bool restore_function_code);
void FlushOutputQueue(bool restore_function_code);
+ void FlushOsrBuffer(bool restore_function_code);
void CompileNext();
+ RecompileJob* NextInput();
+
+ // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
+ // Tasks evicted from the cyclic buffer are discarded.
+ void AddToOsrBuffer(RecompileJob* compiler);
+
+ inline int InputQueueIndex(int i) {
+ int result = (i + input_queue_shift_) % input_queue_capacity_;
+ ASSERT_LE(0, result);
+ ASSERT_LT(result, input_queue_capacity_);
+ return result;
+ }
#ifdef DEBUG
int thread_id_;
@@ -111,25 +134,29 @@ class OptimizingCompilerThread : public Thread {
Semaphore stop_semaphore_;
Semaphore input_queue_semaphore_;
- // Queue of incoming recompilation tasks (including OSR).
- UnboundQueue<OptimizingCompiler*> input_queue_;
+ // Circular queue of incoming recompilation tasks (including OSR).
+ RecompileJob** input_queue_;
+ int input_queue_capacity_;
+ int input_queue_length_;
+ int input_queue_shift_;
+ Mutex input_queue_mutex_;
+
// Queue of recompilation tasks ready to be installed (excluding OSR).
- UnboundQueue<OptimizingCompiler*> output_queue_;
- // List of all OSR related recompilation tasks (both incoming and ready ones).
- List<OptimizingCompiler*> osr_candidates_;
- // List of recompilation tasks ready for OSR.
- List<OptimizingCompiler*> ready_for_osr_;
+ UnboundQueue<RecompileJob*> output_queue_;
+
+ // Cyclic buffer of recompilation tasks for OSR.
+ RecompileJob** osr_buffer_;
+ int osr_buffer_capacity_;
+ int osr_buffer_cursor_;
volatile AtomicWord stop_thread_;
- volatile Atomic32 queue_length_;
TimeDelta time_spent_compiling_;
TimeDelta time_spent_total_;
- Mutex osr_list_mutex_;
int osr_hits_;
int osr_attempts_;
- static const int kReadyForOSRLimit = 4;
+ int blocked_jobs_;
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/parser.cc b/chromium/v8/src/parser.cc
index 05ae11e4291..b1689191ad3 100644
--- a/chromium/v8/src/parser.cc
+++ b/chromium/v8/src/parser.cc
@@ -536,7 +536,8 @@ Parser::FunctionState::~FunctionState() {
// Implementation of Parser
Parser::Parser(CompilationInfo* info)
- : isolate_(info->isolate()),
+ : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()),
+ isolate_(info->isolate()),
symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
@@ -548,11 +549,6 @@ Parser::Parser(CompilationInfo* info)
extension_(info->extension()),
pre_parse_data_(NULL),
fni_(NULL),
- allow_natives_syntax_(false),
- allow_lazy_(false),
- allow_generators_(false),
- allow_for_of_(false),
- stack_overflow_(false),
parenthesized_function_(false),
zone_(info->zone()),
info_(info) {
@@ -569,7 +565,9 @@ Parser::Parser(CompilationInfo* info)
FunctionLiteral* Parser::ParseProgram() {
- HistogramTimerScope timer_scope(isolate()->counters()->parse());
+ // TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
+ // see comment for HistogramTimerScope class.
+ HistogramTimerScope timer_scope(isolate()->counters()->parse(), true);
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
ElapsedTimer timer;
@@ -652,10 +650,10 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
- int beg_loc = scanner().location().beg_pos;
+ int beg_pos = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
if (ok && !top_scope_->is_classic_mode()) {
- CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
+ CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok);
}
if (ok && is_extended_mode()) {
@@ -685,11 +683,12 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::ANONYMOUS_EXPRESSION,
FunctionLiteral::kGlobalOrEval,
FunctionLiteral::kNotParenthesized,
- FunctionLiteral::kNotGenerator);
+ FunctionLiteral::kNotGenerator,
+ 0);
result->set_ast_properties(factory()->visitor()->ast_properties());
result->set_dont_optimize_reason(
factory()->visitor()->dont_optimize_reason());
- } else if (stack_overflow_) {
+ } else if (stack_overflow()) {
isolate()->StackOverflow();
}
}
@@ -786,7 +785,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
ASSERT(target_stack_ == NULL);
if (result == NULL) {
- if (stack_overflow_) isolate()->StackOverflow();
+ if (stack_overflow()) isolate()->StackOverflow();
} else {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -984,6 +983,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
// ModuleDeclaration:
// 'module' Identifier Module
+ int pos = peek_position();
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
@@ -994,7 +994,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
Module* module = ParseModule(CHECK_OK);
VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
- factory()->NewModuleDeclaration(proxy, module, top_scope_);
+ factory()->NewModuleDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
#ifdef DEBUG
@@ -1009,9 +1009,9 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
if (names) names->Add(name, zone());
if (module->body() == NULL)
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(pos);
else
- return factory()->NewModuleStatement(proxy, module->body());
+ return factory()->NewModuleStatement(proxy, module->body(), pos);
}
@@ -1046,8 +1046,9 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
// Module:
// '{' ModuleElement '}'
+ int pos = peek_position();
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(NULL, 16, false);
+ Block* body = factory()->NewBlock(NULL, 16, false, RelocInfo::kNoPosition);
#ifdef DEBUG
if (FLAG_print_interface_details) PrintF("# Literal ");
#endif
@@ -1092,7 +1093,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
ASSERT(*ok);
interface->Freeze(ok);
ASSERT(*ok);
- return factory()->NewModuleLiteral(body, interface);
+ return factory()->NewModuleLiteral(body, interface, pos);
}
@@ -1101,6 +1102,7 @@ Module* Parser::ParseModulePath(bool* ok) {
// Identifier
// ModulePath '.' Identifier
+ int pos = peek_position();
Module* result = ParseModuleVariable(CHECK_OK);
while (Check(Token::PERIOD)) {
Handle<String> name = ParseIdentifierName(CHECK_OK);
@@ -1108,7 +1110,7 @@ Module* Parser::ParseModulePath(bool* ok) {
if (FLAG_print_interface_details)
PrintF("# Path .%s ", name->ToAsciiArray());
#endif
- Module* member = factory()->NewModulePath(result, name);
+ Module* member = factory()->NewModulePath(result, name, pos);
result->interface()->Add(name, member->interface(), zone(), ok);
if (!*ok) {
#ifdef DEBUG
@@ -1134,6 +1136,7 @@ Module* Parser::ParseModuleVariable(bool* ok) {
// ModulePath:
// Identifier
+ int pos = peek_position();
Handle<String> name = ParseIdentifier(CHECK_OK);
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1143,7 +1146,7 @@ Module* Parser::ParseModuleVariable(bool* ok) {
factory(), name, Interface::NewModule(zone()),
scanner().location().beg_pos);
- return factory()->NewModuleVariable(proxy);
+ return factory()->NewModuleVariable(proxy, pos);
}
@@ -1151,6 +1154,7 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Module:
// String
+ int pos = peek_position();
Expect(Token::STRING, CHECK_OK);
Handle<String> symbol = GetSymbol();
@@ -1163,10 +1167,10 @@ Module* Parser::ParseModuleUrl(bool* ok) {
// Create an empty literal as long as the feature isn't finished.
USE(symbol);
Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
- Block* body = factory()->NewBlock(NULL, 1, false);
+ Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
body->set_scope(scope);
Interface* interface = scope->interface();
- Module* result = factory()->NewModuleLiteral(body, interface);
+ Module* result = factory()->NewModuleLiteral(body, interface, pos);
interface->Freeze(ok);
ASSERT(*ok);
interface->Unify(scope->interface(), zone(), ok);
@@ -1194,6 +1198,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
//
// TODO(ES6): implement destructuring ImportSpecifiers
+ int pos = peek_position();
Expect(Token::IMPORT, CHECK_OK);
ZoneStringList names(1, zone());
@@ -1211,7 +1216,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
// Generate a separate declaration for each identifier.
// TODO(ES6): once we implement destructuring, make that one declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
for (int i = 0; i < names.length(); ++i) {
#ifdef DEBUG
if (FLAG_print_interface_details)
@@ -1232,7 +1237,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) {
}
VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
Declaration* declaration =
- factory()->NewImportDeclaration(proxy, module, top_scope_);
+ factory()->NewImportDeclaration(proxy, module, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
}
@@ -1256,6 +1261,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
ZoneStringList names(1, zone());
switch (peek()) {
case Token::IDENTIFIER: {
+ int pos = position();
Handle<String> name = ParseIdentifier(CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
@@ -1266,7 +1272,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
names.Add(name, zone());
}
ExpectSemicolon(CHECK_OK);
- result = factory()->NewEmptyStatement();
+ result = factory()->NewEmptyStatement(pos);
} else {
result = ParseModuleDeclaration(&names, CHECK_OK);
}
@@ -1305,7 +1311,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// TODO(rossberg): Rethink whether we actually need to store export
// declarations (for compilation?).
// ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, top_scope_);
+ // factory()->NewExportDeclaration(proxy, top_scope_, position);
// top_scope_->AddDeclaration(declaration);
}
@@ -1363,10 +1369,6 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// labels can be simply ignored in all other cases; except for
// trivial labeled break statements 'label: break label' which is
// parsed into an empty statement.
-
- // Keep the source position of the statement
- int statement_pos = scanner().peek_location().beg_pos;
- Statement* stmt = NULL;
switch (peek()) {
case Token::LBRACE:
return ParseBlock(labels, ok);
@@ -1374,52 +1376,41 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
case Token::CONST: // fall through
case Token::LET:
case Token::VAR:
- stmt = ParseVariableStatement(kStatement, NULL, ok);
- break;
+ return ParseVariableStatement(kStatement, NULL, ok);
case Token::SEMICOLON:
Next();
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
case Token::IF:
- stmt = ParseIfStatement(labels, ok);
- break;
+ return ParseIfStatement(labels, ok);
case Token::DO:
- stmt = ParseDoWhileStatement(labels, ok);
- break;
+ return ParseDoWhileStatement(labels, ok);
case Token::WHILE:
- stmt = ParseWhileStatement(labels, ok);
- break;
+ return ParseWhileStatement(labels, ok);
case Token::FOR:
- stmt = ParseForStatement(labels, ok);
- break;
+ return ParseForStatement(labels, ok);
case Token::CONTINUE:
- stmt = ParseContinueStatement(ok);
- break;
+ return ParseContinueStatement(ok);
case Token::BREAK:
- stmt = ParseBreakStatement(labels, ok);
- break;
+ return ParseBreakStatement(labels, ok);
case Token::RETURN:
- stmt = ParseReturnStatement(ok);
- break;
+ return ParseReturnStatement(ok);
case Token::WITH:
- stmt = ParseWithStatement(labels, ok);
- break;
+ return ParseWithStatement(labels, ok);
case Token::SWITCH:
- stmt = ParseSwitchStatement(labels, ok);
- break;
+ return ParseSwitchStatement(labels, ok);
case Token::THROW:
- stmt = ParseThrowStatement(ok);
- break;
+ return ParseThrowStatement(ok);
case Token::TRY: {
// NOTE: It is somewhat complicated to have labels on
@@ -1427,12 +1418,10 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
// one must take great care not to treat it as a
// fall-through. It is much easier just to wrap the entire
// try-statement in a statement block and put the labels there
- Block* result = factory()->NewBlock(labels, 1, false);
+ Block* result =
+ factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
TryStatement* statement = ParseTryStatement(CHECK_OK);
- if (statement) {
- statement->set_statement_pos(statement_pos);
- }
if (result) result->AddStatement(statement, zone());
return result;
}
@@ -1459,16 +1448,11 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
}
case Token::DEBUGGER:
- stmt = ParseDebuggerStatement(ok);
- break;
+ return ParseDebuggerStatement(ok);
default:
- stmt = ParseExpressionOrLabelledStatement(labels, ok);
+ return ParseExpressionOrLabelledStatement(labels, ok);
}
-
- // Store the source position of the statement
- if (stmt != NULL) stmt->set_statement_pos(statement_pos);
- return stmt;
}
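
The parser hunks above thread an explicit source position (peek_position()/position()) into every AST factory call, instead of patching set_statement_pos on the returned node. A toy factory showing the resulting shape; names are illustrative, not V8's:

#include <cstdio>

// Stand-in for RelocInfo::kNoPosition, for nodes without a useful position.
static const int kNoPosition = -1;

// The position is now a constructor argument supplied by the factory,
// rather than a field set after the node is returned.
struct Statement {
  explicit Statement(int pos) : pos_(pos) {}
  int position() const { return pos_; }
 private:
  int pos_;
};

struct Factory {
  Statement* NewEmptyStatement(int pos) { return new Statement(pos); }
};

int main() {
  Factory factory;
  int peeked_pos = 42;  // what peek_position() would have returned
  Statement* stmt = factory.NewEmptyStatement(peeked_pos);
  Statement* empty = factory.NewEmptyStatement(kNoPosition);
  std::printf("stmt at %d, empty at %d\n", stmt->position(), empty->position());
  delete stmt;
  delete empty;
  return 0;
}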
@@ -1480,7 +1464,7 @@ VariableProxy* Parser::NewUnresolved(
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
return DeclarationScope(mode)->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
+ factory(), name, interface, position());
}
@@ -1647,6 +1631,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// declaration is resolved by looking up the function through a
// callback provided by the extension.
Statement* Parser::ParseNativeDeclaration(bool* ok) {
+ int pos = peek_position();
Expect(Token::FUNCTION, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -1667,39 +1652,19 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// because of lazy compilation.
DeclarationScope(VAR)->ForceEagerCompilation();
- // Compute the function template for the native function.
- v8::Handle<v8::FunctionTemplate> fun_template =
- extension_->GetNativeFunction(v8::Utils::ToLocal(name));
- ASSERT(!fun_template.IsEmpty());
-
- // Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
- const int literals = fun->NumberOfLiterals();
- Handle<Code> code = Handle<Code>(fun->shared()->code());
- Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
- bool is_generator = false;
- Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator,
- code, Handle<ScopeInfo>(fun->shared()->scope_info()));
- shared->set_construct_stub(*construct_stub);
-
- // Copy the function data to the shared function info.
- shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->formal_parameter_count();
- shared->set_formal_parameter_count(parameters);
-
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue());
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
+ factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
- SharedFunctionInfoLiteral* lit =
- factory()->NewSharedFunctionInfoLiteral(shared);
+ NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
+ name, extension_, RelocInfo::kNoPosition);
return factory()->NewExpressionStatement(
factory()->NewAssignment(
- Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition));
+ Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition),
+ pos);
}
@@ -1710,7 +1675,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// 'function' '*' Identifier '(' FormalParameterListopt ')'
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
+ int pos = position();
bool is_generator = allow_generators() && Check(Token::MUL);
bool is_strict_reserved = false;
Handle<String> name = ParseIdentifierOrStrictReservedWord(
@@ -1718,7 +1683,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
FunctionLiteral* fun = ParseFunctionLiteral(name,
is_strict_reserved,
is_generator,
- function_token_position,
+ pos,
FunctionLiteral::DECLARATION,
CHECK_OK);
// Even if we're not at the top-level of the global or a function
@@ -1730,10 +1695,10 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR;
VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
+ factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos);
Declare(declaration, true, CHECK_OK);
if (names) names->Add(name, zone());
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
@@ -1747,7 +1712,8 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
// (ECMA-262, 3rd, 12.2)
//
// Construct block expecting 16 statements.
- Block* result = factory()->NewBlock(labels, 16, false);
+ Block* result =
+ factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
@@ -1768,7 +1734,8 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
// '{' BlockElement* '}'
// Construct block expecting 16 statements.
- Block* body = factory()->NewBlock(labels, 16, false);
+ Block* body =
+ factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
// Parse the statements and collect escaping labels.
@@ -1838,6 +1805,8 @@ Block* Parser::ParseVariableDeclarations(
// TODO(ES6):
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
+
+ int pos = peek_position();
VariableMode mode = VAR;
// True if the binding needs initialization. 'let' and 'const' declared
// bindings are created uninitialized by their declaration nodes and
@@ -1923,7 +1892,7 @@ Block* Parser::ParseVariableDeclarations(
// is inside an initializer block, it is ignored.
//
// Create new block with one expected declaration.
- Block* block = factory()->NewBlock(NULL, 1, true);
+ Block* block = factory()->NewBlock(NULL, 1, true, pos);
int nvars = 0; // the number of variables declared
Handle<String> name;
do {
@@ -1960,7 +1929,7 @@ Block* Parser::ParseVariableDeclarations(
is_const ? Interface::NewConst() : Interface::NewValue();
VariableProxy* proxy = NewUnresolved(name, mode, interface);
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, mode, top_scope_);
+ factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos);
Declare(declaration, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
@@ -2000,11 +1969,11 @@ Block* Parser::ParseVariableDeclarations(
Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
- int position = -1;
+ int pos = -1;
// Harmony consts have non-optional initializers.
if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
Expect(Token::ASSIGN, CHECK_OK);
- position = scanner().location().beg_pos;
+ pos = position();
value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
// Don't infer if it is "a = function(){...}();"-like expression.
if (fni_ != NULL &&
@@ -2019,12 +1988,12 @@ Block* Parser::ParseVariableDeclarations(
// Record the end position of the initializer.
if (proxy->var() != NULL) {
- proxy->var()->set_initializer_position(scanner().location().end_pos);
+ proxy->var()->set_initializer_position(position());
}
// Make sure that 'const x' and 'let x' initialize 'x' to undefined.
if (value == NULL && needs_init) {
- value = GetLiteralUndefined();
+ value = GetLiteralUndefined(position());
}
// Global variable declarations must be compiled in a specific
@@ -2052,7 +2021,7 @@ Block* Parser::ParseVariableDeclarations(
ZoneList<Expression*>* arguments =
new(zone()) ZoneList<Expression*>(3, zone());
// We have at least 1 parameter.
- arguments->Add(factory()->NewLiteral(name), zone());
+ arguments->Add(factory()->NewLiteral(name, pos), zone());
CallRuntime* initialize;
if (is_const) {
@@ -2066,12 +2035,12 @@ Block* Parser::ParseVariableDeclarations(
initialize = factory()->NewCallRuntime(
isolate()->factory()->InitializeConstGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments);
+ arguments, pos);
} else {
// Add strict mode.
// We may want to pass singleton to avoid Literal allocations.
LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(factory()->NewNumberLiteral(language_mode), zone());
+ arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone());
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -2089,11 +2058,12 @@ Block* Parser::ParseVariableDeclarations(
initialize = factory()->NewCallRuntime(
isolate()->factory()->InitializeVarGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
- arguments);
+ arguments, pos);
}
- block->AddStatement(factory()->NewExpressionStatement(initialize),
- zone());
+ block->AddStatement(
+ factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
+ zone());
} else if (needs_init) {
// Constant initializations always assign to the declared constant which
// is always at the function scope level. This is only relevant for
@@ -2106,9 +2076,10 @@ Block* Parser::ParseVariableDeclarations(
ASSERT(proxy->var() != NULL);
ASSERT(value != NULL);
Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
+ factory()->NewAssignment(init_op, proxy, value, pos);
+ block->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
value = NULL;
}
@@ -2122,9 +2093,10 @@ Block* Parser::ParseVariableDeclarations(
VariableProxy* proxy =
initialization_scope->NewUnresolved(factory(), name, interface);
Assignment* assignment =
- factory()->NewAssignment(init_op, proxy, value, position);
- block->AddStatement(factory()->NewExpressionStatement(assignment),
- zone());
+ factory()->NewAssignment(init_op, proxy, value, pos);
+ block->AddStatement(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
}
if (fni_ != NULL) fni_->Leave();
@@ -2156,6 +2128,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
// ExpressionStatement | LabelledStatement ::
// Expression ';'
// Identifier ':' Statement
+ int pos = peek_position();
bool starts_with_identifier = peek_any_identifier();

Expression* expr = ParseExpression(true, CHECK_OK);
if (peek() == Token::COLON && starts_with_identifier && expr != NULL &&
@@ -2215,7 +2188,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
scanner().literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
- return factory()->NewExpressionStatement(expr);
+ return factory()->NewExpressionStatement(expr, pos);
}
@@ -2223,6 +2196,7 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
+ int pos = peek_position();
Expect(Token::IF, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
Expression* condition = ParseExpression(true, CHECK_OK);
@@ -2233,9 +2207,10 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
Next();
else_statement = ParseStatement(labels, CHECK_OK);
} else {
- else_statement = factory()->NewEmptyStatement();
+ else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
}
- return factory()->NewIfStatement(condition, then_statement, else_statement);
+ return factory()->NewIfStatement(
+ condition, then_statement, else_statement, pos);
}
@@ -2243,6 +2218,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
// ContinueStatement ::
// 'continue' Identifier? ';'
+ int pos = peek_position();
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
@@ -2265,7 +2241,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewContinueStatement(target);
+ return factory()->NewContinueStatement(target, pos);
}
@@ -2273,6 +2249,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
// BreakStatement ::
// 'break' Identifier? ';'
+ int pos = peek_position();
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
@@ -2284,7 +2261,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
// empty statements, e.g. 'l1: l2: l3: break l2;'
if (!label.is_null() && ContainsLabel(labels, label)) {
ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement();
+ return factory()->NewEmptyStatement(pos);
}
BreakableStatement* target = NULL;
target = LookupBreakTarget(label, CHECK_OK);
@@ -2301,7 +2278,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- return factory()->NewBreakStatement(target);
+ return factory()->NewBreakStatement(target, pos);
}
@@ -2309,10 +2286,11 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// ReturnStatement ::
// 'return' Expression? ';'
- // Consume the return token. It is necessary to do the before
+ // Consume the return token. It is necessary to do that before
// reporting any errors on it, because of the way errors are
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
+ int pos = position();
Token::Value tok = peek();
Statement* result;
@@ -2321,7 +2299,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
- return_value = GetLiteralUndefined();
+ return_value = GetLiteralUndefined(position());
} else {
return_value = ParseExpression(true, CHECK_OK);
}
@@ -2330,10 +2308,10 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Expression* generator = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Expression* yield = factory()->NewYield(
- generator, return_value, Yield::FINAL, RelocInfo::kNoPosition);
- result = factory()->NewExpressionStatement(yield);
+ generator, return_value, Yield::FINAL, pos);
+ result = factory()->NewExpressionStatement(yield, pos);
} else {
- result = factory()->NewReturnStatement(return_value);
+ result = factory()->NewReturnStatement(return_value, pos);
}
// An ECMAScript program is considered syntactically incorrect if it
@@ -2347,7 +2325,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Handle<String> message = isolate()->factory()->illegal_return_string();
Expression* throw_error =
NewThrowSyntaxError(message, Handle<Object>::null());
- return factory()->NewExpressionStatement(throw_error);
+ return factory()->NewExpressionStatement(throw_error, pos);
}
return result;
}
@@ -2358,6 +2336,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
+ int pos = position();
if (!top_scope_->is_classic_mode()) {
ReportMessage("strict_mode_with", Vector<const char*>::empty());
@@ -2377,7 +2356,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
stmt = ParseStatement(labels, CHECK_OK);
with_scope->set_end_position(scanner().location().end_pos);
}
- return factory()->NewWithStatement(with_scope, expr, stmt);
+ return factory()->NewWithStatement(with_scope, expr, stmt, pos);
}
@@ -2401,7 +2380,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
*default_seen_ptr = true;
}
Expect(Token::COLON, CHECK_OK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
ZoneList<Statement*>* statements =
new(zone()) ZoneList<Statement*>(5, zone());
while (peek() != Token::CASE &&
@@ -2411,7 +2390,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
statements->Add(stat, zone());
}
- return new(zone()) CaseClause(isolate(), label, statements, pos);
+ return factory()->NewCaseClause(label, statements, pos);
}
@@ -2420,7 +2399,8 @@ SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- SwitchStatement* statement = factory()->NewSwitchStatement(labels);
+ SwitchStatement* statement =
+ factory()->NewSwitchStatement(labels, peek_position());
Target target(&this->target_stack_, statement);
Expect(Token::SWITCH, CHECK_OK);
@@ -2447,7 +2427,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
// 'throw' Expression ';'
Expect(Token::THROW, CHECK_OK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
if (scanner().HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
@@ -2456,7 +2436,8 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expression* exception = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return factory()->NewExpressionStatement(factory()->NewThrow(exception, pos));
+ return factory()->NewExpressionStatement(
+ factory()->NewThrow(exception, pos), pos);
}
@@ -2473,6 +2454,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
// 'finally' Block
Expect(Token::TRY, CHECK_OK);
+ int pos = position();
TargetCollector try_collector(zone());
Block* try_block;
@@ -2544,9 +2526,10 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
ASSERT(catch_scope != NULL && catch_variable != NULL);
int index = current_function_state_->NextHandlerIndex();
TryCatchStatement* statement = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
+ index, try_block, catch_scope, catch_variable, catch_block,
+ RelocInfo::kNoPosition);
statement->set_escaping_targets(try_collector.targets());
- try_block = factory()->NewBlock(NULL, 1, false);
+ try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
try_block->AddStatement(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -2557,11 +2540,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
ASSERT(catch_scope != NULL && catch_variable != NULL);
int index = current_function_state_->NextHandlerIndex();
result = factory()->NewTryCatchStatement(
- index, try_block, catch_scope, catch_variable, catch_block);
+ index, try_block, catch_scope, catch_variable, catch_block, pos);
} else {
ASSERT(finally_block != NULL);
int index = current_function_state_->NextHandlerIndex();
- result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
+ result = factory()->NewTryFinallyStatement(
+ index, try_block, finally_block, pos);
// Combine the jump targets of the try block and the possible catch block.
try_collector.targets()->AddAll(*catch_collector.targets(), zone());
}
@@ -2576,7 +2560,8 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- DoWhileStatement* loop = factory()->NewDoWhileStatement(labels);
+ DoWhileStatement* loop =
+ factory()->NewDoWhileStatement(labels, peek_position());
Target target(&this->target_stack_, loop);
Expect(Token::DO, CHECK_OK);
@@ -2584,11 +2569,6 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- if (loop != NULL) {
- int position = scanner().location().beg_pos;
- loop->set_condition_position(position);
- }
-
Expression* cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -2607,7 +2587,7 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- WhileStatement* loop = factory()->NewWhileStatement(labels);
+ WhileStatement* loop = factory()->NewWhileStatement(labels, peek_position());
Target target(&this->target_stack_, loop);
Expect(Token::WHILE, CHECK_OK);
@@ -2643,13 +2623,10 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
if (for_of != NULL) {
Factory* heap_factory = isolate()->factory();
- Handle<String> iterator_str = heap_factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR(".iterator"));
- Handle<String> result_str = heap_factory->InternalizeOneByteString(
- STATIC_ASCII_VECTOR(".result"));
- Variable* iterator =
- top_scope_->DeclarationScope()->NewTemporary(iterator_str);
- Variable* result = top_scope_->DeclarationScope()->NewTemporary(result_str);
+ Variable* iterator = top_scope_->DeclarationScope()->NewTemporary(
+ heap_factory->dot_iterator_string());
+ Variable* result = top_scope_->DeclarationScope()->NewTemporary(
+ heap_factory->dot_result_string());
Expression* assign_iterator;
Expression* next_result;
@@ -2666,8 +2643,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// var result = iterator.next();
{
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- Expression* next_literal =
- factory()->NewLiteral(heap_factory->next_string());
+ Expression* next_literal = factory()->NewLiteral(
+ heap_factory->next_string(), RelocInfo::kNoPosition);
Expression* next_property = factory()->NewProperty(
iterator_proxy, next_literal, RelocInfo::kNoPosition);
ZoneList<Expression*>* next_arguments =
@@ -2681,8 +2658,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// result.done
{
- Expression* done_literal =
- factory()->NewLiteral(heap_factory->done_string());
+ Expression* done_literal = factory()->NewLiteral(
+ heap_factory->done_string(), RelocInfo::kNoPosition);
Expression* result_proxy = factory()->NewVariableProxy(result);
result_done = factory()->NewProperty(
result_proxy, done_literal, RelocInfo::kNoPosition);
@@ -2690,8 +2667,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
// each = result.value
{
- Expression* value_literal =
- factory()->NewLiteral(heap_factory->value_string());
+ Expression* value_literal = factory()->NewLiteral(
+ heap_factory->value_string(), RelocInfo::kNoPosition);
Expression* result_proxy = factory()->NewVariableProxy(result);
Expression* result_value = factory()->NewProperty(
result_proxy, value_literal, RelocInfo::kNoPosition);
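
The assign_iterator, next_result, result_done, and assign_each expressions assembled here are the four joints of one loop shape. As a comment sketch in the same pseudo-JS style the surrounding code uses ('.iterator' and '.result' are the internal temporaries obtained from dot_iterator_string() and dot_result_string(); the exact lowering is approximate):

    // for (each of expr) body   is desugared roughly as:
    //
    //   .iterator = expr[.iterator]();       // assign_iterator
    //   while (true) {
    //     .result = .iterator.next();        // next_result
    //     if (.result.done) break;           // result_done
    //     each = .result.value;              // assign_each
    //     body
    //   }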
@@ -2711,6 +2688,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+ int pos = peek_position();
Statement* init = NULL;
// Create an in-between scope for let-bound iteration variables.
@@ -2735,7 +2713,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
if (!name.is_null() && CheckInOrOf(accept_OF, &mode)) {
Interface* interface =
is_const ? Interface::NewConst() : Interface::NewValue();
- ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
Expression* enumerable = ParseExpression(true, CHECK_OK);
@@ -2745,7 +2724,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
top_scope_->NewUnresolved(factory(), name, interface);
Statement* body = ParseStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ Block* result =
+ factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(variable_statement, zone());
result->AddStatement(loop, zone());
top_scope_ = saved_scope;
@@ -2789,7 +2769,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Handle<String> tempname = heap_factory->InternalizeString(tempstr);
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
// The expression does not see the loop variable.
@@ -2801,11 +2782,12 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
VariableProxy* each =
top_scope_->NewUnresolved(factory(), name, Interface::NewValue());
Statement* body = ParseStatement(NULL, CHECK_OK);
- Block* body_block = factory()->NewBlock(NULL, 3, false);
+ Block* body_block =
+ factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
- Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment);
+ Statement* assignment_statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
body_block->AddStatement(variable_statement, zone());
body_block->AddStatement(assignment_statement, zone());
body_block->AddStatement(body, zone());
@@ -2835,7 +2817,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
isolate()->factory()->invalid_lhs_in_for_in_string();
expression = NewThrowReferenceError(message);
}
- ForEachStatement* loop = factory()->NewForEachStatement(mode, labels);
+ ForEachStatement* loop =
+ factory()->NewForEachStatement(mode, labels, pos);
Target target(&this->target_stack_, loop);
Expression* enumerable = ParseExpression(true, CHECK_OK);
@@ -2851,13 +2834,14 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
return loop;
} else {
- init = factory()->NewExpressionStatement(expression);
+ init = factory()->NewExpressionStatement(
+ expression, RelocInfo::kNoPosition);
}
}
}
// Standard 'for' loop
- ForStatement* loop = factory()->NewForStatement(labels);
+ ForStatement* loop = factory()->NewForStatement(labels, pos);
Target target(&this->target_stack_, loop);
// Parsed initializer at this point.
@@ -2872,7 +2856,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* next = NULL;
if (peek() != Token::RPAREN) {
Expression* exp = ParseExpression(true, CHECK_OK);
- next = factory()->NewExpressionStatement(exp);
+ next = factory()->NewExpressionStatement(exp, RelocInfo::kNoPosition);
}
Expect(Token::RPAREN, CHECK_OK);
@@ -2892,7 +2876,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// for (; c; n) b
// }
ASSERT(init != NULL);
- Block* result = factory()->NewBlock(NULL, 2, false);
+ Block* result = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
result->AddStatement(init, zone());
result->AddStatement(loop, zone());
result->set_scope(for_scope);
@@ -2914,10 +2898,9 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
while (peek() == Token::COMMA) {
Expect(Token::COMMA, CHECK_OK);
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result =
- factory()->NewBinaryOperation(Token::COMMA, result, right, position);
+ result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
}
return result;
}
@@ -2961,7 +2944,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
MarkAsLValue(expression);
Token::Value op = Next(); // Get assignment operator.
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
// TODO(1231235): We try to estimate the set of properties set by
@@ -3005,15 +2988,14 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
Expression* Parser::ParseYieldExpression(bool* ok) {
// YieldExpression ::
// 'yield' '*'? AssignmentExpression
- int position = scanner().peek_location().beg_pos;
+ int pos = peek_position();
Expect(Token::YIELD, CHECK_OK);
Yield::Kind kind =
Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
Expression* generator_object = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
- Yield* yield =
- factory()->NewYield(generator_object, expression, kind, position);
+ Yield* yield = factory()->NewYield(generator_object, expression, kind, pos);
if (kind == Yield::DELEGATING) {
yield->set_index(current_function_state_->NextHandlerIndex());
}
@@ -3027,6 +3009,7 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
// LogicalOrExpression
// LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
+ int pos = peek_position();
// We start using the binary expression parser for prec >= 4 only!
Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
@@ -3034,17 +3017,14 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) {
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
- int left_position = scanner().peek_location().beg_pos;
Expression* left = ParseAssignmentExpression(true, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
- int right_position = scanner().peek_location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return factory()->NewConditional(
- expression, left, right, left_position, right_position);
+ return factory()->NewConditional(expression, left, right, pos);
}
-static int Precedence(Token::Value tok, bool accept_IN) {
+int ParserBase::Precedence(Token::Value tok, bool accept_IN) {
if (tok == Token::IN && !accept_IN)
return 0; // 0 precedence will terminate binary expression parsing
@@ -3060,7 +3040,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
Token::Value op = Next();
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
// Compute some expressions involving only number literals.
@@ -3071,47 +3051,47 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
switch (op) {
case Token::ADD:
- x = factory()->NewNumberLiteral(x_val + y_val);
+ x = factory()->NewNumberLiteral(x_val + y_val, pos);
continue;
case Token::SUB:
- x = factory()->NewNumberLiteral(x_val - y_val);
+ x = factory()->NewNumberLiteral(x_val - y_val, pos);
continue;
case Token::MUL:
- x = factory()->NewNumberLiteral(x_val * y_val);
+ x = factory()->NewNumberLiteral(x_val * y_val, pos);
continue;
case Token::DIV:
- x = factory()->NewNumberLiteral(x_val / y_val);
+ x = factory()->NewNumberLiteral(x_val / y_val, pos);
continue;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::BIT_AND: {
int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::BIT_XOR: {
int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SHL: {
int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SHR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
uint32_t value = DoubleToUint32(x_val) >> shift;
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
case Token::SAR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- x = factory()->NewNumberLiteral(value);
+ x = factory()->NewNumberLiteral(value, pos);
continue;
}
default:
@@ -3130,15 +3110,15 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- x = factory()->NewCompareOperation(cmp, x, y, position);
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
if (cmp != op) {
// The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, position);
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
}
} else {
// We have a "normal" binary operation.
- x = factory()->NewBinaryOperation(op, x, y, position);
+ x = factory()->NewBinaryOperation(op, x, y, pos);
}
}
}
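
This folding only fires when both operands are number literals, and the bitwise cases first coerce through DoubleToInt32/DoubleToUint32 with the shift count masked to five bits, per ECMAScript. A self-contained sketch of the two shift cases (plain casts stand in for V8's range-aware conversion helpers, so this is only faithful for in-range values):

    #include <cstdint>

    // Token::SHR: unsigned 32-bit shift, count masked to 0..31.
    double FoldShr(double x, double y) {
      uint32_t shift = static_cast<int32_t>(y) & 0x1f;
      return static_cast<double>(static_cast<uint32_t>(x) >> shift);
    }

    // Token::SAR: arithmetic shift, spelled out because '>>' on a negative
    // int is implementation-defined in C++ -- the reason the code above
    // routes through an ArithmeticShiftRight helper.
    double FoldSar(double x, double y) {
      uint32_t shift = static_cast<int32_t>(y) & 0x1f;
      int32_t v = static_cast<int32_t>(x);
      return static_cast<double>(v < 0 ? ~(~v >> shift) : v >> shift);
    }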
@@ -3162,7 +3142,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
- int position = scanner().location().beg_pos;
+ int pos = position();
Expression* expression = ParseUnaryExpression(CHECK_OK);
if (expression != NULL && (expression->AsLiteral() != NULL)) {
@@ -3170,9 +3150,8 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
bool condition = literal->BooleanValue();
- Handle<Object> result(isolate()->heap()->ToBoolean(!condition),
- isolate());
- return factory()->NewLiteral(result);
+ Handle<Object> result = isolate()->factory()->ToBoolean(!condition);
+ return factory()->NewLiteral(result, pos);
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
double value = literal->Number();
@@ -3180,9 +3159,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
case Token::ADD:
return expression;
case Token::SUB:
- return factory()->NewNumberLiteral(-value);
+ return factory()->NewNumberLiteral(-value, pos);
case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value));
+ return factory()->NewNumberLiteral(~DoubleToInt32(value), pos);
default:
break;
}
@@ -3205,25 +3184,25 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
if (op == Token::ADD) {
return factory()->NewBinaryOperation(Token::MUL,
expression,
- factory()->NewNumberLiteral(1),
- position);
+ factory()->NewNumberLiteral(1, pos),
+ pos);
}
// The same idea for '-foo' => 'foo*(-1)'.
if (op == Token::SUB) {
return factory()->NewBinaryOperation(Token::MUL,
expression,
- factory()->NewNumberLiteral(-1),
- position);
+ factory()->NewNumberLiteral(-1, pos),
+ pos);
}
// ...and one more time for '~foo' => 'foo^(~0)'.
if (op == Token::BIT_NOT) {
return factory()->NewBinaryOperation(Token::BIT_XOR,
expression,
- factory()->NewNumberLiteral(~0),
- position);
+ factory()->NewNumberLiteral(~0, pos),
+ pos);
}
- return factory()->NewUnaryOperation(op, expression, position);
+ return factory()->NewUnaryOperation(op, expression, pos);
} else if (Token::IsCountOp(op)) {
op = Next();
@@ -3244,11 +3223,10 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
MarkAsLValue(expression);
- int position = scanner().location().beg_pos;
return factory()->NewCountOperation(op,
true /* prefix */,
expression,
- position);
+ position());
} else {
return ParsePostfixExpression(ok);
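
A runnable illustration of why these three rewrites ('+foo' to 'foo*1', '-foo' to 'foo*(-1)', '~foo' to 'foo^(~0)') preserve semantics: each binary form forces the same numeric conversion its unary counterpart would, so later pipeline stages only ever see binary operations. Names and values are illustrative:

    #include <cassert>
    #include <cstdint>

    int main() {
      double foo = 41.5;
      assert(+foo == foo * 1);      // unary plus  == multiply by 1
      assert(-foo == foo * -1);     // negation    == multiply by -1
      int32_t bits = 41;            // '~' acts on the ToInt32 of its operand
      assert(~bits == (bits ^ ~0));
      return 0;
    }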
@@ -3280,12 +3258,11 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
MarkAsLValue(expression);
Token::Value next = Next();
- int position = scanner().location().beg_pos;
expression =
factory()->NewCountOperation(next,
false /* postfix */,
expression,
- position);
+ position());
}
return expression;
}
@@ -3306,7 +3283,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
@@ -3318,14 +3295,14 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
if (scanner().current_token() == Token::IDENTIFIER) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
- pos = scanner().location().beg_pos;
+ pos = position();
} else {
// For other kinds of calls we record position of the parenthesis as
// position of the call. Note that this is extremely important for
// expressions of the form function(){...}() for which call position
// should not point to the closing brace otherwise it will intersect
// with positions recorded for function literal and confuse debugger.
- pos = scanner().peek_location().beg_pos;
+ pos = peek_position();
// Also the trailing parentheses are a hint that the function will
// be called immediately. If we happen to have parsed a preceding
// function literal eagerly, we can also compile it eagerly.
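
The two branches pick different source positions deliberately; a comment sketch of the distinction (positions are illustrative):

    // foo(arg);              // identifier callee: position() = start of 'foo',
    //                        // which is what stack traces should point at
    // (function(){ ... })(); // other callee: peek_position() = the call's '(',
    //                        // kept clear of the function literal's own range
    //                        // so the recorded positions do not intersect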
@@ -3354,10 +3331,10 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
case Token::PERIOD: {
Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3382,7 +3359,7 @@ Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) {
// member expression parser, which is only allowed to match argument
// lists as long as it has 'new' prefixes left
Expect(Token::NEW, CHECK_OK);
- PositionStack::Element pos(stack, scanner().location().beg_pos);
+ PositionStack::Element pos(stack, position());
Expression* result;
if (peek() == Token::NEW) {
@@ -3421,7 +3398,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
Expression* result = NULL;
if (peek() == Token::FUNCTION) {
Expect(Token::FUNCTION, CHECK_OK);
- int function_token_position = scanner().location().beg_pos;
+ int function_token_position = position();
bool is_generator = allow_generators() && Check(Token::MUL);
Handle<String> name;
bool is_strict_reserved_name = false;
@@ -3446,7 +3423,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
switch (peek()) {
case Token::LBRACK: {
Consume(Token::LBRACK);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Expression* index = ParseExpression(true, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
if (fni_ != NULL) {
@@ -3462,10 +3439,10 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
}
case Token::PERIOD: {
Consume(Token::PERIOD);
- int pos = scanner().location().beg_pos;
+ int pos = position();
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result =
- factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+ result = factory()->NewProperty(
+ result, factory()->NewLiteral(name, pos), pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3473,8 +3450,8 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
if ((stack == NULL) || stack->is_empty()) return result;
// Consume one of the new prefixes (already parsed).
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
- int last = stack->pop();
- result = factory()->NewCallNew(result, args, last);
+ int pos = stack->pop();
+ result = factory()->NewCallNew(result, args, pos);
break;
}
default:
@@ -3491,9 +3468,10 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) {
// DebuggerStatement ::
// 'debugger' ';'
+ int pos = peek_position();
Expect(Token::DEBUGGER, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return factory()->NewDebuggerStatement();
+ return factory()->NewDebuggerStatement(pos);
}
@@ -3501,7 +3479,7 @@ void Parser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
// over, in ParseProgram/ParseJson.
- if (token == Token::ILLEGAL && stack_overflow_) return;
+ if (token == Token::ILLEGAL && stack_overflow()) return;
// Four of the tokens are treated specially
switch (token) {
case Token::EOS:
@@ -3555,6 +3533,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
// RegExpLiteral
// '(' Expression ')'
+ int pos = peek_position();
Expression* result = NULL;
switch (peek()) {
case Token::THIS: {
@@ -3565,17 +3544,17 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::NULL_LITERAL:
Consume(Token::NULL_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->null_value());
+ result = factory()->NewLiteral(isolate()->factory()->null_value(), pos);
break;
case Token::TRUE_LITERAL:
Consume(Token::TRUE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->true_value());
+ result = factory()->NewLiteral(isolate()->factory()->true_value(), pos);
break;
case Token::FALSE_LITERAL:
Consume(Token::FALSE_LITERAL);
- result = factory()->NewLiteral(isolate()->factory()->false_value());
+ result = factory()->NewLiteral(isolate()->factory()->false_value(), pos);
break;
case Token::IDENTIFIER:
@@ -3589,8 +3568,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
PrintF("# Variable %s ", name->ToAsciiArray());
#endif
Interface* interface = Interface::NewUnknown(zone());
- result = top_scope_->NewUnresolved(
- factory(), name, interface, scanner().location().beg_pos);
+ result = top_scope_->NewUnresolved(factory(), name, interface, pos);
break;
}
@@ -3601,14 +3579,14 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
scanner().literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTAL |
ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- result = factory()->NewNumberLiteral(value);
+ result = factory()->NewNumberLiteral(value, pos);
break;
}
case Token::STRING: {
Consume(Token::STRING);
Handle<String> symbol = GetSymbol();
- result = factory()->NewLiteral(symbol);
+ result = factory()->NewLiteral(symbol, pos);
if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
}
@@ -3662,12 +3640,13 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
+ int pos = peek_position();
ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone());
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
Expression* elem;
if (peek() == Token::COMMA) {
- elem = GetLiteralTheHole();
+ elem = GetLiteralTheHole(peek_position());
} else {
elem = ParseAssignmentExpression(true, CHECK_OK);
}
@@ -3681,61 +3660,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// Update the scope information before the pre-parsing bailout.
int literal_index = current_function_state_->NextMaterializedLiteralIndex();
- // Allocate a fixed array to hold all the object literals.
- Handle<JSArray> array =
- isolate()->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- isolate()->factory()->SetElementsCapacityAndLength(
- array, values->length(), values->length());
-
- // Fill in the literals.
- Heap* heap = isolate()->heap();
- bool is_simple = true;
- int depth = 1;
- bool is_holey = false;
- for (int i = 0, n = values->length(); i < n; i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() + 1 > depth) {
- depth = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsTheHole()) {
- is_holey = true;
- } else if (boilerplate_value->IsUninitialized()) {
- is_simple = false;
- JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
- } else {
- JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
- }
- }
-
- Handle<FixedArrayBase> element_values(array->elements());
-
- // Simple and shallow arrays can be lazily copied, we transform the
- // elements array to a copy-on-write array.
- if (is_simple && depth == 1 && values->length() > 0 &&
- array->HasFastSmiOrObjectElements()) {
- element_values->set_map(heap->fixed_cow_array_map());
- }
-
- // Remember both the literal's constant values as well as the ElementsKind
- // in a 2-element FixedArray.
- Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(2, TENURED);
-
- ElementsKind kind = array->GetElementsKind();
- kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
-
- literals->set(0, Smi::FromInt(kind));
- literals->set(1, *element_values);
-
- return factory()->NewArrayLiteral(
- literals, values, literal_index, is_simple, depth);
-}
-
-
-bool Parser::IsBoilerplateProperty(ObjectLiteral::Property* property) {
- return property != NULL &&
- property->kind() != ObjectLiteral::Property::PROTOTYPE;
+ return factory()->NewArrayLiteral(values, literal_index, pos);
}
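
With the boilerplate construction gone, NewArrayLiteral needs only the elements, the literal index, and a position; the properties the deleted block precomputed (simplicity, depth, holeyness, copy-on-write eligibility) are presumably derived later from the AST. For reference, the element patterns that drove those flags:

    // [1, 2, 3]     constant, depth 1 -> simple; eligible for copy-on-write
    // [1, , 3]      elision yields the-hole -> holey elements kind
    // [1, [2, 3]]   nested materialized literal -> depth 2
    // [1, f()]      element unknown at compile time -> not simple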
@@ -3782,202 +3707,6 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
}
-Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
- if (expression->AsLiteral() != NULL) {
- return expression->AsLiteral()->value();
- }
- if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(isolate(), expression);
- }
- return isolate()->factory()->uninitialized_value();
-}
-
-
-// Validation per 11.1.5 Object Initialiser
-class ObjectLiteralPropertyChecker {
- public:
- ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
- props_(Literal::Match),
- parser_(parser),
- language_mode_(language_mode) {
- }
-
- void CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok);
-
- private:
- enum PropertyKind {
- kGetAccessor = 0x01,
- kSetAccessor = 0x02,
- kAccessor = kGetAccessor | kSetAccessor,
- kData = 0x04
- };
-
- static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
- switch (property->kind()) {
- case ObjectLiteral::Property::GETTER:
- return kGetAccessor;
- case ObjectLiteral::Property::SETTER:
- return kSetAccessor;
- default:
- return kData;
- }
- }
-
- HashMap props_;
- Parser* parser_;
- LanguageMode language_mode_;
-};
-
-
-void ObjectLiteralPropertyChecker::CheckProperty(
- ObjectLiteral::Property* property,
- Scanner::Location loc,
- bool* ok) {
- ASSERT(property != NULL);
- Literal* literal = property->key();
- HashMap::Entry* entry = props_.Lookup(literal, literal->Hash(), true);
- intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
- intptr_t curr = GetPropertyKind(property);
-
- // Duplicate data properties are illegal in strict or extended mode.
- if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
- parser_->ReportMessageAt(loc, "strict_duplicate_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Data property conflicting with an accessor.
- if (((curr & kData) && (prev & kAccessor)) ||
- ((prev & kData) && (curr & kAccessor))) {
- parser_->ReportMessageAt(loc, "accessor_data_property",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
- // Two accessors of the same type conflicting
- if ((curr & prev & kAccessor) != 0) {
- parser_->ReportMessageAt(loc, "accessor_get_set",
- Vector<const char*>::empty());
- *ok = false;
- return;
- }
-
- // Update map
- entry->value = reinterpret_cast<void*> (prev | curr);
- *ok = true;
-}
-
-
-void Parser::BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constant_properties,
- bool* is_simple,
- bool* fast_elements,
- int* depth,
- bool* may_store_doubles) {
- int position = 0;
- // Accumulate the value in local variables and store it at the end.
- bool is_simple_acc = true;
- int depth_acc = 1;
- uint32_t max_element_index = 0;
- uint32_t elements = 0;
- for (int i = 0; i < properties->length(); i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (!IsBoilerplateProperty(property)) {
- is_simple_acc = false;
- continue;
- }
- MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
-
- // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
- // value for COMPUTED properties, the real value is filled in at
- // runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->value();
- Handle<Object> value = GetBoilerplateValue(property->value());
-
- // Ensure objects that may, at any point in time, contain fields with double
- // representation are always treated as nested objects. This is true for
- // computed fields (value is undefined), and smi and double literals
- // (value->IsNumber()).
- // TODO(verwaest): Remove once we can store them inline.
- if (FLAG_track_double_fields &&
- (value->IsNumber() || value->IsUninitialized())) {
- *may_store_doubles = true;
- }
-
- is_simple_acc = is_simple_acc && !value->IsUninitialized();
-
- // Keep track of the number of elements in the object literal and
- // the largest element index. If the largest element index is
- // much larger than the number of elements, creating an object
- // literal with fast elements will be a waste of space.
- uint32_t element_index = 0;
- if (key->IsString()
- && Handle<String>::cast(key)->AsArrayIndex(&element_index)
- && element_index > max_element_index) {
- max_element_index = element_index;
- elements++;
- } else if (key->IsSmi()) {
- int key_value = Smi::cast(*key)->value();
- if (key_value > 0
- && static_cast<uint32_t>(key_value) > max_element_index) {
- max_element_index = key_value;
- }
- elements++;
- }
-
- // Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
- }
- *fast_elements =
- (max_element_index <= 32) || ((2 * elements) >= max_element_index);
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
-ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
- bool* ok) {
- // Special handling of getter and setter syntax:
- // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
- // We have already read the "get" or "set" keyword.
- Token::Value next = Next();
- bool is_keyword = Token::IsKeyword(next);
- if (next == Token::IDENTIFIER || next == Token::NUMBER ||
- next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD ||
- next == Token::STRING || is_keyword) {
- Handle<String> name;
- if (is_keyword) {
- name = isolate_->factory()->InternalizeUtf8String(Token::String(next));
- } else {
- name = GetSymbol();
- }
- FunctionLiteral* value =
- ParseFunctionLiteral(name,
- false, // reserved words are allowed here
- false, // not a generator
- RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
- CHECK_OK);
- // Allow any number of parameters for compatibilty with JSC.
- // Specification only allows zero parameters for get and one for set.
- return factory()->NewObjectLiteralProperty(is_getter, value);
- } else {
- ReportUnexpectedToken(next);
- *ok = false;
- return NULL;
- }
-}
-
-
Expression* Parser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
// '{' (
@@ -3985,12 +3714,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
+ int pos = peek_position();
ZoneList<ObjectLiteral::Property*>* properties =
new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone());
int number_of_boilerplate_properties = 0;
bool has_function = false;
- ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
+ ObjectLiteralChecker checker(this, top_scope_->language_mode());
Expect(Token::LBRACE, CHECK_OK);
@@ -3999,9 +3729,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Literal* key = NULL;
Token::Value next = peek();
-
- // Location of the property name token
- Scanner::Location loc = scanner().peek_location();
+ int next_pos = peek_position();
switch (next) {
case Token::FUTURE_RESERVED_WORD:
@@ -4014,27 +3742,54 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (fni_ != NULL) fni_->PushLiteralName(id);
if ((is_getter || is_setter) && peek() != Token::COLON) {
- // Update loc to point to the identifier
- loc = scanner().peek_location();
- ObjectLiteral::Property* property =
- ParseObjectLiteralGetSet(is_getter, CHECK_OK);
- if (IsBoilerplateProperty(property)) {
- number_of_boilerplate_properties++;
- }
- // Validate the property.
- checker.CheckProperty(property, loc, CHECK_OK);
- properties->Add(property, zone());
- if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
-
- if (fni_ != NULL) {
- fni_->Infer();
- fni_->Leave();
- }
- continue; // restart the while
+ // Special handling of getter and setter syntax:
+ // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
+ // We have already read the "get" or "set" keyword.
+ Token::Value next = Next();
+ bool is_keyword = Token::IsKeyword(next);
+ if (next != i::Token::IDENTIFIER &&
+ next != i::Token::FUTURE_RESERVED_WORD &&
+ next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != i::Token::NUMBER &&
+ next != i::Token::STRING &&
+ !is_keyword) {
+ // Unexpected token.
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return NULL;
+ }
+ // Validate the property.
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(next, type, CHECK_OK);
+ Handle<String> name = is_keyword
+ ? isolate_->factory()->InternalizeUtf8String(Token::String(next))
+ : GetSymbol();
+ FunctionLiteral* value =
+ ParseFunctionLiteral(name,
+ false, // reserved words are allowed here
+ false, // not a generator
+ RelocInfo::kNoPosition,
+ FunctionLiteral::ANONYMOUS_EXPRESSION,
+ CHECK_OK);
+        // Allow any number of parameters for compatibility with JSC.
+ // Specification only allows zero parameters for get and one for set.
+ ObjectLiteral::Property* property =
+ factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
+ if (ObjectLiteral::IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
+ properties->Add(property, zone());
+ if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
+ continue; // restart the while
}
// Failed to parse as get/set property, so it's just a property
// called "get" or "set".
- key = factory()->NewLiteral(id);
+ key = factory()->NewLiteral(id, next_pos);
break;
}
case Token::STRING: {
@@ -4043,10 +3798,10 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (fni_ != NULL) fni_->PushLiteralName(string);
uint32_t index;
if (!string.is_null() && string->AsArrayIndex(&index)) {
- key = factory()->NewNumberLiteral(index);
+ key = factory()->NewNumberLiteral(index, next_pos);
break;
}
- key = factory()->NewLiteral(string);
+ key = factory()->NewLiteral(string, next_pos);
break;
}
case Token::NUMBER: {
@@ -4056,14 +3811,14 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
scanner().literal_ascii_string(),
ALLOW_HEX | ALLOW_OCTAL |
ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
- key = factory()->NewNumberLiteral(value);
+ key = factory()->NewNumberLiteral(value, next_pos);
break;
}
default:
if (Token::IsKeyword(next)) {
Consume(next);
Handle<String> string = GetSymbol();
- key = factory()->NewLiteral(string);
+ key = factory()->NewLiteral(string, next_pos);
} else {
// Unexpected token.
Token::Value next = Next();
@@ -4073,6 +3828,9 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
}
+ // Validate the property
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
+
Expect(Token::COLON, CHECK_OK);
Expression* value = ParseAssignmentExpression(true, CHECK_OK);
@@ -4089,9 +3847,9 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
- if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
- // Validate the property
- checker.CheckProperty(property, loc, CHECK_OK);
+ if (ObjectLiteral::IsBoilerplateProperty(property)) {
+ number_of_boilerplate_properties++;
+ }
properties->Add(property, zone());
// TODO(1240767): Consider allowing trailing comma.
@@ -4107,31 +3865,16 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// Computation of literal_index must happen before pre parse bailout.
int literal_index = current_function_state_->NextMaterializedLiteralIndex();
- Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
- number_of_boilerplate_properties * 2, TENURED);
-
- bool is_simple = true;
- bool fast_elements = true;
- int depth = 1;
- bool may_store_doubles = false;
- BuildObjectLiteralConstantProperties(properties,
- constant_properties,
- &is_simple,
- &fast_elements,
- &depth,
- &may_store_doubles);
- return factory()->NewObjectLiteral(constant_properties,
- properties,
+ return factory()->NewObjectLiteral(properties,
literal_index,
- is_simple,
- fast_elements,
- depth,
- may_store_doubles,
- has_function);
+ number_of_boilerplate_properties,
+ has_function,
+ pos);
}
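
The token-based ObjectLiteralChecker used here replaces the AST-based ObjectLiteralPropertyChecker deleted above while enforcing the same ECMA-262 11.1.5 rules. What those checks reject, as a comment sketch:

    // ({x: 1, x: 2})              // strict_duplicate_property (strict mode only)
    // ({x: 1, get x() {}})        // accessor_data_property
    // ({get x() {}, set x(v) {}}) // fine: a getter and a setter may coexist
    // ({get x() {}, get x() {}})  // accessor_get_set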
Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
+ int pos = peek_position();
if (!scanner().ScanRegExpPattern(seen_equal)) {
Next();
ReportMessage("unterminated_regexp", Vector<const char*>::empty());
@@ -4146,7 +3889,7 @@ Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
Handle<String> js_flags = NextLiteralString(TENURED);
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index);
+ return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
}
@@ -4271,12 +4014,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Handle<String> function_name,
bool name_is_strict_reserved,
bool is_generator,
- int function_token_position,
+ int function_token_pos,
FunctionLiteral::FunctionType function_type,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
+ int pos = function_token_pos == RelocInfo::kNoPosition
+ ? peek_position() : function_token_pos;
+
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
// handle to decide whether to invoke function name inference.
@@ -4351,9 +4097,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// in a temporary variable, a definition that is used by "yield"
// expressions. Presence of a variable for the generator object in the
// FunctionState indicates that this function is a generator.
- Handle<String> tempname = isolate()->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR(".generator_object"));
- Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ Variable* temp = top_scope_->DeclarationScope()->NewTemporary(
+ isolate()->factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
@@ -4369,8 +4114,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
while (!done) {
bool is_strict_reserved = false;
Handle<String> param_name =
- ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
- CHECK_OK);
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
// Store locations for possible future error reports.
if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
@@ -4414,8 +4158,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name, fvar_mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
- VariableDeclaration* fvar_declaration =
- factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_);
+ VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
+ proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition);
top_scope_->DeclareFunctionVar(fvar_declaration);
}
@@ -4436,7 +4180,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
parenthesized_function_ = false; // The bit was set for this function only.
if (is_lazily_compiled) {
- int function_block_pos = scanner().location().beg_pos;
+ int function_block_pos = position();
FunctionEntry entry;
if (pre_parse_data_ != NULL) {
// If we have pre_parse_data_, we use it to skip parsing the function
@@ -4466,11 +4210,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// building an AST. This gathers the data needed to build a lazy
// function.
SingletonLogger logger;
- preparser::PreParser::PreParseResult result =
- LazyParseFunctionLiteral(&logger);
- if (result == preparser::PreParser::kPreParseStackOverflow) {
+ PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger);
+ if (result == PreParser::kPreParseStackOverflow) {
// Propagate stack overflow.
- stack_overflow_ = true;
+ set_stack_overflow();
*ok = false;
return NULL;
}
@@ -4505,9 +4248,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
body->Add(factory()->NewExpressionStatement(
factory()->NewAssignment(fvar_init_op,
fproxy,
- factory()->NewThisFunction(),
- RelocInfo::kNoPosition)),
- zone());
+ factory()->NewThisFunction(pos),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition), zone());
}
// For generators, allocate and yield an iterator on function entry.
@@ -4517,7 +4260,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CallRuntime* allocation = factory()->NewCallRuntime(
isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
- arguments);
+ arguments, pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
@@ -4526,7 +4269,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
current_function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(yield), zone());
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
}
ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
@@ -4535,10 +4279,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
VariableProxy* get_proxy = factory()->NewVariableProxy(
current_function_state_->generator_object_variable());
Expression *undefined = factory()->NewLiteral(
- isolate()->factory()->undefined_value());
+ isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
Yield* yield = factory()->NewYield(
get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(yield), zone());
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
}
materialized_literal_count = function_state.materialized_literal_count();
@@ -4553,9 +4298,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (!top_scope_->is_classic_mode()) {
if (IsEvalOrArguments(function_name)) {
int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
+ int position = function_token_pos != RelocInfo::kNoPosition
+ ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
Scanner::Location location = Scanner::Location(position, start_pos);
ReportMessageAt(location,
"strict_function_name", Vector<const char*>::empty());
@@ -4576,9 +4320,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (name_is_strict_reserved) {
int start_pos = scope->start_position();
- int position = function_token_position != RelocInfo::kNoPosition
- ? function_token_position
- : (start_pos > 0 ? start_pos - 1 : start_pos);
+ int position = function_token_pos != RelocInfo::kNoPosition
+ ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos);
Scanner::Location location = Scanner::Location(position, start_pos);
ReportMessageAt(location, "strict_reserved_word",
Vector<const char*>::empty());
@@ -4615,8 +4358,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_type,
FunctionLiteral::kIsFunction,
parenthesized,
- generator);
- function_literal->set_function_token_position(function_token_position);
+ generator,
+ pos);
+ function_literal->set_function_token_position(function_token_pos);
function_literal->set_ast_properties(&ast_properties);
function_literal->set_dont_optimize_reason(dont_optimize_reason);
@@ -4625,16 +4369,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
-preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
+PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
SingletonLogger* logger) {
HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
ASSERT_EQ(Token::LBRACE, scanner().current_token());
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
- reusable_preparser_ = new preparser::PreParser(&scanner_,
- NULL,
- stack_limit);
+ reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit);
reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
reusable_preparser_->set_allow_modules(allow_modules());
reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
@@ -4644,7 +4386,7 @@ preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
reusable_preparser_->set_allow_harmony_numeric_literals(
allow_harmony_numeric_literals());
}
- preparser::PreParser::PreParseResult result =
+ PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
is_generator(),
logger);
@@ -4656,6 +4398,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
+ int pos = peek_position();
Expect(Token::MOD, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
@@ -4701,11 +4444,11 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
}
// We have a valid intrinsics call or a call to a builtin.
- return factory()->NewCallRuntime(name, function, args);
+ return factory()->NewCallRuntime(name, function, args, pos);
}
-bool Parser::peek_any_identifier() {
+bool ParserBase::peek_any_identifier() {
Token::Value next = peek();
return next == Token::IDENTIFIER ||
next == Token::FUTURE_RESERVED_WORD ||
@@ -4714,35 +4457,9 @@ bool Parser::peek_any_identifier() {
}
-void Parser::Consume(Token::Value token) {
- Token::Value next = Next();
- USE(next);
- USE(token);
- ASSERT(next == token);
-}
-
-
-void Parser::Expect(Token::Value token, bool* ok) {
- Token::Value next = Next();
- if (next == token) return;
- ReportUnexpectedToken(next);
- *ok = false;
-}
-
-
-bool Parser::Check(Token::Value token) {
- Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
-}
-
-
-bool Parser::CheckContextualKeyword(Vector<const char> keyword) {
+bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
if (peek() == Token::IDENTIFIER &&
- scanner().is_next_contextual_keyword(keyword)) {
+ scanner()->is_next_contextual_keyword(keyword)) {
Consume(Token::IDENTIFIER);
return true;
}
@@ -4750,7 +4467,7 @@ bool Parser::CheckContextualKeyword(Vector<const char> keyword) {
}
-void Parser::ExpectSemicolon(bool* ok) {
+void ParserBase::ExpectSemicolon(bool* ok) {
// Check for automatic semicolon insertion according to
// the rules given in ECMA-262, section 7.9, page 21.
Token::Value tok = peek();
@@ -4758,7 +4475,7 @@ void Parser::ExpectSemicolon(bool* ok) {
Next();
return;
}
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ if (scanner()->HasAnyLineTerminatorBeforeNext() ||
tok == Token::RBRACE ||
tok == Token::EOS) {
return;
@@ -4767,23 +4484,25 @@ void Parser::ExpectSemicolon(bool* ok) {
}
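
The semicolon test above implements automatic semicolon insertion (ECMA-262 ed. 5, section 7.9). A minimal standalone sketch of the same decision, with a hypothetical Token enum standing in for V8's Token::Value:

    // Sketch of the ASI rule: a statement may end without an explicit ';'
    // when a line terminator precedes the next token, or when that token
    // is '}' or end-of-source. Token is a stand-in, not V8's real type.
    enum class Token { SEMICOLON, RBRACE, EOS, OTHER };

    bool CanOmitSemicolon(Token next, bool line_terminator_before_next) {
      return line_terminator_before_next ||
             next == Token::RBRACE ||
             next == Token::EOS;
    }
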
-void Parser::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
+void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return;
- if (!scanner().is_literal_contextual_keyword(keyword)) {
+ if (!scanner()->is_literal_contextual_keyword(keyword)) {
+ ReportUnexpectedToken(scanner()->current_token());
*ok = false;
- ReportUnexpectedToken(scanner().current_token());
}
}
-Literal* Parser::GetLiteralUndefined() {
- return factory()->NewLiteral(isolate()->factory()->undefined_value());
+Literal* Parser::GetLiteralUndefined(int position) {
+ return factory()->NewLiteral(
+ isolate()->factory()->undefined_value(), position);
}
-Literal* Parser::GetLiteralTheHole() {
- return factory()->NewLiteral(isolate()->factory()->the_hole_value());
+Literal* Parser::GetLiteralTheHole(int position) {
+ return factory()->NewLiteral(
+ isolate()->factory()->the_hole_value(), RelocInfo::kNoPosition);
}
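
A recurring change throughout this patch is that AST factory methods (NewLiteral, NewCallRuntime, NewExpressionStatement, NewThisFunction, ...) now take an explicit source position, with RelocInfo::kNoPosition marking synthesized nodes. A minimal sketch of the pattern, using a hypothetical node type rather than V8's AST classes:

    // Every node records the position of the token that produced it;
    // kNoPosition (-1, mirroring RelocInfo::kNoPosition) marks nodes the
    // parser synthesizes rather than reads from source.
    static const int kNoPosition = -1;

    struct AstNode {
      explicit AstNode(int position) : position_(position) {}
      int position() const { return position_; }
     private:
      int position_;
    };
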
@@ -4865,14 +4584,11 @@ void Parser::CheckStrictModeLValue(Expression* expression,
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// If so, reports an error. Only called for strict mode.
-void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- Scanner::Location octal = scanner().octal_position();
- if (octal.IsValid() &&
- beg_pos <= octal.beg_pos &&
- octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal",
- Vector<const char*>::empty());
- scanner().clear_octal_position();
+void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ Scanner::Location octal = scanner()->octal_position();
+ if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
+ ReportMessageAt(octal, "strict_octal_literal");
+ scanner()->clear_octal_position();
*ok = false;
}
}
@@ -5012,12 +4728,13 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
elements, FAST_ELEMENTS, TENURED);
+ int pos = position();
ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewLiteral(message), zone());
- args->Add(factory()->NewLiteral(array), zone());
+ args->Add(factory()->NewLiteral(message, pos), zone());
+ args->Add(factory()->NewLiteral(array, pos), zone());
CallRuntime* call_constructor =
- factory()->NewCallRuntime(constructor, NULL, args);
- return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
+ factory()->NewCallRuntime(constructor, NULL, args, pos);
+ return factory()->NewThrow(call_constructor, pos);
}
@@ -5829,13 +5546,6 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
// ----------------------------------------------------------------------------
// The Parser interface.
-ParserMessage::~ParserMessage() {
- for (int i = 0; i < args().length(); i++)
- DeleteArray(args()[i]);
- DeleteArray(args().start());
-}
-
-
ScriptDataImpl::~ScriptDataImpl() {
if (owns_store_) store_.Dispose();
}
@@ -5907,15 +5617,15 @@ ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate,
HistogramTimerScope timer(isolate->counters()->pre_parse());
Scanner scanner(isolate->unicode_cache());
intptr_t stack_limit = isolate->stack_guard()->real_climit();
- preparser::PreParser preparser(&scanner, &recorder, stack_limit);
+ PreParser preparser(&scanner, &recorder, stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_generators(FLAG_harmony_generators);
preparser.set_allow_for_of(FLAG_harmony_iteration);
preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
preparser.set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
scanner.Initialize(source);
- preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
- if (result == preparser::PreParser::kPreParseStackOverflow) {
+ PreParser::PreParseResult result = preparser.PreParseProgram();
+ if (result == PreParser::kPreParseStackOverflow) {
isolate->StackOverflow();
return NULL;
}
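
Both parser and preparser bound their recursion by the real C stack limit (real_climit) and surface kPreParseStackOverflow instead of crashing. A sketch of the underlying check, assuming a downward-growing stack as V8's StackLimitCheck does:

    #include <stdint.h>

    // Returns true once the current stack pointer has crossed 'limit'.
    // Taking the address of a local approximates the stack pointer;
    // this assumes the stack grows toward lower addresses.
    bool HasOverflowed(uintptr_t limit) {
      char dummy;
      return reinterpret_cast<uintptr_t>(&dummy) < limit;
    }

    // Typical use in a recursive-descent production:
    //   if (HasOverflowed(stack_limit_)) { *ok = false; return NULL; }
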
diff --git a/chromium/v8/src/parser.h b/chromium/v8/src/parser.h
index 783626ad190..dd8e600f9d7 100644
--- a/chromium/v8/src/parser.h
+++ b/chromium/v8/src/parser.h
@@ -47,24 +47,6 @@ class Target;
template <typename T> class ZoneListWrapper;
-class ParserMessage : public Malloced {
- public:
- ParserMessage(Scanner::Location loc, const char* message,
- Vector<const char*> args)
- : loc_(loc),
- message_(message),
- args_(args) { }
- ~ParserMessage();
- Scanner::Location location() { return loc_; }
- const char* message() { return message_; }
- Vector<const char*> args() { return args_; }
- private:
- Scanner::Location loc_;
- const char* message_;
- Vector<const char*> args_;
-};
-
-
class FunctionEntry BASE_EMBEDDED {
public:
enum {
@@ -425,7 +407,7 @@ class RegExpParser BASE_EMBEDDED {
// Forward declaration.
class SingletonLogger;
-class Parser BASE_EMBEDDED {
+class Parser : public ParserBase {
public:
explicit Parser(CompilationInfo* info);
~Parser() {
@@ -433,44 +415,12 @@ class Parser BASE_EMBEDDED {
reusable_preparser_ = NULL;
}
- bool allow_natives_syntax() const { return allow_natives_syntax_; }
- bool allow_lazy() const { return allow_lazy_; }
- bool allow_modules() { return scanner().HarmonyModules(); }
- bool allow_harmony_scoping() { return scanner().HarmonyScoping(); }
- bool allow_generators() const { return allow_generators_; }
- bool allow_for_of() const { return allow_for_of_; }
- bool allow_harmony_numeric_literals() {
- return scanner().HarmonyNumericLiterals();
- }
-
- void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
- void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
- void set_allow_modules(bool allow) { scanner().SetHarmonyModules(allow); }
- void set_allow_harmony_scoping(bool allow) {
- scanner().SetHarmonyScoping(allow);
- }
- void set_allow_generators(bool allow) { allow_generators_ = allow; }
- void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
- void set_allow_harmony_numeric_literals(bool allow) {
- scanner().SetHarmonyNumericLiterals(allow);
- }
-
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); }
bool Parse();
- // Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram();
-
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<const char*> args);
- void ReportMessageAt(Scanner::Location loc,
- const char* message,
- Vector<Handle<String> > args);
-
private:
static const int kMaxNumFunctionLocals = 131071; // 2^17-1
@@ -568,6 +518,9 @@ class Parser BASE_EMBEDDED {
Mode old_mode_;
};
+ // Returns NULL if parsing failed.
+ FunctionLiteral* ParseProgram();
+
FunctionLiteral* ParseLazy();
FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
@@ -584,6 +537,15 @@ class Parser BASE_EMBEDDED {
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
void ReportMessage(const char* message, Vector<const char*> args);
void ReportMessage(const char* message, Vector<Handle<String> > args);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, Vector<const char*>::empty());
+ }
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<const char*> args);
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<Handle<String> > args);
void set_pre_parse_data(ScriptDataImpl *data) {
pre_parse_data_ = data;
@@ -671,28 +633,8 @@ class Parser BASE_EMBEDDED {
Expression* ParsePrimaryExpression(bool* ok);
Expression* ParseArrayLiteral(bool* ok);
Expression* ParseObjectLiteral(bool* ok);
- ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
- // Populate the constant properties fixed array for a materialized object
- // literal.
- void BuildObjectLiteralConstantProperties(
- ZoneList<ObjectLiteral::Property*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- bool* fast_elements,
- int* depth,
- bool* may_store_doubles);
-
- // Decide if a property should be in the object boilerplate.
- bool IsBoilerplateProperty(ObjectLiteral::Property* property);
- // If the expression is a literal, return the literal value;
- // if the expression is a materialized literal and is simple return a
- // compile time value as encoded by CompileTimeValue::GetValue().
- // Otherwise, return undefined literal as the placeholder
- // in the object literal boilerplate.
- Handle<Object> GetBoilerplateValue(Expression* expression);
-
// Initialize the components of a for-in / for-of statement.
void InitializeForEachStatement(ForEachStatement* stmt,
Expression* each,
@@ -711,40 +653,10 @@ class Parser BASE_EMBEDDED {
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
- INLINE(Token::Value peek()) {
- if (stack_overflow_) return Token::ILLEGAL;
- return scanner().peek();
- }
-
- INLINE(Token::Value Next()) {
- // BUG 1215673: Find a thread safe way to set a stack limit in
- // pre-parse mode. Otherwise, we cannot safely pre-parse from other
- // threads.
- if (stack_overflow_) {
- return Token::ILLEGAL;
- }
- if (StackLimitCheck(isolate()).HasOverflowed()) {
- // Any further calls to Next or peek will return the illegal token.
- // The current call must return the next token, which might already
- // have been peek'ed.
- stack_overflow_ = true;
- }
- return scanner().Next();
- }
-
bool is_generator() const { return current_function_state_->is_generator(); }
bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode);
- bool peek_any_identifier();
-
- INLINE(void Consume(Token::Value token));
- void Expect(Token::Value token, bool* ok);
- bool Check(Token::Value token);
- void ExpectSemicolon(bool* ok);
- bool CheckContextualKeyword(Vector<const char> keyword);
- void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
-
Handle<String> LiteralString(PretenureFlag tenured) {
if (scanner().is_literal_ascii()) {
return isolate_->factory()->NewStringFromAscii(
@@ -768,8 +680,8 @@ class Parser BASE_EMBEDDED {
Handle<String> GetSymbol();
// Get odd-ball literals.
- Literal* GetLiteralUndefined();
- Literal* GetLiteralTheHole();
+ Literal* GetLiteralUndefined(int position);
+ Literal* GetLiteralTheHole(int position);
Handle<String> ParseIdentifier(bool* ok);
Handle<String> ParseIdentifierOrStrictReservedWord(
@@ -789,9 +701,6 @@ class Parser BASE_EMBEDDED {
const char* error,
bool* ok);
- // Strict mode octal literal validation.
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
// For harmony block scoping mode: Check if the scope has conflicting var/let
// declarations from different scopes. It covers for example
//
@@ -842,7 +751,7 @@ class Parser BASE_EMBEDDED {
Handle<String> type,
Vector< Handle<Object> > arguments);
- preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
+ PreParser::PreParseResult LazyParseFunctionLiteral(
SingletonLogger* logger);
AstNodeFactory<AstConstructionVisitor>* factory() {
@@ -854,7 +763,7 @@ class Parser BASE_EMBEDDED {
Handle<Script> script_;
Scanner scanner_;
- preparser::PreParser* reusable_preparser_;
+ PreParser* reusable_preparser_;
Scope* top_scope_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
FunctionState* current_function_state_;
@@ -864,11 +773,6 @@ class Parser BASE_EMBEDDED {
FuncNameInferrer* fni_;
Mode mode_;
- bool allow_natives_syntax_;
- bool allow_lazy_;
- bool allow_generators_;
- bool allow_for_of_;
- bool stack_overflow_;
// If true, the next (and immediately following) function literal is
// preceded by a parenthesis.
// Heuristically that means that the function will be called immediately,
diff --git a/chromium/v8/src/platform-cygwin.cc b/chromium/v8/src/platform-cygwin.cc
index 4d3b1e313e6..0076d567f8b 100644
--- a/chromium/v8/src/platform-cygwin.cc
+++ b/chromium/v8/src/platform-cygwin.cc
@@ -41,7 +41,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
@@ -88,11 +87,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -205,12 +199,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // Not supported on Cygwin.
- return 0;
-}
-
-
// The VirtualMemory implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
diff --git a/chromium/v8/src/platform-freebsd.cc b/chromium/v8/src/platform-freebsd.cc
index d81827805a4..75d88ec5d33 100644
--- a/chromium/v8/src/platform-freebsd.cc
+++ b/chromium/v8/src/platform-freebsd.cc
@@ -43,7 +43,6 @@
#include <sys/fcntl.h> // open
#include <unistd.h> // getpagesize
// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -54,7 +53,6 @@
#include "v8.h"
#include "v8threads.h"
-#include "platform-posix.h"
#include "platform.h"
#include "vm-state-inl.h"
@@ -97,11 +95,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -189,7 +182,7 @@ void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(isolate SharedLibraryEvent(start_of_path, start, end));
+ LOG(isolate, SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
}
@@ -199,10 +192,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
diff --git a/chromium/v8/src/platform-linux.cc b/chromium/v8/src/platform-linux.cc
index b8b96025e1f..eb2d10b3f9d 100644
--- a/chromium/v8/src/platform-linux.cc
+++ b/chromium/v8/src/platform-linux.cc
@@ -38,11 +38,6 @@
#include <sys/types.h>
#include <stdlib.h>
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <execinfo.h>
-#include <cxxabi.h>
-#endif
-
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code
// in that page.
@@ -66,7 +61,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -154,14 +148,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-#endif
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -313,16 +299,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-#else
- return 0;
-#endif
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
diff --git a/chromium/v8/src/platform-macos.cc b/chromium/v8/src/platform-macos.cc
index 67cc96f9379..5ffc3fc54c4 100644
--- a/chromium/v8/src/platform-macos.cc
+++ b/chromium/v8/src/platform-macos.cc
@@ -53,27 +53,15 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
-#include <cxxabi.h>
#undef MAP_TYPE
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "vm-state-inl.h"
-// Manually define these here as weak imports, rather than including execinfo.h.
-// This lets us launch on 10.4 which does not have these calls.
-extern "C" {
- extern int backtrace(void**, int) __attribute__((weak_import));
- extern char** backtrace_symbols(void* const*, int)
- __attribute__((weak_import));
- extern void backtrace_symbols_fd(void* const*, int, int)
- __attribute__((weak_import));
-}
-
namespace v8 {
namespace internal {
@@ -107,14 +95,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL) return;
-
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -220,14 +200,6 @@ double OS::LocalTimeOffset() {
}
-int OS::StackWalk(Vector<StackFrame> frames) {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL) return 0;
-
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
-
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
diff --git a/chromium/v8/src/platform-openbsd.cc b/chromium/v8/src/platform-openbsd.cc
index 30a484f4b30..710c3904afb 100644
--- a/chromium/v8/src/platform-openbsd.cc
+++ b/chromium/v8/src/platform-openbsd.cc
@@ -42,7 +42,6 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -51,7 +50,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -96,11 +94,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -231,34 +224,6 @@ void OS::SignalCodeMovingGC() {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
diff --git a/chromium/v8/src/platform-posix.cc b/chromium/v8/src/platform-posix.cc
index fe27eaf71f4..879dcc81484 100644
--- a/chromium/v8/src/platform-posix.cc
+++ b/chromium/v8/src/platform-posix.cc
@@ -29,8 +29,6 @@
// own but contains the parts which are the same across POSIX platforms Linux,
// Mac OS, FreeBSD and OpenBSD.
-#include "platform-posix.h"
-
#include <dlfcn.h>
#include <pthread.h>
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
@@ -102,6 +100,48 @@ intptr_t OS::MaxVirtualMemory() {
}
+uint64_t OS::TotalPhysicalMemory() {
+#if V8_OS_MACOSX
+ int mib[2];
+ mib[0] = CTL_HW;
+ mib[1] = HW_MEMSIZE;
+ int64_t size = 0;
+ size_t len = sizeof(size);
+ if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(size);
+#elif V8_OS_FREEBSD
+ int pages, page_size;
+ size_t size = sizeof(pages);
+ sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+ sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#elif V8_OS_CYGWIN
+ MEMORYSTATUS memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatus(&memory_info)) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(memory_info.dwTotalPhys);
+#else
+ intptr_t pages = sysconf(_SC_PHYS_PAGES);
+ intptr_t page_size = sysconf(_SC_PAGESIZE);
+ if (pages == -1 || page_size == -1) {
+ UNREACHABLE();
+ return 0;
+ }
+ return static_cast<uint64_t>(pages) * page_size;
+#endif
+}
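+
+// Editor's sketch: the generic branch above is the standard sysconf recipe,
+// and the same two calls work on most non-Apple POSIX systems. Standalone:
+//
+//   #include <stdint.h>
+//   #include <unistd.h>
+//
+//   // Total physical memory = page count * page size. Either call may
+//   // return -1 if the system does not support the query.
+//   uint64_t TotalPhysicalMemory() {
+//     long pages = sysconf(_SC_PHYS_PAGES);
+//     long page_size = sysconf(_SC_PAGESIZE);
+//     if (pages == -1 || page_size == -1) return 0;
+//     return static_cast<uint64_t>(pages) * static_cast<uint64_t>(page_size);
+//   }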
+
+
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@@ -248,12 +288,6 @@ void OS::DebugBreak() {
// ----------------------------------------------------------------------------
// Math functions
-double ceiling(double x) {
- // Correct buggy 'ceil' on some systems (i.e. FreeBSD, OS X 10.5)
- return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
-}
-
-
double modulo(double x, double y) {
return fmod(x, y);
}
@@ -524,6 +558,9 @@ Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size()),
start_semaphore_(NULL) {
+ if (stack_size_ > 0 && stack_size_ < PTHREAD_STACK_MIN) {
+ stack_size_ = PTHREAD_STACK_MIN;
+ }
set_name(options.name());
}
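
The constructor change above clamps caller-supplied stack sizes to PTHREAD_STACK_MIN, since pthread_attr_setstacksize rejects smaller values with EINVAL. A sketch of how the clamped value would be applied, with a hypothetical thread-entry function:

    #include <limits.h>
    #include <pthread.h>

    void* ThreadEntry(void* arg) { return arg; }  // placeholder body

    int StartThread(pthread_t* thread, size_t stack_size) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      if (stack_size > 0) {
        // Undersized stacks make pthread_create fail with EINVAL.
        if (stack_size < PTHREAD_STACK_MIN) stack_size = PTHREAD_STACK_MIN;
        pthread_attr_setstacksize(&attr, stack_size);
      }
      int result = pthread_create(thread, &attr, ThreadEntry, NULL);
      pthread_attr_destroy(&attr);
      return result;
    }
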
diff --git a/chromium/v8/src/platform-posix.h b/chromium/v8/src/platform-posix.h
deleted file mode 100644
index 6b73387cd79..00000000000
--- a/chromium/v8/src/platform-posix.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_POSIX_H_
-#define V8_PLATFORM_POSIX_H_
-
-#if !defined(ANDROID)
-#include <cxxabi.h>
-#endif
-#include <stdio.h>
-
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// Used by platform implementation files during OS::DumpBacktrace()
-// and OS::StackWalk().
-template<int (*backtrace)(void**, int),
- char** (*backtrace_symbols)(void* const*, int)>
-struct POSIXBacktraceHelper {
- static void DumpBacktrace() {
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- fprintf(stderr, "\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- fprintf(stderr, "(empty)\n");
- } else if (symbols == NULL) {
- fprintf(stderr, "(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- fprintf(stderr, "%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {// NOLINT
- char* demangled = NULL;
-#if !defined(ANDROID)
- int status;
- size_t length;
- demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
-#endif
- fprintf(stderr, "%s\n", demangled != NULL ? demangled : mangled);
- free(demangled);
- } else {
- fprintf(stderr, "??\n");
- }
- }
- }
- fflush(stderr);
- free(symbols);
- }
-
- static int StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return OS::kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen),
- "%s", symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_POSIX_H_
diff --git a/chromium/v8/src/platform-solaris.cc b/chromium/v8/src/platform-solaris.cc
index f082af12540..a0590cbecb8 100644
--- a/chromium/v8/src/platform-solaris.cc
+++ b/chromium/v8/src/platform-solaris.cc
@@ -51,7 +51,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -112,11 +111,6 @@ void* OS::Allocate(const size_t requested,
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -211,20 +205,6 @@ static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- ucontext_t ctx;
- struct StackWalker walker = { frames, 0 };
-
- if (getcontext(&ctx) < 0) return kStackWalkError;
-
- if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
- return kStackWalkError;
- }
-
- return walker.index;
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
diff --git a/chromium/v8/src/platform-win32.cc b/chromium/v8/src/platform-win32.cc
index ea4f7ea11f4..ea11806cb41 100644
--- a/chromium/v8/src/platform-win32.cc
+++ b/chromium/v8/src/platform-win32.cc
@@ -133,11 +133,6 @@ intptr_t OS::MaxVirtualMemory() {
}
-double ceiling(double x) {
- return ceil(x);
-}
-
-
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
@@ -240,12 +235,16 @@ void MathSetup() {
class Win32Time {
public:
// Constructors.
+ Win32Time();
explicit Win32Time(double jstime);
Win32Time(int year, int mon, int day, int hour, int min, int sec);
// Convert timestamp to JavaScript representation.
double ToJSTime();
+ // Set timestamp to current time.
+ void SetToCurrentTime();
+
// Returns the local timezone offset in milliseconds east of UTC. This is
// the number of milliseconds you must add to UTC to get local time, i.e.
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
@@ -314,6 +313,12 @@ char Win32Time::std_tz_name_[kTzNameSize];
char Win32Time::dst_tz_name_[kTzNameSize];
+// Initialize timestamp to start of epoc.
+Win32Time::Win32Time() {
+ t() = 0;
+}
+
+
// Initialize timestamp from a JavaScript timestamp.
Win32Time::Win32Time(double jstime) {
t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
@@ -340,6 +345,62 @@ double Win32Time::ToJSTime() {
}
+// Set timestamp to current time.
+void Win32Time::SetToCurrentTime() {
+ // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+ // Because we're fast, we like fast timers which have at least a
+ // 1ms resolution.
+ //
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for v8 wants fast
+ // timers, it can use timeBeginPeriod to increase the resolution.
+ //
+ // Using timeGetTime() has a drawback because it is a 32bit value
+ // and hence rolls-over every ~49days.
+ //
+ // To use the clock, we use GetSystemTimeAsFileTime as our base;
+ // and then use timeGetTime to extrapolate current time from the
+ // start time. To deal with rollovers, we resync the clock
+ // any time when more than kMaxClockElapsedTime has passed or
+ // whenever timeGetTime creates a rollover.
+
+ static bool initialized = false;
+ static TimeStamp init_time;
+ static DWORD init_ticks;
+ static const int64_t kHundredNanosecondsPerSecond = 10000000;
+ static const int64_t kMaxClockElapsedTime =
+ 60*kHundredNanosecondsPerSecond; // 1 minute
+
+ // If we are uninitialized, we need to resync the clock.
+ bool needs_resync = !initialized;
+
+ // Get the current time.
+ TimeStamp time_now;
+ GetSystemTimeAsFileTime(&time_now.ft_);
+ DWORD ticks_now = timeGetTime();
+
+ // Check if we need to resync due to clock rollover.
+ needs_resync |= ticks_now < init_ticks;
+
+ // Check if we need to resync due to elapsed time.
+ needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+ // Check if we need to resync due to backwards time change.
+ needs_resync |= time_now.t_ < init_time.t_;
+
+ // Resync the clock if necessary.
+ if (needs_resync) {
+ GetSystemTimeAsFileTime(&init_time.ft_);
+ init_ticks = ticks_now = timeGetTime();
+ initialized = true;
+ }
+
+ // Finally, compute the actual time. Why is this so hard.
+ DWORD elapsed = ticks_now - init_ticks;
+ this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
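+
+// Editor's sketch: SetToCurrentTime() combines a coarse wall clock with a
+// fast tick counter: record a (wall time, ticks) base pair, extrapolate by
+// the tick delta, and resync when too much time has elapsed or the wall
+// clock jumps backwards. A portable, single-threaded sketch of the same
+// policy using std::chrono stand-ins for GetSystemTimeAsFileTime and
+// timeGetTime (steady_clock cannot wrap, so the rollover test drops out):
+//
+//   #include <chrono>
+//
+//   std::chrono::system_clock::time_point ExtrapolatedNow() {
+//     using namespace std::chrono;
+//     static bool initialized = false;
+//     static system_clock::time_point base_time;
+//     static steady_clock::time_point base_ticks;
+//     const auto kMaxElapsed = minutes(1);
+//
+//     auto ticks = steady_clock::now();
+//     bool resync = !initialized ||
+//                   ticks - base_ticks > kMaxElapsed ||
+//                   system_clock::now() < base_time;  // backwards jump
+//     if (resync) {
+//       base_time = system_clock::now();
+//       base_ticks = ticks;
+//       initialized = true;
+//     }
+//     return base_time +
+//            duration_cast<system_clock::duration>(ticks - base_ticks);
+//   }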
+
+
// Guess the name of the timezone from the bias.
// The guess is very biased towards the northern hemisphere.
const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
@@ -891,11 +952,6 @@ void OS::DebugBreak() {
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
Win32MemoryMappedFile(HANDLE file,
@@ -1208,133 +1264,21 @@ void OS::SignalCodeMovingGC() {
}
-// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
-
-// Switch off warning 4748 (/GS can not protect parameters and local variables
-// from local buffer overrun because optimizations are disabled in function) as
-// it is triggered by the use of inline assembler.
-#pragma warning(push)
-#pragma warning(disable : 4748)
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- BOOL ok;
-
- // Load the required functions from DLL's.
- if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
-
- // Get the process and thread handles.
- HANDLE process_handle = GetCurrentProcess();
- HANDLE thread_handle = GetCurrentThread();
-
- // Read the symbols.
- if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError;
-
- // Capture current context.
- CONTEXT context;
- RtlCaptureContext(&context);
-
- // Initialize the stack walking
- STACKFRAME64 stack_frame;
- memset(&stack_frame, 0, sizeof(stack_frame));
-#ifdef _WIN64
- stack_frame.AddrPC.Offset = context.Rip;
- stack_frame.AddrFrame.Offset = context.Rbp;
- stack_frame.AddrStack.Offset = context.Rsp;
-#else
- stack_frame.AddrPC.Offset = context.Eip;
- stack_frame.AddrFrame.Offset = context.Ebp;
- stack_frame.AddrStack.Offset = context.Esp;
-#endif
- stack_frame.AddrPC.Mode = AddrModeFlat;
- stack_frame.AddrFrame.Mode = AddrModeFlat;
- stack_frame.AddrStack.Mode = AddrModeFlat;
- int frames_count = 0;
-
- // Collect stack frames.
- int frames_size = frames.length();
- while (frames_count < frames_size) {
- ok = _StackWalk64(
- IMAGE_FILE_MACHINE_I386, // MachineType
- process_handle, // hProcess
- thread_handle, // hThread
- &stack_frame, // StackFrame
- &context, // ContextRecord
- NULL, // ReadMemoryRoutine
- _SymFunctionTableAccess64, // FunctionTableAccessRoutine
- _SymGetModuleBase64, // GetModuleBaseRoutine
- NULL); // TranslateAddress
- if (!ok) break;
-
- // Store the address.
- ASSERT((stack_frame.AddrPC.Offset >> 32) == 0); // 32-bit address.
- frames[frames_count].address =
- reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
-
- // Try to locate a symbol for this frame.
- DWORD64 symbol_displacement;
- SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
- NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
- if (symbol.is_empty()) return kStackWalkError; // Out of memory.
- memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
- (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
- ok = _SymGetSymFromAddr64(process_handle, // hProcess
- stack_frame.AddrPC.Offset, // Address
- &symbol_displacement, // Displacement
- *symbol); // Symbol
- if (ok) {
- // Try to locate more source information for the symbol.
- IMAGEHLP_LINE64 Line;
- memset(&Line, 0, sizeof(Line));
- Line.SizeOfStruct = sizeof(Line);
- DWORD line_displacement;
- ok = _SymGetLineFromAddr64(
- process_handle, // hProcess
- stack_frame.AddrPC.Offset, // dwAddr
- &line_displacement, // pdwDisplacement
- &Line); // Line
- // Format a text representation of the frame based on the information
- // available.
- if (ok) {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s %s:%d:%d",
- (*symbol)->Name, Line.FileName, Line.LineNumber,
- line_displacement);
- } else {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s",
- (*symbol)->Name);
- }
- // Make sure line termination is in place.
- frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
- } else {
- // No text representation of this frame
- frames[frames_count].text[0] = '\0';
-
- // Continue if we are just missing a module (for non C/C++ frames a
- // module will never be found).
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND) {
- break;
- }
- }
-
- frames_count++;
+uint64_t OS::TotalPhysicalMemory() {
+ MEMORYSTATUSEX memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatusEx(&memory_info)) {
+ UNREACHABLE();
+ return 0;
}
- // Return the number of frames filled in.
- return frames_count;
+ return static_cast<uint64_t>(memory_info.ullTotalPhys);
}
-// Restore warnings to previous settings.
-#pragma warning(pop)
-
#else // __MINGW32__
void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
void OS::SignalCodeMovingGC() { }
-int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
#endif // __MINGW32__
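
One motivation for the new OS::TotalPhysicalMemory() API is sizing heap limits relative to the machine rather than from fixed constants. A hypothetical sketch of that kind of use; the fraction and cap are illustrative numbers, not V8's actual policy:

    #include <stdint.h>

    // Pick a default max heap size: a quarter of physical memory,
    // capped at 1 GB. Purely illustrative values.
    uint64_t DefaultMaxHeapSize(uint64_t total_physical) {
      const uint64_t kCap = static_cast<uint64_t>(1) << 30;  // 1 GB
      uint64_t candidate = total_physical / 4;
      return candidate < kCap ? candidate : kCap;
    }
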
diff --git a/chromium/v8/src/platform.h b/chromium/v8/src/platform.h
index ee8fb92910b..3bd87a98326 100644
--- a/chromium/v8/src/platform.h
+++ b/chromium/v8/src/platform.h
@@ -67,6 +67,8 @@ int signbit(double x);
int strncasecmp(const char* s1, const char* s2, int n);
+// Visual C++ 2013 and higher implement this function.
+#if (_MSC_VER < 1800)
inline int lrint(double flt) {
int intgr;
#if V8_TARGET_ARCH_IA32
@@ -84,12 +86,13 @@ inline int lrint(double flt) {
return intgr;
}
+#endif // _MSC_VER < 1800
+
#endif // V8_CC_MSVC
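
The new guard keeps the hand-written lrint out of builds where the CRT provides one (VS2013, _MSC_VER 1800, added C99 math functions). A simplified version-guarded fallback along the same lines; note this rounds half away from zero, whereas the x87 fistp path above follows the current rounding mode (round-to-nearest-even by default), so it is a sketch, not a drop-in replacement:

    #if defined(_MSC_VER) && (_MSC_VER < 1800)
    // Portable fallback for toolchains without C99 lrint(). Rounds half
    // away from zero, which differs from lrint() on exact .5 ties.
    inline int lrint(double value) {
      return static_cast<int>(value >= 0.0 ? value + 0.5 : value - 0.5);
    }
    #endif
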
namespace v8 {
namespace internal {
-double ceiling(double x);
double modulo(double x, double y);
// Custom implementation of math functions.
@@ -252,9 +255,6 @@ class OS {
// Debug break.
static void DebugBreak();
- // Dump C++ current stack trace (only functional on Linux).
- static void DumpBacktrace();
-
// Walk the stack.
static const int kStackWalkError = -1;
static const int kStackWalkMaxNameLen = 256;
@@ -264,8 +264,6 @@ class OS {
char text[kStackWalkMaxTextLen];
};
- static int StackWalk(Vector<StackFrame> frames);
-
class MemoryMappedFile {
public:
static MemoryMappedFile* open(const char* name);
@@ -303,6 +301,9 @@ class OS {
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
+ // The total amount of physical memory available on the current system.
+ static uint64_t TotalPhysicalMemory();
+
// Maximum size of the virtual memory. 0 means there is no artificial
// limit.
static intptr_t MaxVirtualMemory();
diff --git a/chromium/v8/src/platform/elapsed-timer.h b/chromium/v8/src/platform/elapsed-timer.h
index 2311db2f524..b61b007605b 100644
--- a/chromium/v8/src/platform/elapsed-timer.h
+++ b/chromium/v8/src/platform/elapsed-timer.h
@@ -28,8 +28,8 @@
#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
#define V8_PLATFORM_ELAPSED_TIMER_H_
-#include "checks.h"
-#include "platform/time.h"
+#include "../checks.h"
+#include "time.h"
namespace v8 {
namespace internal {
@@ -104,7 +104,7 @@ class ElapsedTimer V8_FINAL BASE_EMBEDDED {
private:
static V8_INLINE TimeTicks Now() {
- TimeTicks now = TimeTicks::HighResNow();
+ TimeTicks now = TimeTicks::HighResolutionNow();
ASSERT(!now.IsNull());
return now;
}
diff --git a/chromium/v8/src/platform/mutex.h b/chromium/v8/src/platform/mutex.h
index 0f899ca5976..125e9d4860f 100644
--- a/chromium/v8/src/platform/mutex.h
+++ b/chromium/v8/src/platform/mutex.h
@@ -28,9 +28,9 @@
#ifndef V8_PLATFORM_MUTEX_H_
#define V8_PLATFORM_MUTEX_H_
-#include "lazy-instance.h"
+#include "../lazy-instance.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "../win32-headers.h"
#endif
#if V8_OS_POSIX
diff --git a/chromium/v8/src/platform/semaphore.h b/chromium/v8/src/platform/semaphore.h
index 2cfa1421117..0babe5fd659 100644
--- a/chromium/v8/src/platform/semaphore.h
+++ b/chromium/v8/src/platform/semaphore.h
@@ -28,9 +28,9 @@
#ifndef V8_PLATFORM_SEMAPHORE_H_
#define V8_PLATFORM_SEMAPHORE_H_
-#include "lazy-instance.h"
+#include "../lazy-instance.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "../win32-headers.h"
#endif
#if V8_OS_MACOSX
diff --git a/chromium/v8/src/platform/time.cc b/chromium/v8/src/platform/time.cc
index ea6dd2c0bae..de0ca16473f 100644
--- a/chromium/v8/src/platform/time.cc
+++ b/chromium/v8/src/platform/time.cc
@@ -43,13 +43,6 @@
#include "win32-headers.h"
#endif
-#if V8_OS_WIN
-// Prototype for GetTickCount64() procedure.
-extern "C" {
-typedef ULONGLONG (WINAPI *GETTICKCOUNT64PROC)(void);
-}
-#endif
-
namespace v8 {
namespace internal {
@@ -175,43 +168,43 @@ struct timespec TimeDelta::ToTimespec() const {
// periodically resync the internal clock to the system clock.
class Clock V8_FINAL {
public:
- Clock() : initial_time_(CurrentWallclockTime()),
- initial_ticks_(TimeTicks::Now()) {}
+ Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
Time Now() {
- // This must be executed under lock.
- LockGuard<Mutex> lock_guard(&mutex_);
+ // Time between resampling the un-granular clock for this API (1 minute).
+ const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
- // Calculate the time elapsed since we started our timer.
- TimeDelta elapsed = TimeTicks::Now() - initial_ticks_;
+ LockGuard<Mutex> lock_guard(&mutex_);
- // Check if we don't need to synchronize with the wallclock yet.
- if (elapsed.InMicroseconds() <= kMaxMicrosecondsToAvoidDrift) {
- return initial_time_ + elapsed;
+ // Determine current time and ticks.
+ TimeTicks ticks = GetSystemTicks();
+ Time time = GetSystemTime();
+
+ // Check if we need to synchronize with the system clock due to a backwards
+ // time change or the amount of time elapsed.
+ TimeDelta elapsed = ticks - initial_ticks_;
+ if (time < initial_time_ || elapsed > kMaxElapsedTime) {
+ initial_ticks_ = ticks;
+ initial_time_ = time;
+ return time;
}
- // Resynchronize with the wallclock.
- initial_ticks_ = TimeTicks::Now();
- initial_time_ = CurrentWallclockTime();
- return initial_time_;
+ return initial_time_ + elapsed;
}
Time NowFromSystemTime() {
- // This must be executed under lock.
LockGuard<Mutex> lock_guard(&mutex_);
-
- // Resynchronize with the wallclock.
- initial_ticks_ = TimeTicks::Now();
- initial_time_ = CurrentWallclockTime();
+ initial_ticks_ = GetSystemTicks();
+ initial_time_ = GetSystemTime();
return initial_time_;
}
private:
- // Time between resampling the un-granular clock for this API (1 minute).
- static const int64_t kMaxMicrosecondsToAvoidDrift =
- Time::kMicrosecondsPerMinute;
+ static TimeTicks GetSystemTicks() {
+ return TimeTicks::Now();
+ }
- static Time CurrentWallclockTime() {
+ static Time GetSystemTime() {
FILETIME ft;
::GetSystemTimeAsFileTime(&ft);
return Time::FromFiletime(ft);
@@ -223,9 +216,9 @@ class Clock V8_FINAL {
};
-static LazyDynamicInstance<Clock,
- DefaultCreateTrait<Clock>,
- ThreadSafeInitOnceTrait>::type clock = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+static LazyStaticInstance<Clock,
+ DefaultConstructTrait<Clock>,
+ ThreadSafeInitOnceTrait>::type clock = LAZY_STATIC_INSTANCE_INITIALIZER;
Time Time::Now() {
@@ -388,6 +381,7 @@ class TickClock {
public:
virtual ~TickClock() {}
virtual int64_t Now() = 0;
+ virtual bool IsHighResolution() = 0;
};
@@ -440,42 +434,24 @@ class HighResolutionTickClock V8_FINAL : public TickClock {
int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
- // Make sure we never return 0 here, so that TimeTicks::HighResNow()
+ // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
// will never return 0.
return ticks + 1;
}
- private:
- int64_t ticks_per_second_;
-};
-
-
-// The GetTickCount64() API is what we actually want for the regular tick
-// clock, but this is only available starting with Windows Vista.
-class WindowsVistaTickClock V8_FINAL : public TickClock {
- public:
- explicit WindowsVistaTickClock(GETTICKCOUNT64PROC func) : func_(func) {
- ASSERT(func_ != NULL);
- }
- virtual ~WindowsVistaTickClock() {}
-
- virtual int64_t Now() V8_OVERRIDE {
- // Query the current ticks (in ms).
- ULONGLONG tick_count_ms = (*func_)();
-
- // Convert to microseconds (make sure to never return 0 here).
- return (tick_count_ms * Time::kMicrosecondsPerMillisecond) + 1;
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return true;
}
private:
- GETTICKCOUNT64PROC func_;
+ int64_t ticks_per_second_;
};
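
The tick-to-microsecond conversion in HighResolutionTickClock::Now() splits the tick count into whole seconds plus a remainder before scaling, because the naive ticks * 1000000 / frequency can overflow int64 for large uptimes. A standalone sketch of the same split:

    #include <stdint.h>

    // Convert performance-counter ticks to microseconds. Splitting into
    // whole seconds plus leftover ticks avoids overflowing the
    // intermediate product ticks * 1000000 while keeping full precision.
    int64_t TicksToMicroseconds(int64_t ticks, int64_t ticks_per_second) {
      int64_t whole_seconds = ticks / ticks_per_second;
      int64_t leftover_ticks = ticks % ticks_per_second;
      return whole_seconds * 1000000 +
             (leftover_ticks * 1000000) / ticks_per_second;
    }
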
class RolloverProtectedTickClock V8_FINAL : public TickClock {
public:
// We initialize rollover_ms_ to 1 to ensure that we will never
- // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below.
+ // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
virtual ~RolloverProtectedTickClock() {}
@@ -487,6 +463,9 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
// Note that we do not use GetTickCount() here, since timeGetTime() gives
// more predictable delta values, as described here:
// http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+ // timeGetTime() provides 1ms granularity when combined with
+ // timeBeginPeriod(). If the host application for V8 wants fast timers, it
+ // can use timeBeginPeriod() to increase the resolution.
DWORD now = timeGetTime();
if (now < last_seen_now_) {
rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
@@ -495,6 +474,10 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
}
+ virtual bool IsHighResolution() V8_OVERRIDE {
+ return false;
+ }
+
private:
Mutex mutex_;
DWORD last_seen_now_;
@@ -502,27 +485,10 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock {
};
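
RolloverProtectedTickClock compensates for timeGetTime() being a 32-bit millisecond counter that wraps roughly every 49.7 days: whenever a sample is smaller than the previous one, add 2^32 ms to a running offset. A single-threaded sketch of that unwrapping (the real class guards it with a mutex):

    #include <stdint.h>

    // 'raw_ms' is a 32-bit millisecond counter that wraps around.
    // Returns a monotonically increasing 64-bit millisecond count.
    uint64_t UnwrapTicks(uint32_t raw_ms) {
      static uint32_t last_seen = 0;
      static uint64_t rollover_ms = 0;
      if (raw_ms < last_seen) {
        rollover_ms += static_cast<uint64_t>(1) << 32;  // wrapped (~49.7 days)
      }
      last_seen = raw_ms;
      return rollover_ms + raw_ms;
    }
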
-struct CreateTickClockTrait {
- static TickClock* Create() {
- // Try to load GetTickCount64() from kernel32.dll (available since Vista).
- HMODULE kernel32 = ::GetModuleHandleA("kernel32.dll");
- ASSERT(kernel32 != NULL);
- FARPROC proc = ::GetProcAddress(kernel32, "GetTickCount64");
- if (proc != NULL) {
- return new WindowsVistaTickClock(
- reinterpret_cast<GETTICKCOUNT64PROC>(proc));
- }
-
- // Fallback to the rollover protected tick clock.
- return new RolloverProtectedTickClock;
- }
-};
-
-
-static LazyDynamicInstance<TickClock,
- CreateTickClockTrait,
+static LazyStaticInstance<RolloverProtectedTickClock,
+ DefaultConstructTrait<RolloverProtectedTickClock>,
ThreadSafeInitOnceTrait>::type tick_clock =
- LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+ LAZY_STATIC_INSTANCE_INITIALIZER;
struct CreateHighResTickClockTrait {
@@ -560,21 +526,27 @@ TimeTicks TimeTicks::Now() {
}
-TimeTicks TimeTicks::HighResNow() {
+TimeTicks TimeTicks::HighResolutionNow() {
// Make sure we never return 0 here.
TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
ASSERT(!ticks.IsNull());
return ticks;
}
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return high_res_tick_clock.Pointer()->IsHighResolution();
+}
+
#else // V8_OS_WIN
TimeTicks TimeTicks::Now() {
- return HighResNow();
+ return HighResolutionNow();
}
-TimeTicks TimeTicks::HighResNow() {
+TimeTicks TimeTicks::HighResolutionNow() {
int64_t ticks;
#if V8_OS_MACOSX
static struct mach_timebase_info info;
@@ -608,6 +580,12 @@ TimeTicks TimeTicks::HighResNow() {
return TimeTicks(ticks + 1);
}
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+ return true;
+}
+
#endif // V8_OS_WIN
} } // namespace v8::internal
diff --git a/chromium/v8/src/platform/time.h b/chromium/v8/src/platform/time.h
index 2ce6cdd3e99..877e0203bb5 100644
--- a/chromium/v8/src/platform/time.h
+++ b/chromium/v8/src/platform/time.h
@@ -31,7 +31,7 @@
#include <ctime>
#include <limits>
-#include "allocation.h"
+#include "../allocation.h"
// Forward declarations.
extern "C" {
@@ -333,7 +333,10 @@ class TimeTicks V8_FINAL BASE_EMBEDDED {
// resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
// SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
// This method never returns a null TimeTicks.
- static TimeTicks HighResNow();
+ static TimeTicks HighResolutionNow();
+
+ // Returns true if the high-resolution clock is working on this system.
+ static bool IsHighResolutionClockWorking();
// Returns true if this object has not been initialized.
bool IsNull() const { return ticks_ == 0; }
diff --git a/chromium/v8/src/preparser-api.cc b/chromium/v8/src/preparser-api.cc
deleted file mode 100644
index 462dfe2290f..00000000000
--- a/chromium/v8/src/preparser-api.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef _MSC_VER
-#define V8_WIN32_LEAN_AND_MEAN
-#include "win32-headers.h"
-#endif
-
-#include "../include/v8-preparser.h"
-
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
-#include "hashmap.h"
-#include "preparse-data-format.h"
-#include "preparse-data.h"
-#include "preparser.h"
-
-namespace v8 {
-namespace internal {
-
-// UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUtf16Buffer : public Utf16CharacterStream {
- public:
- /* The InputStreamUtf16Buffer maintains an internal buffer
- * that is filled in chunks from the Utf16CharacterStream.
- * It also maintains unlimited pushback capability, but optimized
- * for small pushbacks.
- * The pushback_buffer_ pointer points to the limit of pushbacks
- * in the current buffer. There is room for a few pushback'ed chars before
- * the buffer containing the most recently read chunk. If this is overflowed,
- * an external buffer is allocated/reused to hold further pushbacks, and
- * pushback_buffer_ and buffer_cursor_/buffer_end_ now points to the
- * new buffer. When this buffer is read to the end again, the cursor is
- * switched back to the internal buffer
- */
- explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream)
- : Utf16CharacterStream(),
- stream_(stream),
- pushback_buffer_(buffer_),
- pushback_buffer_end_cache_(NULL),
- pushback_buffer_backing_(NULL),
- pushback_buffer_backing_size_(0) {
- buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
- }
-
- virtual ~InputStreamUtf16Buffer() {
- if (pushback_buffer_backing_ != NULL) {
- DeleteArray(pushback_buffer_backing_);
- }
- }
-
- virtual void PushBack(uc32 ch) {
- ASSERT(pos_ > 0);
- if (ch == kEndOfInput) {
- pos_--;
- return;
- }
- if (buffer_cursor_ <= pushback_buffer_) {
- // No more room in the current buffer to do pushbacks.
- if (pushback_buffer_end_cache_ == NULL) {
- // We have overflowed the pushback space at the beginning of buffer_.
- // Switch to using a separate allocated pushback buffer.
- if (pushback_buffer_backing_ == NULL) {
- // Allocate a buffer the first time we need it.
- pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
- pushback_buffer_backing_size_ = kPushBackSize;
- }
- pushback_buffer_ = pushback_buffer_backing_;
- pushback_buffer_end_cache_ = buffer_end_;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- buffer_cursor_ = buffer_end_ - 1;
- } else {
- // Hit the bottom of the allocated pushback buffer.
- // Double the buffer and continue.
- uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
- OS::MemCopy(new_buffer + pushback_buffer_backing_size_,
- pushback_buffer_backing_,
- pushback_buffer_backing_size_);
- DeleteArray(pushback_buffer_backing_);
- buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
- pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
- buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
- }
- }
- pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] =
- static_cast<uc16>(ch);
- pos_--;
- }
-
- protected:
- virtual bool ReadBlock() {
- if (pushback_buffer_end_cache_ != NULL) {
- buffer_cursor_ = buffer_;
- buffer_end_ = pushback_buffer_end_cache_;
- pushback_buffer_end_cache_ = NULL;
- return buffer_end_ > buffer_cursor_;
- }
- // Copy the top of the buffer into the pushback area.
- int32_t value;
- uc16* buffer_start = buffer_ + kPushBackSize;
- buffer_cursor_ = buffer_end_ = buffer_start;
- while ((value = stream_->Next()) >= 0) {
- if (value >
- static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::LeadSurrogate(value);
- buffer_start[buffer_end_++ - buffer_start] =
- unibrow::Utf16::TrailSurrogate(value);
- } else {
- // buffer_end_ is a const pointer, but buffer_ is writable.
- buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
- }
- // Stop one before the end of the buffer in case we get a surrogate pair.
- if (buffer_end_ <= buffer_ + 1 + kPushBackSize + kBufferSize) break;
- }
- return buffer_end_ > buffer_start;
- }
-
- virtual unsigned SlowSeekForward(unsigned pos) {
- // Seeking in the input is not used by preparsing.
- // It's only used by the real parser based on preparser data.
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- static const unsigned kBufferSize = 512;
- static const unsigned kPushBackSize = 16;
- v8::UnicodeInputStream* const stream_;
- // Buffer holding first kPushBackSize characters of pushback buffer,
- // then kBufferSize chars of read-ahead.
- // The pushback buffer is only used if pushing back characters past
- // the start of a block.
- uc16 buffer_[kPushBackSize + kBufferSize];
- // Limit of pushbacks before new allocation is necessary.
- uc16* pushback_buffer_;
- // The following is used only if the pushback area at the start of
- // buffer_ is insufficient.
- const uc16* pushback_buffer_end_cache_;
- uc16* pushback_buffer_backing_;
- unsigned pushback_buffer_backing_size_;
-};
-
-} // namespace internal.
-
-
-UnicodeInputStream::~UnicodeInputStream() { }
-
-
-PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
- internal::InputStreamUtf16Buffer buffer(input);
- uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
- internal::UnicodeCache unicode_cache;
- internal::Scanner scanner(&unicode_cache);
- scanner.Initialize(&buffer);
- internal::CompleteParserRecorder recorder;
- preparser::PreParser preparser(&scanner, &recorder, stack_limit);
- preparser.set_allow_lazy(true);
- preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
- if (result == preparser::PreParser::kPreParseStackOverflow) {
- return PreParserData::StackOverflow();
- }
- internal::Vector<unsigned> pre_data = recorder.ExtractData();
- size_t size = pre_data.length() * sizeof(pre_data[0]);
- unsigned char* data = reinterpret_cast<unsigned char*>(pre_data.start());
- return PreParserData(size, data);
-}
-
-} // namespace v8.
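For context, the deleted Preparse() above was the entire public entry point of the standalone preparser. A hypothetical embedder-side use, assuming the PreParserData accessors declared in the likewise-removed v8-preparser.h header; StringStream is an illustrative UnicodeInputStream subclass, not code from the tree:

#include "v8-preparser.h"

// Feeds ASCII source to the preparser one code point at a time.
class StringStream : public v8::UnicodeInputStream {
 public:
  explicit StringStream(const char* s) : s_(s) {}
  virtual int32_t Next() {
    return *s_ != '\0' ? static_cast<int32_t>(*s_++) : -1;  // < 0 ends input.
  }
 private:
  const char* s_;
};

bool PreparseOk(const char* source) {
  StringStream stream(source);
  // Allow the preparser 64KB of stack below the caller's frame.
  v8::PreParserData data = v8::Preparse(&stream, 64 * 1024);
  return !data.stack_overflow();  // Accessor assumed from the deleted header.
}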
diff --git a/chromium/v8/src/preparser.cc b/chromium/v8/src/preparser.cc
index 36a94a3315b..a87c434558d 100644
--- a/chromium/v8/src/preparser.cc
+++ b/chromium/v8/src/preparser.cc
@@ -42,10 +42,10 @@
#include "unicode.h"
#include "utils.h"
-#ifdef _MSC_VER
+#if V8_CC_MSVC && (_MSC_VER < 1800)
namespace std {
-// Usually defined in math.h, but not in MSVC.
+// Usually defined in math.h, but not in MSVC until VS2013+.
// Abstracted to work around the missing declaration.
int isfinite(double value);
@@ -53,28 +53,27 @@ int isfinite(double value);
#endif
namespace v8 {
-
-namespace preparser {
+namespace internal {
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- i::LanguageMode mode, bool is_generator, i::ParserRecorder* log) {
+ LanguageMode mode, bool is_generator, ParserRecorder* log) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
Scope top_scope(&scope_, kTopLevelScope);
set_language_mode(mode);
Scope function_scope(&scope_, kFunctionScope);
function_scope.set_is_generator(is_generator);
- ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
+ ASSERT_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
+ int start_position = peek_position();
ParseLazyFunctionLiteralBody(&ok);
- if (stack_overflow_) return kPreParseStackOverflow;
+ if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
+ ReportUnexpectedToken(scanner()->current_token());
} else {
- ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
+ ASSERT_EQ(Token::RBRACE, scanner()->peek());
if (!is_classic_mode()) {
- int end_pos = scanner_->location().end_pos;
+ int end_pos = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_pos, &ok);
if (ok) {
CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
@@ -98,50 +97,38 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-void PreParser::ReportUnexpectedToken(i::Token::Value token) {
+void PreParser::ReportUnexpectedToken(Token::Value token) {
// We don't report stack overflows here, to avoid increasing the
// stack depth even further. Instead we report it after parsing is
// over, in ParseProgram.
- if (token == i::Token::ILLEGAL && stack_overflow_) {
+ if (token == Token::ILLEGAL && stack_overflow()) {
return;
}
- i::Scanner::Location source_location = scanner_->location();
+ Scanner::Location source_location = scanner()->location();
// Six of the tokens are treated specially
switch (token) {
- case i::Token::EOS:
+ case Token::EOS:
return ReportMessageAt(source_location, "unexpected_eos", NULL);
- case i::Token::NUMBER:
+ case Token::NUMBER:
return ReportMessageAt(source_location, "unexpected_token_number", NULL);
- case i::Token::STRING:
+ case Token::STRING:
return ReportMessageAt(source_location, "unexpected_token_string", NULL);
- case i::Token::IDENTIFIER:
+ case Token::IDENTIFIER:
return ReportMessageAt(source_location,
"unexpected_token_identifier", NULL);
- case i::Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_RESERVED_WORD:
return ReportMessageAt(source_location, "unexpected_reserved", NULL);
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
return ReportMessageAt(source_location,
"unexpected_strict_reserved", NULL);
default:
- const char* name = i::Token::String(token);
+ const char* name = Token::String(token);
ReportMessageAt(source_location, "unexpected_token", name);
}
}
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
-void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- i::Scanner::Location octal = scanner_->octal_position();
- if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
- ReportMessageAt(octal, "strict_octal_literal", NULL);
- scanner_->clear_octal_position();
- *ok = false;
- }
-}
-
-
#define CHECK_OK ok); \
if (!*ok) return kUnknownSourceElements; \
((void)0
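The CHECK_OK macro works by splicing itself into a call's argument list: the caller writes ParseX(CHECK_OK); and the macro supplies the closing parenthesis, an early-return check, and a trailing ((void)0 that the caller's own ");" completes into a harmless expression. A self-contained demonstration of the trick (the function names and the -1 error value are illustrative):

#include <stdio.h>

#define CHECK_OK ok); \
  if (!*ok) return -1; \
  ((void)0

int ParseNumber(bool* ok) { *ok = false; return 0; }  // Always "fails".

int ParseSum(bool* ok) {
  int lhs = ParseNumber(CHECK_OK);
  // Preprocesses to:
  //   int lhs = ParseNumber(ok); if (!*ok) return -1; ((void)0);
  // so the failure above returns -1 here and the line below never runs.
  return lhs;
}

int main() {
  bool ok = true;
  printf("%d\n", ParseSum(&ok));  // Prints -1.
  return 0;
}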
@@ -162,10 +149,10 @@ PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
// GeneratorDeclaration
switch (peek()) {
- case i::Token::FUNCTION:
+ case Token::FUNCTION:
return ParseFunctionDeclaration(ok);
- case i::Token::LET:
- case i::Token::CONST:
+ case Token::LET:
+ case Token::CONST:
return ParseVariableStatement(kSourceElement, ok);
default:
return ParseStatement(ok);
@@ -184,7 +171,7 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
if (allow_directive_prologue) {
if (statement.IsUseStrictLiteral()) {
set_language_mode(allow_harmony_scoping() ?
- i::EXTENDED_MODE : i::STRICT_MODE);
+ EXTENDED_MODE : STRICT_MODE);
} else if (!statement.IsStringLiteral()) {
allow_directive_prologue = false;
}
@@ -229,55 +216,55 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
// Keep the source position of the statement
switch (peek()) {
- case i::Token::LBRACE:
+ case Token::LBRACE:
return ParseBlock(ok);
- case i::Token::CONST:
- case i::Token::LET:
- case i::Token::VAR:
+ case Token::CONST:
+ case Token::LET:
+ case Token::VAR:
return ParseVariableStatement(kStatement, ok);
- case i::Token::SEMICOLON:
+ case Token::SEMICOLON:
Next();
return Statement::Default();
- case i::Token::IF:
+ case Token::IF:
return ParseIfStatement(ok);
- case i::Token::DO:
+ case Token::DO:
return ParseDoWhileStatement(ok);
- case i::Token::WHILE:
+ case Token::WHILE:
return ParseWhileStatement(ok);
- case i::Token::FOR:
+ case Token::FOR:
return ParseForStatement(ok);
- case i::Token::CONTINUE:
+ case Token::CONTINUE:
return ParseContinueStatement(ok);
- case i::Token::BREAK:
+ case Token::BREAK:
return ParseBreakStatement(ok);
- case i::Token::RETURN:
+ case Token::RETURN:
return ParseReturnStatement(ok);
- case i::Token::WITH:
+ case Token::WITH:
return ParseWithStatement(ok);
- case i::Token::SWITCH:
+ case Token::SWITCH:
return ParseSwitchStatement(ok);
- case i::Token::THROW:
+ case Token::THROW:
return ParseThrowStatement(ok);
- case i::Token::TRY:
+ case Token::TRY:
return ParseTryStatement(ok);
- case i::Token::FUNCTION: {
- i::Scanner::Location start_location = scanner_->peek_location();
+ case Token::FUNCTION: {
+ Scanner::Location start_location = scanner()->peek_location();
Statement statement = ParseFunctionDeclaration(CHECK_OK);
- i::Scanner::Location end_location = scanner_->location();
+ Scanner::Location end_location = scanner()->location();
if (!is_classic_mode()) {
ReportMessageAt(start_location.beg_pos, end_location.end_pos,
"strict_function", NULL);
@@ -288,7 +275,7 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) {
}
}
- case i::Token::DEBUGGER:
+ case Token::DEBUGGER:
return ParseDebuggerStatement(ok);
default:
@@ -303,11 +290,11 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
// GeneratorDeclaration ::
// 'function' '*' Identifier '(' FormalParameterListopt ')'
// '{' FunctionBody '}'
- Expect(i::Token::FUNCTION, CHECK_OK);
+ Expect(Token::FUNCTION, CHECK_OK);
- bool is_generator = allow_generators_ && Check(i::Token::MUL);
+ bool is_generator = allow_generators() && Check(Token::MUL);
Identifier identifier = ParseIdentifier(CHECK_OK);
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
Expression function_value = ParseFunctionLiteral(is_generator, CHECK_OK);
@@ -333,15 +320,15 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
// Note that a Block does not introduce a new execution scope!
// (ECMA-262, 3rd, 12.2)
//
- Expect(i::Token::LBRACE, CHECK_OK);
- while (peek() != i::Token::RBRACE) {
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
if (is_extended_mode()) {
ParseSourceElement(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
}
}
- Expect(i::Token::RBRACE, ok);
+ Expect(Token::RBRACE, ok);
return Statement::Default();
}
@@ -385,9 +372,9 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
bool require_initializer = false;
- if (peek() == i::Token::VAR) {
- Consume(i::Token::VAR);
- } else if (peek() == i::Token::CONST) {
+ if (peek() == Token::VAR) {
+ Consume(Token::VAR);
+ } else if (peek() == Token::CONST) {
// TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
//
// ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
@@ -398,20 +385,20 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// However, disallowing const in classic mode will break compatibility with
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in classic mode.
- Consume(i::Token::CONST);
+ Consume(Token::CONST);
switch (language_mode()) {
- case i::CLASSIC_MODE:
+ case CLASSIC_MODE:
break;
- case i::STRICT_MODE: {
- i::Scanner::Location location = scanner_->peek_location();
+ case STRICT_MODE: {
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location, "strict_const", NULL);
*ok = false;
return Statement::Default();
}
- case i::EXTENDED_MODE:
+ case EXTENDED_MODE:
if (var_context != kSourceElement &&
var_context != kForStatement) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"unprotected_const", NULL);
*ok = false;
@@ -420,7 +407,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
require_initializer = true;
break;
}
- } else if (peek() == i::Token::LET) {
+ } else if (peek() == Token::LET) {
// ES6 Draft Rev4 section 12.2.1:
//
// LetDeclaration : let LetBindingList ;
@@ -428,16 +415,16 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// * It is a Syntax Error if the code that matches this production is not
// contained in extended code.
if (!is_extended_mode()) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"illegal_let", NULL);
*ok = false;
return Statement::Default();
}
- Consume(i::Token::LET);
+ Consume(Token::LET);
if (var_context != kSourceElement &&
var_context != kForStatement) {
- i::Scanner::Location location = scanner_->peek_location();
+ Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location.beg_pos, location.end_pos,
"unprotected_let", NULL);
*ok = false;
@@ -455,22 +442,22 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
int nvars = 0; // the number of variables declared
do {
// Parse variable name.
- if (nvars > 0) Consume(i::Token::COMMA);
+ if (nvars > 0) Consume(Token::COMMA);
Identifier identifier = ParseIdentifier(CHECK_OK);
if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_var_name",
identifier,
ok);
return Statement::Default();
}
nvars++;
- if (peek() == i::Token::ASSIGN || require_initializer) {
- Expect(i::Token::ASSIGN, CHECK_OK);
+ if (peek() == Token::ASSIGN || require_initializer) {
+ Expect(Token::ASSIGN, CHECK_OK);
ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
if (decl_props != NULL) *decl_props = kHasInitializers;
}
- } while (peek() == i::Token::COMMA);
+ } while (peek() == Token::COMMA);
if (num_decl != NULL) *num_decl = nvars;
return Statement::Default();
@@ -488,8 +475,8 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
ASSERT(is_classic_mode() ||
(!expr.AsIdentifier().IsFutureStrictReserved() &&
!expr.AsIdentifier().IsYield()));
- if (peek() == i::Token::COLON) {
- Consume(i::Token::COLON);
+ if (peek() == Token::COLON) {
+ Consume(Token::COLON);
return ParseStatement(ok);
}
// Preparsing is disabled for extensions (because the extension details
@@ -506,12 +493,12 @@ PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
// IfStatement ::
// 'if' '(' Expression ')' Statement ('else' Statement)?
- Expect(i::Token::IF, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::IF, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
- if (peek() == i::Token::ELSE) {
+ if (peek() == Token::ELSE) {
Next();
ParseStatement(CHECK_OK);
}
@@ -523,12 +510,12 @@ PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
// ContinueStatement ::
// 'continue' [no line terminator] Identifier? ';'
- Expect(i::Token::CONTINUE, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Expect(Token::CONTINUE, CHECK_OK);
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -540,12 +527,12 @@ PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
// BreakStatement ::
// 'break' [no line terminator] Identifier? ';'
- Expect(i::Token::BREAK, CHECK_OK);
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Expect(Token::BREAK, CHECK_OK);
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseIdentifier(CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -560,18 +547,18 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
// Consume the return token. It is necessary to do that before
// reporting any errors on it, because of the way errors are
// reported (underlining).
- Expect(i::Token::RETURN, CHECK_OK);
+ Expect(Token::RETURN, CHECK_OK);
// An ECMAScript program is considered syntactically incorrect if it
// contains a return statement that is not within the body of a
// function. See ECMA-262, section 12.9, page 67.
// This is not handled during preparsing.
- i::Token::Value tok = peek();
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- tok != i::Token::SEMICOLON &&
- tok != i::Token::RBRACE &&
- tok != i::Token::EOS) {
+ Token::Value tok = peek();
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ tok != Token::SEMICOLON &&
+ tok != Token::RBRACE &&
+ tok != Token::EOS) {
ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -582,16 +569,16 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
- Expect(i::Token::WITH, CHECK_OK);
+ Expect(Token::WITH, CHECK_OK);
if (!is_classic_mode()) {
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location, "strict_mode_with", NULL);
*ok = false;
return Statement::Default();
}
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
Scope::InsideWith iw(scope_);
ParseStatement(CHECK_OK);
@@ -603,30 +590,30 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- Expect(i::Token::SWITCH, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::SWITCH, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
- Expect(i::Token::LBRACE, CHECK_OK);
- i::Token::Value token = peek();
- while (token != i::Token::RBRACE) {
- if (token == i::Token::CASE) {
- Expect(i::Token::CASE, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
+ Token::Value token = peek();
+ while (token != Token::RBRACE) {
+ if (token == Token::CASE) {
+ Expect(Token::CASE, CHECK_OK);
ParseExpression(true, CHECK_OK);
} else {
- Expect(i::Token::DEFAULT, CHECK_OK);
+ Expect(Token::DEFAULT, CHECK_OK);
}
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
token = peek();
- while (token != i::Token::CASE &&
- token != i::Token::DEFAULT &&
- token != i::Token::RBRACE) {
+ while (token != Token::CASE &&
+ token != Token::DEFAULT &&
+ token != Token::RBRACE) {
ParseStatement(CHECK_OK);
token = peek();
}
}
- Expect(i::Token::RBRACE, ok);
+ Expect(Token::RBRACE, ok);
return Statement::Default();
}
@@ -635,13 +622,13 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- Expect(i::Token::DO, CHECK_OK);
+ Expect(Token::DO, CHECK_OK);
ParseStatement(CHECK_OK);
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, ok);
- if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
+ Expect(Token::RPAREN, ok);
+ if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
return Statement::Default();
}
@@ -650,20 +637,19 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- Expect(i::Token::WHILE, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
+ Expect(Token::WHILE, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(ok);
return Statement::Default();
}
bool PreParser::CheckInOrOf(bool accept_OF) {
- if (peek() == i::Token::IN ||
- (allow_for_of() && accept_OF && peek() == i::Token::IDENTIFIER &&
- scanner_->is_next_contextual_keyword(v8::internal::CStrVector("of")))) {
- Next();
+ if (Check(Token::IN) ||
+ (allow_for_of() && accept_OF &&
+ CheckContextualKeyword(CStrVector("of")))) {
return true;
}
return false;
@@ -674,12 +660,12 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
- Expect(i::Token::FOR, CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- if (peek() != i::Token::SEMICOLON) {
- if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
- peek() == i::Token::LET) {
- bool is_let = peek() == i::Token::LET;
+ Expect(Token::FOR, CHECK_OK);
+ Expect(Token::LPAREN, CHECK_OK);
+ if (peek() != Token::SEMICOLON) {
+ if (peek() == Token::VAR || peek() == Token::CONST ||
+ peek() == Token::LET) {
+ bool is_let = peek() == Token::LET;
int decl_count;
VariableDeclarationProperties decl_props = kHasNoInitializers;
ParseVariableDeclarations(
@@ -689,7 +675,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
bool accept_OF = !has_initializers;
if (accept_IN && CheckInOrOf(accept_OF)) {
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
return Statement::Default();
@@ -698,7 +684,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
Expression lhs = ParseExpression(false, CHECK_OK);
if (CheckInOrOf(lhs.IsIdentifier())) {
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(CHECK_OK);
return Statement::Default();
@@ -707,17 +693,17 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
// Parsed initializer at this point.
- Expect(i::Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON, CHECK_OK);
- if (peek() != i::Token::SEMICOLON) {
+ if (peek() != Token::SEMICOLON) {
ParseExpression(true, CHECK_OK);
}
- Expect(i::Token::SEMICOLON, CHECK_OK);
+ Expect(Token::SEMICOLON, CHECK_OK);
- if (peek() != i::Token::RPAREN) {
+ if (peek() != Token::RPAREN) {
ParseExpression(true, CHECK_OK);
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
ParseStatement(ok);
return Statement::Default();
@@ -728,9 +714,9 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
// ThrowStatement ::
// 'throw' [no line terminator] Expression ';'
- Expect(i::Token::THROW, CHECK_OK);
- if (scanner_->HasAnyLineTerminatorBeforeNext()) {
- i::Scanner::Location pos = scanner_->location();
+ Expect(Token::THROW, CHECK_OK);
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ Scanner::Location pos = scanner()->location();
ReportMessageAt(pos, "newline_after_throw", NULL);
*ok = false;
return Statement::Default();
@@ -756,30 +742,30 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
// In preparsing, allow any number of catch/finally blocks, including zero
// of both.
- Expect(i::Token::TRY, CHECK_OK);
+ Expect(Token::TRY, CHECK_OK);
ParseBlock(CHECK_OK);
bool catch_or_finally_seen = false;
- if (peek() == i::Token::CATCH) {
- Consume(i::Token::CATCH);
- Expect(i::Token::LPAREN, CHECK_OK);
+ if (peek() == Token::CATCH) {
+ Consume(Token::CATCH);
+ Expect(Token::LPAREN, CHECK_OK);
Identifier id = ParseIdentifier(CHECK_OK);
if (!is_classic_mode() && !id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_catch_variable",
id,
ok);
return Statement::Default();
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
{ Scope::InsideWith iw(scope_);
ParseBlock(CHECK_OK);
}
catch_or_finally_seen = true;
}
- if (peek() == i::Token::FINALLY) {
- Consume(i::Token::FINALLY);
+ if (peek() == Token::FINALLY) {
+ Consume(Token::FINALLY);
ParseBlock(CHECK_OK);
catch_or_finally_seen = true;
}
@@ -797,7 +783,7 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
// DebuggerStatement ::
// 'debugger' ';'
- Expect(i::Token::DEBUGGER, CHECK_OK);
+ Expect(Token::DEBUGGER, CHECK_OK);
ExpectSemicolon(ok);
return Statement::Default();
}
@@ -818,8 +804,8 @@ PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
// Expression ',' AssignmentExpression
Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK);
- while (peek() == i::Token::COMMA) {
- Expect(i::Token::COMMA, CHECK_OK);
+ while (peek() == Token::COMMA) {
+ Expect(Token::COMMA, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
result = Expression::Default();
}
@@ -835,14 +821,14 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- if (scope_->is_generator() && peek() == i::Token::YIELD) {
+ if (scope_->is_generator() && peek() == Token::YIELD) {
return ParseYieldExpression(ok);
}
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
- if (!i::Token::IsAssignmentOp(peek())) {
+ if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
return expression;
}
@@ -850,17 +836,17 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_assignment", NULL);
*ok = false;
return Expression::Default();
}
- i::Token::Value op = Next(); // Get assignment operator.
+ Token::Value op = Next(); // Get assignment operator.
ParseAssignmentExpression(accept_IN, CHECK_OK);
- if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) {
+ if ((op == Token::ASSIGN) && expression.IsThisProperty()) {
scope_->AddProperty();
}
@@ -872,8 +858,8 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
PreParser::Expression PreParser::ParseYieldExpression(bool* ok) {
// YieldExpression ::
// 'yield' '*'? AssignmentExpression
- Consume(i::Token::YIELD);
- Check(i::Token::MUL);
+ Consume(Token::YIELD);
+ Check(Token::MUL);
ParseAssignmentExpression(false, CHECK_OK);
@@ -890,26 +876,18 @@ PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
// We start using the binary expression parser for prec >= 4 only!
Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK);
- if (peek() != i::Token::CONDITIONAL) return expression;
- Consume(i::Token::CONDITIONAL);
+ if (peek() != Token::CONDITIONAL) return expression;
+ Consume(Token::CONDITIONAL);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
ParseAssignmentExpression(true, CHECK_OK);
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
ParseAssignmentExpression(accept_IN, CHECK_OK);
return Expression::Default();
}
-int PreParser::Precedence(i::Token::Value tok, bool accept_IN) {
- if (tok == i::Token::IN && !accept_IN)
- return 0; // 0 precedence will terminate binary expression parsing
-
- return i::Token::Precedence(tok);
-}
-
-
// Precedence >= 4
PreParser::Expression PreParser::ParseBinaryExpression(int prec,
bool accept_IN,
@@ -940,19 +918,19 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
// '~' UnaryExpression
// '!' UnaryExpression
- i::Token::Value op = peek();
- if (i::Token::IsUnaryOp(op)) {
+ Token::Value op = peek();
+ if (Token::IsUnaryOp(op)) {
op = Next();
ParseUnaryExpression(ok);
return Expression::Default();
- } else if (i::Token::IsCountOp(op)) {
+ } else if (Token::IsCountOp(op)) {
op = Next();
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseUnaryExpression(CHECK_OK);
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_prefix", NULL);
*ok = false;
@@ -968,14 +946,14 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
- i::Scanner::Location before = scanner_->peek_location();
+ Scanner::Location before = scanner()->peek_location();
Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
- i::Token::IsCountOp(peek())) {
+ if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
+ Token::IsCountOp(peek())) {
if (!is_classic_mode() &&
expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
- i::Scanner::Location after = scanner_->location();
+ Scanner::Location after = scanner()->location();
ReportMessageAt(before.beg_pos, after.end_pos,
"strict_lhs_postfix", NULL);
*ok = false;
@@ -993,7 +971,7 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
// (NewExpression | MemberExpression) ...
Expression result = Expression::Default();
- if (peek() == i::Token::NEW) {
+ if (peek() == Token::NEW) {
result = ParseNewExpression(CHECK_OK);
} else {
result = ParseMemberExpression(CHECK_OK);
@@ -1001,10 +979,10 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
while (true) {
switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
} else {
@@ -1013,14 +991,14 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
break;
}
- case i::Token::LPAREN: {
+ case Token::LPAREN: {
ParseArguments(CHECK_OK);
result = Expression::Default();
break;
}
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
@@ -1051,9 +1029,9 @@ PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
// lists as long as it has 'new' prefixes left
unsigned new_count = 0;
do {
- Consume(i::Token::NEW);
+ Consume(Token::NEW);
new_count++;
- } while (peek() == i::Token::NEW);
+ } while (peek() == Token::NEW);
return ParseMemberWithNewPrefixesExpression(new_count, ok);
}
@@ -1072,17 +1050,17 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
// Parse the initial primary or function expression.
Expression result = Expression::Default();
- if (peek() == i::Token::FUNCTION) {
- Consume(i::Token::FUNCTION);
+ if (peek() == Token::FUNCTION) {
+ Consume(Token::FUNCTION);
- bool is_generator = allow_generators_ && Check(i::Token::MUL);
+ bool is_generator = allow_generators() && Check(Token::MUL);
Identifier identifier = Identifier::Default();
if (peek_any_identifier()) {
identifier = ParseIdentifier(CHECK_OK);
}
result = ParseFunctionLiteral(is_generator, CHECK_OK);
if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_function_name",
identifier,
ok);
@@ -1094,10 +1072,10 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
while (true) {
switch (peek()) {
- case i::Token::LBRACK: {
- Consume(i::Token::LBRACK);
+ case Token::LBRACK: {
+ Consume(Token::LBRACK);
ParseExpression(true, CHECK_OK);
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
} else {
@@ -1105,8 +1083,8 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
}
break;
}
- case i::Token::PERIOD: {
- Consume(i::Token::PERIOD);
+ case Token::PERIOD: {
+ Consume(Token::PERIOD);
ParseIdentifierName(CHECK_OK);
if (result.IsThis()) {
result = Expression::ThisProperty();
@@ -1115,7 +1093,7 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
}
break;
}
- case i::Token::LPAREN: {
+ case Token::LPAREN: {
if (new_count == 0) return result;
// Consume one of the new prefixes (already parsed).
ParseArguments(CHECK_OK);
@@ -1146,59 +1124,59 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
Expression result = Expression::Default();
switch (peek()) {
- case i::Token::THIS: {
+ case Token::THIS: {
Next();
result = Expression::This();
break;
}
- case i::Token::FUTURE_RESERVED_WORD:
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
- case i::Token::YIELD:
- case i::Token::IDENTIFIER: {
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::YIELD:
+ case Token::IDENTIFIER: {
Identifier id = ParseIdentifier(CHECK_OK);
result = Expression::FromIdentifier(id);
break;
}
- case i::Token::NULL_LITERAL:
- case i::Token::TRUE_LITERAL:
- case i::Token::FALSE_LITERAL:
- case i::Token::NUMBER: {
+ case Token::NULL_LITERAL:
+ case Token::TRUE_LITERAL:
+ case Token::FALSE_LITERAL:
+ case Token::NUMBER: {
Next();
break;
}
- case i::Token::STRING: {
+ case Token::STRING: {
Next();
result = GetStringSymbol();
break;
}
- case i::Token::ASSIGN_DIV:
+ case Token::ASSIGN_DIV:
result = ParseRegExpLiteral(true, CHECK_OK);
break;
- case i::Token::DIV:
+ case Token::DIV:
result = ParseRegExpLiteral(false, CHECK_OK);
break;
- case i::Token::LBRACK:
+ case Token::LBRACK:
result = ParseArrayLiteral(CHECK_OK);
break;
- case i::Token::LBRACE:
+ case Token::LBRACE:
result = ParseObjectLiteral(CHECK_OK);
break;
- case i::Token::LPAREN:
- Consume(i::Token::LPAREN);
- parenthesized_function_ = (peek() == i::Token::FUNCTION);
+ case Token::LPAREN:
+ Consume(Token::LPAREN);
+ parenthesized_function_ = (peek() == Token::FUNCTION);
result = ParseExpression(true, CHECK_OK);
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
result = result.Parenthesize();
break;
- case i::Token::MOD:
+ case Token::MOD:
result = ParseV8Intrinsic(CHECK_OK);
break;
@@ -1216,54 +1194,21 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
- Expect(i::Token::LBRACK, CHECK_OK);
- while (peek() != i::Token::RBRACK) {
- if (peek() != i::Token::COMMA) {
+ Expect(Token::LBRACK, CHECK_OK);
+ while (peek() != Token::RBRACK) {
+ if (peek() != Token::COMMA) {
ParseAssignmentExpression(true, CHECK_OK);
}
- if (peek() != i::Token::RBRACK) {
- Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACK) {
+ Expect(Token::COMMA, CHECK_OK);
}
}
- Expect(i::Token::RBRACK, CHECK_OK);
+ Expect(Token::RBRACK, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
return Expression::Default();
}
-void PreParser::CheckDuplicate(DuplicateFinder* finder,
- i::Token::Value property,
- int type,
- bool* ok) {
- int old_type;
- if (property == i::Token::NUMBER) {
- old_type = finder->AddNumber(scanner_->literal_ascii_string(), type);
- } else if (scanner_->is_literal_ascii()) {
- old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
- type);
- } else {
- old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type);
- }
- if (HasConflict(old_type, type)) {
- if (IsDataDataConflict(old_type, type)) {
- // Both are data properties.
- if (is_classic_mode()) return;
- ReportMessageAt(scanner_->location(),
- "strict_duplicate_property", NULL);
- } else if (IsDataAccessorConflict(old_type, type)) {
- // Both a data and an accessor property with the same name.
- ReportMessageAt(scanner_->location(),
- "accessor_data_property", NULL);
- } else {
- ASSERT(IsAccessorAccessorConflict(old_type, type));
- // Both accessors of the same type.
- ReportMessageAt(scanner_->location(),
- "accessor_get_set", NULL);
- }
- *ok = false;
- }
-}
-
PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// ObjectLiteral ::
@@ -1272,25 +1217,26 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
// | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
// )*[','] '}'
- Expect(i::Token::LBRACE, CHECK_OK);
- DuplicateFinder duplicate_finder(scanner_->unicode_cache());
- while (peek() != i::Token::RBRACE) {
- i::Token::Value next = peek();
+ ObjectLiteralChecker checker(this, language_mode());
+
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ Token::Value next = peek();
switch (next) {
- case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD:
- case i::Token::FUTURE_STRICT_RESERVED_WORD: {
+ case Token::IDENTIFIER:
+ case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
bool is_getter = false;
bool is_setter = false;
ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
- if ((is_getter || is_setter) && peek() != i::Token::COLON) {
- i::Token::Value name = Next();
- bool is_keyword = i::Token::IsKeyword(name);
- if (name != i::Token::IDENTIFIER &&
- name != i::Token::FUTURE_RESERVED_WORD &&
- name != i::Token::FUTURE_STRICT_RESERVED_WORD &&
- name != i::Token::NUMBER &&
- name != i::Token::STRING &&
+ if ((is_getter || is_setter) && peek() != Token::COLON) {
+ Token::Value name = Next();
+ bool is_keyword = Token::IsKeyword(name);
+ if (name != Token::IDENTIFIER &&
+ name != Token::FUTURE_RESERVED_WORD &&
+ name != Token::FUTURE_STRICT_RESERVED_WORD &&
+ name != Token::NUMBER &&
+ name != Token::STRING &&
!is_keyword) {
*ok = false;
return Expression::Default();
@@ -1298,30 +1244,30 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
if (!is_keyword) {
LogSymbol();
}
- PropertyType type = is_getter ? kGetterProperty : kSetterProperty;
- CheckDuplicate(&duplicate_finder, name, type, CHECK_OK);
+ PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
+ checker.CheckProperty(name, type, CHECK_OK);
ParseFunctionLiteral(false, CHECK_OK);
- if (peek() != i::Token::RBRACE) {
- Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACE) {
+ Expect(Token::COMMA, CHECK_OK);
}
continue; // restart the while
}
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
break;
}
- case i::Token::STRING:
+ case Token::STRING:
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
GetStringSymbol();
break;
- case i::Token::NUMBER:
+ case Token::NUMBER:
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
break;
default:
- if (i::Token::IsKeyword(next)) {
+ if (Token::IsKeyword(next)) {
Consume(next);
- CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK);
+ checker.CheckProperty(next, kValueProperty, CHECK_OK);
} else {
// Unexpected token.
*ok = false;
@@ -1329,13 +1275,13 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
}
}
- Expect(i::Token::COLON, CHECK_OK);
+ Expect(Token::COLON, CHECK_OK);
ParseAssignmentExpression(true, CHECK_OK);
// TODO(1240767): Consider allowing trailing comma.
- if (peek() != i::Token::RBRACE) Expect(i::Token::COMMA, CHECK_OK);
+ if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
}
- Expect(i::Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE, CHECK_OK);
scope_->NextMaterializedLiteralIndex();
return Expression::Default();
@@ -1344,18 +1290,18 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
bool* ok) {
- if (!scanner_->ScanRegExpPattern(seen_equal)) {
+ if (!scanner()->ScanRegExpPattern(seen_equal)) {
Next();
- ReportMessageAt(scanner_->location(), "unterminated_regexp", NULL);
+ ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL);
*ok = false;
return Expression::Default();
}
scope_->NextMaterializedLiteralIndex();
- if (!scanner_->ScanRegExpFlags()) {
+ if (!scanner()->ScanRegExpFlags()) {
Next();
- ReportMessageAt(scanner_->location(), "invalid_regexp_flags", NULL);
+ ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL);
*ok = false;
return Expression::Default();
}
@@ -1368,21 +1314,21 @@ PreParser::Arguments PreParser::ParseArguments(bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
- Expect(i::Token::LPAREN, ok);
+ Expect(Token::LPAREN, ok);
if (!*ok) return -1;
- bool done = (peek() == i::Token::RPAREN);
+ bool done = (peek() == Token::RPAREN);
int argc = 0;
while (!done) {
ParseAssignmentExpression(true, ok);
if (!*ok) return -1;
argc++;
- done = (peek() == i::Token::RPAREN);
+ done = (peek() == Token::RPAREN);
if (!done) {
- Expect(i::Token::COMMA, ok);
+ Expect(Token::COMMA, ok);
if (!*ok) return -1;
}
}
- Expect(i::Token::RPAREN, ok);
+ Expect(Token::RPAREN, ok);
return argc;
}
@@ -1399,57 +1345,57 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
function_scope.set_is_generator(is_generator);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
- Expect(i::Token::LPAREN, CHECK_OK);
- int start_position = scanner_->location().beg_pos;
- bool done = (peek() == i::Token::RPAREN);
- DuplicateFinder duplicate_finder(scanner_->unicode_cache());
+ Expect(Token::LPAREN, CHECK_OK);
+ int start_position = position();
+ bool done = (peek() == Token::RPAREN);
+ DuplicateFinder duplicate_finder(scanner()->unicode_cache());
while (!done) {
Identifier id = ParseIdentifier(CHECK_OK);
if (!id.IsValidStrictVariable()) {
- StrictModeIdentifierViolation(scanner_->location(),
+ StrictModeIdentifierViolation(scanner()->location(),
"strict_param_name",
id,
CHECK_OK);
}
int prev_value;
- if (scanner_->is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
prev_value =
- duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
+ duplicate_finder.AddAsciiSymbol(scanner()->literal_ascii_string(), 1);
} else {
prev_value =
- duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1);
+ duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1);
}
if (prev_value != 0) {
- SetStrictModeViolation(scanner_->location(),
+ SetStrictModeViolation(scanner()->location(),
"strict_param_dupe",
CHECK_OK);
}
- done = (peek() == i::Token::RPAREN);
+ done = (peek() == Token::RPAREN);
if (!done) {
- Expect(i::Token::COMMA, CHECK_OK);
+ Expect(Token::COMMA, CHECK_OK);
}
}
- Expect(i::Token::RPAREN, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
// Determine if the function will be lazily compiled.
// Currently only happens to top-level functions.
// Optimistically assume that all top-level functions are lazily compiled.
bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
- !inside_with && allow_lazy_ &&
+ !inside_with && allow_lazy() &&
!parenthesized_function_);
parenthesized_function_ = false;
- Expect(i::Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
if (is_lazily_compiled) {
ParseLazyFunctionLiteralBody(CHECK_OK);
} else {
- ParseSourceElements(i::Token::RBRACE, ok);
+ ParseSourceElements(Token::RBRACE, ok);
}
- Expect(i::Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE, CHECK_OK);
if (!is_classic_mode()) {
- int end_position = scanner_->location().end_pos;
+ int end_position = scanner()->location().end_pos;
CheckOctalLiteral(start_position, end_position, CHECK_OK);
CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
return Expression::StrictFunction();
@@ -1460,15 +1406,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator,
void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
- int body_start = scanner_->location().beg_pos;
+ int body_start = position();
log_->PauseRecording();
- ParseSourceElements(i::Token::RBRACE, ok);
+ ParseSourceElements(Token::RBRACE, ok);
log_->ResumeRecording();
if (!*ok) return;
// Position right after terminal '}'.
- ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
- int body_end = scanner_->peek_location().end_pos;
+ ASSERT_EQ(Token::RBRACE, scanner()->peek());
+ int body_end = scanner()->peek_location().end_pos;
log_->LogFunction(body_start, body_end,
scope_->materialized_literal_count(),
scope_->expected_properties(),
@@ -1479,8 +1425,8 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
// CallRuntime ::
// '%' Identifier Arguments
- Expect(i::Token::MOD, CHECK_OK);
- if (!allow_natives_syntax_) {
+ Expect(Token::MOD, CHECK_OK);
+ if (!allow_natives_syntax()) {
*ok = false;
return Expression::Default();
}
@@ -1493,29 +1439,12 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
#undef CHECK_OK
-void PreParser::ExpectSemicolon(bool* ok) {
- // Check for automatic semicolon insertion according to
- // the rules given in ECMA-262, section 7.9, page 21.
- i::Token::Value tok = peek();
- if (tok == i::Token::SEMICOLON) {
- Next();
- return;
- }
- if (scanner_->HasAnyLineTerminatorBeforeNext() ||
- tok == i::Token::RBRACE ||
- tok == i::Token::EOS) {
- return;
- }
- Expect(i::Token::SEMICOLON, ok);
-}
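The ExpectSemicolon() deleted here (its replacement now lives on ParserBase, declared in preparser.h below) implements automatic semicolon insertion: a real semicolon is consumed, while a preceding line terminator, a '}', or end of input merely terminate the statement without consuming anything. A minimal sketch of the same rule over a hypothetical token stream:

#include <stddef.h>
#include <vector>

enum SimpleToken { kSemicolon, kRightBrace, kEos, kOther };

struct Tok {
  SimpleToken type;
  bool newline_before;  // Was there a line terminator before this token?
};

struct TokenStream {
  std::vector<Tok> toks;
  size_t pos;
  const Tok& peek() const { return toks[pos]; }
  void advance() { ++pos; }
};

// Returns true if a statement may legally end at the current position
// (ECMA-262 5.1, section 7.9).
bool AcceptSemicolon(TokenStream* ts) {
  const Tok& t = ts->peek();
  if (t.type == kSemicolon) {  // An explicit semicolon is consumed.
    ts->advance();
    return true;
  }
  // Automatic insertion: the offending token is left in place; only the
  // statement boundary is recognized.
  return t.newline_before || t.type == kRightBrace || t.type == kEos;
}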
-
-
void PreParser::LogSymbol() {
- int identifier_pos = scanner_->location().beg_pos;
- if (scanner_->is_literal_ascii()) {
- log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
+ int identifier_pos = position();
+ if (scanner()->is_literal_ascii()) {
+ log_->LogAsciiSymbol(identifier_pos, scanner()->literal_ascii_string());
} else {
- log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string());
+ log_->LogUtf16Symbol(identifier_pos, scanner()->literal_utf16_string());
}
}
@@ -1524,10 +1453,10 @@ PreParser::Expression PreParser::GetStringSymbol() {
const int kUseStrictLength = 10;
const char* kUseStrictChars = "use strict";
LogSymbol();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == kUseStrictLength &&
- !scanner_->literal_contains_escapes() &&
- !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars,
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == kUseStrictLength &&
+ !scanner()->literal_contains_escapes() &&
+ !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars,
kUseStrictLength)) {
return Expression::UseStrictStringLiteral();
}
@@ -1537,22 +1466,22 @@ PreParser::Expression PreParser::GetStringSymbol() {
PreParser::Identifier PreParser::GetIdentifierSymbol() {
LogSymbol();
- if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
+ if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) {
return Identifier::FutureReserved();
- } else if (scanner_->current_token() ==
- i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ } else if (scanner()->current_token() ==
+ Token::FUTURE_STRICT_RESERVED_WORD) {
return Identifier::FutureStrictReserved();
- } else if (scanner_->current_token() == i::Token::YIELD) {
+ } else if (scanner()->current_token() == Token::YIELD) {
return Identifier::Yield();
}
- if (scanner_->is_literal_ascii()) {
+ if (scanner()->is_literal_ascii()) {
// Detect strict-mode poison words.
- if (scanner_->literal_length() == 4 &&
- !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) {
+ if (scanner()->literal_length() == 4 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) {
return Identifier::Eval();
}
- if (scanner_->literal_length() == 9 &&
- !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) {
+ if (scanner()->literal_length() == 9 &&
+ !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) {
return Identifier::Arguments();
}
}
@@ -1561,32 +1490,32 @@ PreParser::Identifier PreParser::GetIdentifierSymbol() {
PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- i::Token::Value next = Next();
+ Token::Value next = Next();
switch (next) {
- case i::Token::FUTURE_RESERVED_WORD: {
- i::Scanner::Location location = scanner_->location();
+ case Token::FUTURE_RESERVED_WORD: {
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location.beg_pos, location.end_pos,
"reserved_word", NULL);
*ok = false;
return GetIdentifierSymbol();
}
- case i::Token::YIELD:
+ case Token::YIELD:
if (scope_->is_generator()) {
// 'yield' in a generator is only valid as part of a YieldExpression.
- ReportMessageAt(scanner_->location(), "unexpected_token", "yield");
+ ReportMessageAt(scanner()->location(), "unexpected_token", "yield");
*ok = false;
return Identifier::Yield();
}
// FALLTHROUGH
- case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
if (!is_classic_mode()) {
- i::Scanner::Location location = scanner_->location();
+ Scanner::Location location = scanner()->location();
ReportMessageAt(location.beg_pos, location.end_pos,
"strict_reserved_word", NULL);
*ok = false;
}
// FALLTHROUGH
- case i::Token::IDENTIFIER:
+ case Token::IDENTIFIER:
return GetIdentifierSymbol();
default:
*ok = false;
@@ -1595,7 +1524,7 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
}
-void PreParser::SetStrictModeViolation(i::Scanner::Location location,
+void PreParser::SetStrictModeViolation(Scanner::Location location,
const char* type,
bool* ok) {
if (!is_classic_mode()) {
@@ -1619,7 +1548,7 @@ void PreParser::SetStrictModeViolation(i::Scanner::Location location,
void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
int end_pos,
bool* ok) {
- i::Scanner::Location location = strict_mode_violation_location_;
+ Scanner::Location location = strict_mode_violation_location_;
if (location.IsValid() &&
location.beg_pos > beg_pos && location.end_pos < end_pos) {
ReportMessageAt(location, strict_mode_violation_type_, NULL);
@@ -1628,7 +1557,7 @@ void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
}
-void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
+void PreParser::StrictModeIdentifierViolation(Scanner::Location location,
const char* eval_args_type,
Identifier identifier,
bool* ok) {
@@ -1649,17 +1578,16 @@ void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
- i::Token::Value next = Next();
- if (i::Token::IsKeyword(next)) {
- int pos = scanner_->location().beg_pos;
- const char* keyword = i::Token::String(next);
- log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
- i::StrLength(keyword)));
+ Token::Value next = Next();
+ if (Token::IsKeyword(next)) {
+ int pos = position();
+ const char* keyword = Token::String(next);
+ log_->LogAsciiSymbol(pos, Vector<const char>(keyword, StrLength(keyword)));
return Identifier::Default();
}
- if (next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ if (next == Token::IDENTIFIER ||
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD) {
return GetIdentifierSymbol();
}
*ok = false;
@@ -1676,9 +1604,9 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
bool* ok) {
Identifier result = ParseIdentifierName(ok);
if (!*ok) return Identifier::Default();
- if (scanner_->is_literal_ascii() &&
- scanner_->literal_length() == 3) {
- const char* token = scanner_->literal_ascii_string().start();
+ if (scanner()->is_literal_ascii() &&
+ scanner()->literal_length() == 3) {
+ const char* token = scanner()->literal_ascii_string().start();
*is_get = strncmp(token, "get", 3) == 0;
*is_set = !*is_get && strncmp(token, "set", 3) == 0;
}
@@ -1686,147 +1614,36 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
}
-bool PreParser::peek_any_identifier() {
- i::Token::Value next = peek();
- return next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD ||
- next == i::Token::FUTURE_STRICT_RESERVED_WORD ||
- next == i::Token::YIELD;
-}
-
-
-int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), true, value);
-}
-
-
-int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
- return AddSymbol(i::Vector<const byte>::cast(key), false, value);
-}
-
-int DuplicateFinder::AddSymbol(i::Vector<const byte> key,
- bool is_ascii,
- int value) {
- uint32_t hash = Hash(key, is_ascii);
- byte* encoding = BackupKey(key, is_ascii);
- i::HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
- int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- entry->value =
- reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
- return old_value;
-}
-
-
-int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) {
- ASSERT(key.length() > 0);
- // Quick check for already being in canonical form.
- if (IsNumberCanonical(key)) {
- return AddAsciiSymbol(key, value);
- }
-
- int flags = i::ALLOW_HEX | i::ALLOW_OCTAL | i::ALLOW_IMPLICIT_OCTAL |
- i::ALLOW_BINARY;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
- int length;
- const char* string;
- if (!std::isfinite(double_value)) {
- string = "Infinity";
- length = 8; // strlen("Infinity");
- } else {
- string = DoubleToCString(double_value,
- i::Vector<char>(number_buffer_, kBufferSize));
- length = i::StrLength(string);
- }
- return AddSymbol(i::Vector<const byte>(reinterpret_cast<const byte*>(string),
- length), true, value);
-}
-
-
-bool DuplicateFinder::IsNumberCanonical(i::Vector<const char> number) {
- // Test for a safe approximation of number literals that are already
- // in canonical form: max 15 digits, no leading zeroes, except an
- // integer part that is a single zero, and no trailing zeros below
- // the decimal point.
- int pos = 0;
- int length = number.length();
- if (number.length() > 15) return false;
- if (number[pos] == '0') {
- pos++;
+void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property,
+ PropertyKind type,
+ bool* ok) {
+ int old;
+ if (property == Token::NUMBER) {
+ old = finder_.AddNumber(scanner()->literal_ascii_string(), type);
+ } else if (scanner()->is_literal_ascii()) {
+ old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type);
} else {
- while (pos < length &&
- static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
- }
- if (length == pos) return true;
- if (number[pos] != '.') return false;
- pos++;
- bool invalid_last_digit = true;
- while (pos < length) {
- byte digit = number[pos] - '0';
- if (digit > '9' - '0') return false;
- invalid_last_digit = (digit == 0);
- pos++;
+ old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type);
}
- return !invalid_last_digit;
-}
-
-
-uint32_t DuplicateFinder::Hash(i::Vector<const byte> key, bool is_ascii) {
- // Primitive hash function, almost identical to the one used
- // for strings (except that it's seeded by the length and ASCII-ness).
- int length = key.length();
- uint32_t hash = (length << 1) | (is_ascii ? 1 : 0);
- for (int i = 0; i < length; i++) {
- uint32_t c = key[i];
- hash = (hash + c) * 1025;
- hash ^= (hash >> 6);
- }
- return hash;
-}
-
-
-bool DuplicateFinder::Match(void* first, void* second) {
- // Decode lengths.
- // Length + ASCII-bit is encoded as base 128, most significant heptet first,
- // with a 8th bit being non-zero while there are more heptets.
- // The value encodes the number of bytes following, and whether the original
- // was ASCII.
- byte* s1 = reinterpret_cast<byte*>(first);
- byte* s2 = reinterpret_cast<byte*>(second);
- uint32_t length_ascii_field = 0;
- byte c1;
- do {
- c1 = *s1;
- if (c1 != *s2) return false;
- length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
- s1++;
- s2++;
- } while ((c1 & 0x80) != 0);
- int length = static_cast<int>(length_ascii_field >> 1);
- return memcmp(s1, s2, length) == 0;
-}
-
-
-byte* DuplicateFinder::BackupKey(i::Vector<const byte> bytes,
- bool is_ascii) {
- uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
- backing_store_.StartSequence();
- // Emit ascii_length as base-128 encoded number, with the 7th bit set
- // on the byte of every heptet except the last, least significant, one.
- if (ascii_length >= (1 << 7)) {
- if (ascii_length >= (1 << 14)) {
- if (ascii_length >= (1 << 21)) {
- if (ascii_length >= (1 << 28)) {
- backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
- }
- backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ PropertyKind old_type = static_cast<PropertyKind>(old);
+ if (HasConflict(old_type, type)) {
+ if (IsDataDataConflict(old_type, type)) {
+ // Both are data properties.
+ if (language_mode_ == CLASSIC_MODE) return;
+ parser()->ReportMessageAt(scanner()->location(),
+ "strict_duplicate_property");
+ } else if (IsDataAccessorConflict(old_type, type)) {
+ // Both a data and an accessor property with the same name.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_data_property");
+ } else {
+ ASSERT(IsAccessorAccessorConflict(old_type, type));
+ // Both accessors of the same type.
+ parser()->ReportMessageAt(scanner()->location(),
+ "accessor_get_set");
}
- backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ *ok = false;
}
- backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
-
- backing_store_.AddBlock(bytes);
- return backing_store_.EndSequence().start();
}
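The three conflict predicates used in CheckProperty() reduce to single bit operations when the property kinds are chosen so that a data property overlaps every kind and carries a dedicated flag bit. The constants below are one plausible assignment, shown for illustration; the real PropertyKind values are defined in preparser.h, outside this hunk:

#include <assert.h>

enum PropertyKind {
  kNone = 0,
  kGetterProperty = 1,  // Accessor kinds use only the low two bits.
  kSetterProperty = 2,
  kValueFlag = 4,       // Set exactly on data properties.
  kValueProperty = 7    // Overlaps every other kind.
};

static bool HasConflict(int a, int b) { return (a & b) != 0; }
static bool IsDataDataConflict(int a, int b) {
  return ((a & b) & kValueFlag) != 0;  // Both carry the data bit.
}
static bool IsDataAccessorConflict(int a, int b) {
  return ((a ^ b) & kValueFlag) != 0;  // Exactly one carries the data bit.
}
static bool IsAccessorAccessorConflict(int a, int b) {
  return ((a | b) & kValueFlag) == 0;  // Neither carries the data bit.
}

int main() {
  assert(!HasConflict(kGetterProperty, kSetterProperty));  // get+set is legal.
  assert(IsDataDataConflict(kValueProperty, kValueProperty));
  assert(IsDataAccessorConflict(kValueProperty, kGetterProperty));
  assert(IsAccessorAccessorConflict(kGetterProperty, kGetterProperty));
  return 0;
}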
-} } // v8::preparser
+
+} } // v8::internal
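The deleted DuplicateFinder stored every key behind a variable-length header: (length << 1) | is_ascii written base-128, most significant heptet first, with the high bit set on each byte except the last, which is exactly what BackupKey() emitted and Match() decoded. A standalone sketch of that encoding, assuming nothing from V8:

#include <stdint.h>
#include <vector>

// Emit (length << 1 | is_ascii) as MSB-first base-128 heptets.
void EmitLengthAscii(uint32_t length, bool is_ascii,
                     std::vector<uint8_t>* out) {
  uint32_t value = (length << 1) | (is_ascii ? 1 : 0);
  int shift = 28;  // Find the highest non-zero heptet, then emit downward.
  while (shift > 0 && (value >> shift) == 0) shift -= 7;
  for (; shift > 0; shift -= 7) {
    out->push_back(static_cast<uint8_t>((value >> shift) | 0x80));
  }
  out->push_back(static_cast<uint8_t>(value & 0x7F));
}

// Inverse, matching the decoding loop in the deleted Match(). Returns the
// key length; *rest points at the key bytes that follow the header.
uint32_t ReadLengthAscii(const uint8_t* p, bool* is_ascii,
                         const uint8_t** rest) {
  uint32_t value = 0;
  uint8_t b;
  do {
    b = *p++;
    value = (value << 7) | (b & 0x7F);
  } while ((b & 0x80) != 0);
  *is_ascii = (value & 1) != 0;
  *rest = p;
  return value >> 1;
}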
diff --git a/chromium/v8/src/preparser.h b/chromium/v8/src/preparser.h
index 9358d6bd189..e99b4b0a181 100644
--- a/chromium/v8/src/preparser.h
+++ b/chromium/v8/src/preparser.h
@@ -33,14 +33,178 @@
#include "scanner.h"
namespace v8 {
-
namespace internal {
-class UnicodeCache;
-}
-namespace preparser {
+// Common base class shared between parser and pre-parser.
+class ParserBase {
+ public:
+ ParserBase(Scanner* scanner, uintptr_t stack_limit)
+ : scanner_(scanner),
+ stack_limit_(stack_limit),
+ stack_overflow_(false),
+ allow_lazy_(false),
+ allow_natives_syntax_(false),
+ allow_generators_(false),
+ allow_for_of_(false) { }
+ // TODO(mstarzinger): Only virtual until message reporting has been unified.
+ virtual ~ParserBase() { }
+
+ // Getters that indicate whether certain syntactical constructs are
+ // allowed to be parsed by this instance of the parser.
+ bool allow_lazy() const { return allow_lazy_; }
+ bool allow_natives_syntax() const { return allow_natives_syntax_; }
+ bool allow_generators() const { return allow_generators_; }
+ bool allow_for_of() const { return allow_for_of_; }
+ bool allow_modules() const { return scanner()->HarmonyModules(); }
+ bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); }
+ bool allow_harmony_numeric_literals() const {
+ return scanner()->HarmonyNumericLiterals();
+ }
+
+ // Setters that determine whether certain syntactical constructs are
+ // allowed to be parsed by this instance of the parser.
+ void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
+ void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
+ void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
+ void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); }
+ void set_allow_harmony_scoping(bool allow) {
+ scanner()->SetHarmonyScoping(allow);
+ }
+ void set_allow_harmony_numeric_literals(bool allow) {
+ scanner()->SetHarmonyNumericLiterals(allow);
+ }
+
+ protected:
+ Scanner* scanner() const { return scanner_; }
+ int position() { return scanner_->location().beg_pos; }
+ int peek_position() { return scanner_->peek_location().beg_pos; }
+ bool stack_overflow() const { return stack_overflow_; }
+ void set_stack_overflow() { stack_overflow_ = true; }
+
+ INLINE(Token::Value peek()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ return scanner()->peek();
+ }
+
+ INLINE(Token::Value Next()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ {
+ int marker;
+ if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
+ // Any further calls to Next or peek will return the illegal token.
+ // The current call must return the next token, which might already
+ // have been peek'ed.
+ stack_overflow_ = true;
+ }
+ }
+ return scanner()->Next();
+ }
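
The stack check inside Next() works because the address of a fresh local approximates the current stack pointer. A hedged sketch of the idea, assuming a downward-growing stack (true on the architectures V8 targets):

    #include <stdint.h>

    // Returns true when the stack has grown down past the given limit.
    static bool StackIsNearLimit(uintptr_t stack_limit) {
      int marker;  // Sits at (roughly) the current top of the stack.
      return reinterpret_cast<uintptr_t>(&marker) < stack_limit;
    }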
+
+ void Consume(Token::Value token) {
+ Token::Value next = Next();
+ USE(next);
+ USE(token);
+ ASSERT(next == token);
+ }
+
+ bool Check(Token::Value token) {
+ Token::Value next = peek();
+ if (next == token) {
+ Consume(next);
+ return true;
+ }
+ return false;
+ }
+
+ void Expect(Token::Value token, bool* ok) {
+ Token::Value next = Next();
+ if (next != token) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ }
+ }
+
+ bool peek_any_identifier();
+ void ExpectSemicolon(bool* ok);
+ bool CheckContextualKeyword(Vector<const char> keyword);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
+ // Determine precedence of given token.
+ static int Precedence(Token::Value token, bool accept_IN);
+
+ // Report syntax errors.
+ virtual void ReportUnexpectedToken(Token::Value token) = 0;
+ virtual void ReportMessageAt(Scanner::Location loc, const char* type) = 0;
+
+ // Used to detect duplicates in object literals. Each of the values
+ // kGetterProperty, kSetterProperty and kValueProperty represents
+ // a type of object literal property. When parsing a property, its
+ // type value is stored in the DuplicateFinder for the property name.
+ // Values are chosen so that having intersection bits means there is
+ // an incompatibility.
+ // I.e., you can add a getter to a property that already has a setter, since
+ // kGetterProperty and kSetterProperty don't intersect, but not if it
+ // already has a getter or a value. Adding the getter to an existing
+ // setter will store the value (kGetterProperty | kSetterProperty), which
+ // is incompatible with adding any further properties.
+ enum PropertyKind {
+ kNone = 0,
+ // Bit patterns representing different object literal property types.
+ kGetterProperty = 1,
+ kSetterProperty = 2,
+ kValueProperty = 7,
+ // Helper constants.
+ kValueFlag = 4
+ };
+
+ // Validation per ECMA 262 - 11.1.5 "Object Initialiser".
+ class ObjectLiteralChecker {
+ public:
+ ObjectLiteralChecker(ParserBase* parser, LanguageMode mode)
+ : parser_(parser),
+ finder_(scanner()->unicode_cache()),
+ language_mode_(mode) { }
+
+ void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
+
+ private:
+ ParserBase* parser() const { return parser_; }
+ Scanner* scanner() const { return parser_->scanner(); }
+
+ // Checks the type of conflict based on values coming from PropertyType.
+ bool HasConflict(PropertyKind type1, PropertyKind type2) {
+ return (type1 & type2) != 0;
+ }
+ bool IsDataDataConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 & type2) & kValueFlag) != 0;
+ }
+ bool IsDataAccessorConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 ^ type2) & kValueFlag) != 0;
+ }
+ bool IsAccessorAccessorConflict(PropertyKind type1, PropertyKind type2) {
+ return ((type1 | type2) & kValueFlag) == 0;
+ }
+
+ ParserBase* parser_;
+ DuplicateFinder finder_;
+ LanguageMode language_mode_;
+ };
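
The bit patterns make the four conflict predicates above pure bit arithmetic. A small illustrative check of how they interact (not part of the patch):

    #include <assert.h>

    static void PropertyKindAlgebraDemo() {
      const int kGetter = 1, kSetter = 2, kValue = 7, kValueFlag = 4;
      // Getter + setter: no shared bits, so no conflict.
      assert((kGetter & kSetter) == 0);
      // Data vs. accessor: the XOR exposes the kValueFlag bit.
      assert(((kGetter ^ kValue) & kValueFlag) != 0);
      // Data vs. data: the AND keeps the kValueFlag bit.
      assert(((kValue & kValue) & kValueFlag) != 0);
      // Accessor vs. accessor: kValueFlag never set, and a repeated
      // getter (or setter) conflicts via the plain intersection test.
      assert(((kGetter | kSetter) & kValueFlag) == 0);
      assert((kGetter & kGetter) != 0);
    }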
+
+ private:
+ Scanner* scanner_;
+ uintptr_t stack_limit_;
+ bool stack_overflow_;
+
+ bool allow_lazy_;
+ bool allow_natives_syntax_;
+ bool allow_generators_;
+ bool allow_for_of_;
+};
-typedef uint8_t byte;
// Preparsing checks a JavaScript program and emits preparse-data that helps
// a later parsing to be faster.
@@ -54,104 +218,25 @@ typedef uint8_t byte;
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-
-namespace i = v8::internal;
-
-class DuplicateFinder {
- public:
- explicit DuplicateFinder(i::UnicodeCache* constants)
- : unicode_constants_(constants),
- backing_store_(16),
- map_(&Match) { }
-
- int AddAsciiSymbol(i::Vector<const char> key, int value);
- int AddUtf16Symbol(i::Vector<const uint16_t> key, int value);
- // Add a number literal by converting it (if necessary)
- // to the string that ToString(ToNumber(literal)) would generate,
- // and then adding that string with AddAsciiSymbol.
- // This string is the actual value used as key in an object literal,
- // and the one that must be different from the other keys.
- int AddNumber(i::Vector<const char> key, int value);
-
- private:
- int AddSymbol(i::Vector<const byte> key, bool is_ascii, int value);
- // Backs up the key and its length in the backing store.
- // The backup is stored with a base-128 encoding of the
- // length (plus a bit saying whether the string is ASCII),
- // followed by the bytes of the key.
- byte* BackupKey(i::Vector<const byte> key, bool is_ascii);
-
- // Compare two encoded keys (both pointing into the backing store)
- // for having the same base-128 encoded lengths and ASCII-ness,
- // and then having the same 'length' bytes following.
- static bool Match(void* first, void* second);
- // Creates a hash from a sequence of bytes.
- static uint32_t Hash(i::Vector<const byte> key, bool is_ascii);
- // Checks whether a string containing a JS number is its canonical
- // form.
- static bool IsNumberCanonical(i::Vector<const char> key);
-
- // Size of buffer. Sufficient for using it to call DoubleToCString
- // from conversions.h.
- static const int kBufferSize = 100;
-
- i::UnicodeCache* unicode_constants_;
- // Backing store used to store strings used as hashmap keys.
- i::SequenceCollector<unsigned char> backing_store_;
- i::HashMap map_;
- // Buffer used for string->number->canonical string conversions.
- char number_buffer_[kBufferSize];
-};
-
-
-class PreParser {
+class PreParser : public ParserBase {
public:
enum PreParseResult {
kPreParseStackOverflow,
kPreParseSuccess
};
-
- PreParser(i::Scanner* scanner,
- i::ParserRecorder* log,
+ PreParser(Scanner* scanner,
+ ParserRecorder* log,
uintptr_t stack_limit)
- : scanner_(scanner),
+ : ParserBase(scanner, stack_limit),
log_(log),
scope_(NULL),
- stack_limit_(stack_limit),
- strict_mode_violation_location_(i::Scanner::Location::invalid()),
+ strict_mode_violation_location_(Scanner::Location::invalid()),
strict_mode_violation_type_(NULL),
- stack_overflow_(false),
- allow_lazy_(false),
- allow_natives_syntax_(false),
- allow_generators_(false),
- allow_for_of_(false),
parenthesized_function_(false) { }
~PreParser() {}
- bool allow_natives_syntax() const { return allow_natives_syntax_; }
- bool allow_lazy() const { return allow_lazy_; }
- bool allow_modules() const { return scanner_->HarmonyModules(); }
- bool allow_harmony_scoping() const { return scanner_->HarmonyScoping(); }
- bool allow_generators() const { return allow_generators_; }
- bool allow_for_of() const { return allow_for_of_; }
- bool allow_harmony_numeric_literals() const {
- return scanner_->HarmonyNumericLiterals();
- }
-
- void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
- void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
- void set_allow_modules(bool allow) { scanner_->SetHarmonyModules(allow); }
- void set_allow_harmony_scoping(bool allow) {
- scanner_->SetHarmonyScoping(allow);
- }
- void set_allow_generators(bool allow) { allow_generators_ = allow; }
- void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
- void set_allow_harmony_numeric_literals(bool allow) {
- scanner_->SetHarmonyNumericLiterals(allow);
- }
-
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
@@ -159,13 +244,13 @@ class PreParser {
PreParseResult PreParseProgram() {
Scope top_scope(&scope_, kTopLevelScope);
bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
- ParseSourceElements(i::Token::EOS, &ok);
- if (stack_overflow_) return kPreParseStackOverflow;
+ int start_position = scanner()->peek_location().beg_pos;
+ ParseSourceElements(Token::EOS, &ok);
+ if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
+ ReportUnexpectedToken(scanner()->current_token());
} else if (!scope_->is_classic_mode()) {
- CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
+ CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok);
}
return kPreParseSuccess;
}
@@ -178,50 +263,11 @@ class PreParser {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
// final '}'.
- PreParseResult PreParseLazyFunction(i::LanguageMode mode,
+ PreParseResult PreParseLazyFunction(LanguageMode mode,
bool is_generator,
- i::ParserRecorder* log);
+ ParserRecorder* log);
private:
- // Used to detect duplicates in object literals. Each of the values
- // kGetterProperty, kSetterProperty and kValueProperty represents
- // a type of object literal property. When parsing a property, its
- // type value is stored in the DuplicateFinder for the property name.
- // Values are chosen so that having intersection bits means there is
- // an incompatibility.
- // I.e., you can add a getter to a property that already has a setter, since
- // kGetterProperty and kSetterProperty don't intersect, but not if it
- // already has a getter or a value. Adding the getter to an existing
- // setter will store the value (kGetterProperty | kSetterProperty), which
- // is incompatible with adding any further properties.
- enum PropertyType {
- kNone = 0,
- // Bit patterns representing different object literal property types.
- kGetterProperty = 1,
- kSetterProperty = 2,
- kValueProperty = 7,
- // Helper constants.
- kValueFlag = 4
- };
-
- // Checks the type of conflict based on values coming from PropertyType.
- bool HasConflict(int type1, int type2) { return (type1 & type2) != 0; }
- bool IsDataDataConflict(int type1, int type2) {
- return ((type1 & type2) & kValueFlag) != 0;
- }
- bool IsDataAccessorConflict(int type1, int type2) {
- return ((type1 ^ type2) & kValueFlag) != 0;
- }
- bool IsAccessorAccessorConflict(int type1, int type2) {
- return ((type1 | type2) & kValueFlag) == 0;
- }
-
-
- void CheckDuplicate(DuplicateFinder* finder,
- i::Token::Value property,
- int type,
- bool* ok);
-
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
// are either being counted in the preparser data, or are important
@@ -441,7 +487,7 @@ class PreParser {
}
bool IsStringLiteral() {
- return code_ != kUnknownStatement;
+ return code_ == kStringLiteralExpressionStatement;
}
bool IsUseStrictLiteral() {
@@ -480,7 +526,7 @@ class PreParser {
expected_properties_(0),
with_nesting_count_(0),
language_mode_(
- (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE),
+ (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE),
is_generator_(false) {
*variable = this;
}
@@ -494,12 +540,12 @@ class PreParser {
bool is_generator() { return is_generator_; }
void set_is_generator(bool is_generator) { is_generator_ = is_generator; }
bool is_classic_mode() {
- return language_mode_ == i::CLASSIC_MODE;
+ return language_mode_ == CLASSIC_MODE;
}
- i::LanguageMode language_mode() {
+ LanguageMode language_mode() {
return language_mode_;
}
- void set_language_mode(i::LanguageMode language_mode) {
+ void set_language_mode(LanguageMode language_mode) {
language_mode_ = language_mode;
}
@@ -523,13 +569,16 @@ class PreParser {
int materialized_literal_count_;
int expected_properties_;
int with_nesting_count_;
- i::LanguageMode language_mode_;
+ LanguageMode language_mode_;
bool is_generator_;
};
// Report syntax error
- void ReportUnexpectedToken(i::Token::Value token);
- void ReportMessageAt(i::Scanner::Location location,
+ void ReportUnexpectedToken(Token::Value token);
+ void ReportMessageAt(Scanner::Location location, const char* type) {
+ ReportMessageAt(location, type, NULL);
+ }
+ void ReportMessageAt(Scanner::Location location,
const char* type,
const char* name_opt) {
log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt);
@@ -541,8 +590,6 @@ class PreParser {
log_->LogMessage(start_pos, end_pos, type, name_opt);
}
- void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
-
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
@@ -606,87 +653,40 @@ class PreParser {
// Log the currently parsed string literal.
Expression GetStringSymbol();
- i::Token::Value peek() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- return scanner_->peek();
- }
-
- i::Token::Value Next() {
- if (stack_overflow_) return i::Token::ILLEGAL;
- {
- int marker;
- if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
- // Further calls to peek/Next will return illegal token.
- // The current one will still be returned. It might already
- // have been seen using peek.
- stack_overflow_ = true;
- }
- }
- return scanner_->Next();
- }
-
- bool peek_any_identifier();
-
- void set_language_mode(i::LanguageMode language_mode) {
+ void set_language_mode(LanguageMode language_mode) {
scope_->set_language_mode(language_mode);
}
bool is_classic_mode() {
- return scope_->language_mode() == i::CLASSIC_MODE;
+ return scope_->language_mode() == CLASSIC_MODE;
}
bool is_extended_mode() {
- return scope_->language_mode() == i::EXTENDED_MODE;
+ return scope_->language_mode() == EXTENDED_MODE;
}
- i::LanguageMode language_mode() { return scope_->language_mode(); }
-
- void Consume(i::Token::Value token) { Next(); }
-
- void Expect(i::Token::Value token, bool* ok) {
- if (Next() != token) {
- *ok = false;
- }
- }
-
- bool Check(i::Token::Value token) {
- i::Token::Value next = peek();
- if (next == token) {
- Consume(next);
- return true;
- }
- return false;
- }
- void ExpectSemicolon(bool* ok);
+ LanguageMode language_mode() { return scope_->language_mode(); }
bool CheckInOrOf(bool accept_OF);
- static int Precedence(i::Token::Value tok, bool accept_IN);
-
- void SetStrictModeViolation(i::Scanner::Location,
+ void SetStrictModeViolation(Scanner::Location,
const char* type,
bool* ok);
void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
- void StrictModeIdentifierViolation(i::Scanner::Location,
+ void StrictModeIdentifierViolation(Scanner::Location,
const char* eval_args_type,
Identifier identifier,
bool* ok);
- i::Scanner* scanner_;
- i::ParserRecorder* log_;
+ ParserRecorder* log_;
Scope* scope_;
- uintptr_t stack_limit_;
- i::Scanner::Location strict_mode_violation_location_;
+ Scanner::Location strict_mode_violation_location_;
const char* strict_mode_violation_type_;
- bool stack_overflow_;
- bool allow_lazy_;
- bool allow_natives_syntax_;
- bool allow_generators_;
- bool allow_for_of_;
bool parenthesized_function_;
};
-} } // v8::preparser
+
+} } // v8::internal
#endif // V8_PREPARSER_H
diff --git a/chromium/v8/src/prettyprinter.cc b/chromium/v8/src/prettyprinter.cc
index b1bac4cd4a7..4b441b9ae41 100644
--- a/chromium/v8/src/prettyprinter.cc
+++ b/chromium/v8/src/prettyprinter.cc
@@ -200,11 +200,25 @@ void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
Print(") { ");
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++)
- PrintCaseClause(cases->at(i));
+ Visit(cases->at(i));
Print("}");
}
+void PrettyPrinter::VisitCaseClause(CaseClause* clause) {
+ if (clause->is_default()) {
+ Print("default");
+ } else {
+ Print("case ");
+ Visit(clause->label());
+ }
+ Print(": ");
+ PrintStatements(clause->statements());
+ if (clause->statements()->length() > 0)
+ Print(" ");
+}
+
+
void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
PrintLabels(node->labels());
Print("do ");
@@ -297,10 +311,9 @@ void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
-void PrettyPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
+void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
Print("(");
- PrintLiteral(node->shared_function_info(), true);
+ PrintLiteral(node->name(), false);
Print(")");
}
@@ -621,20 +634,6 @@ void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
}
-void PrettyPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- Print("default");
- } else {
- Print("case ");
- Visit(clause->label());
- }
- Print(": ");
- PrintStatements(clause->statements());
- if (clause->statements()->length() > 0)
- Print(" ");
-}
-
-
//-----------------------------------------------------------------------------
class IndentedScope BASE_EMBEDDED {
@@ -762,18 +761,6 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
}
-void AstPrinter::PrintCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT");
- PrintStatements(clause->statements());
- } else {
- IndentedScope indent(this, "CASE");
- Visit(clause->label());
- PrintStatements(clause->statements());
- }
-}
-
-
void AstPrinter::VisitBlock(Block* node) {
const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK";
IndentedScope indent(this, block_txt);
@@ -901,7 +888,19 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
- PrintCaseClause(node->cases()->at(i));
+ Visit(node->cases()->at(i));
+ }
+}
+
+
+void AstPrinter::VisitCaseClause(CaseClause* clause) {
+ if (clause->is_default()) {
+ IndentedScope indent(this, "DEFAULT");
+ PrintStatements(clause->statements());
+ } else {
+ IndentedScope indent(this, "CASE");
+ Visit(clause->label());
+ PrintStatements(clause->statements());
}
}
@@ -982,10 +981,9 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
}
-void AstPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
- PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
+void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
+ IndentedScope indent(this, "NATIVE FUNC LITERAL");
+ PrintLiteralIndented("NAME", node->name(), false);
}
diff --git a/chromium/v8/src/profile-generator-inl.h b/chromium/v8/src/profile-generator-inl.h
index f2feb73fc91..e363f67761b 100644
--- a/chromium/v8/src/profile-generator-inl.h
+++ b/chromium/v8/src/profile-generator-inl.h
@@ -33,27 +33,19 @@
namespace v8 {
namespace internal {
-const char* StringsStorage::GetFunctionName(Name* name) {
- return GetFunctionName(GetName(name));
-}
-
-
-const char* StringsStorage::GetFunctionName(const char* name) {
- return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
-}
-
-
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
const char* name_prefix,
const char* resource_name,
- int line_number)
+ int line_number,
+ int column_number)
: tag_(tag),
builtin_id_(Builtins::builtin_count),
name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
line_number_(line_number),
+ column_number_(column_number),
shared_id_(0),
script_id_(v8::Script::kNoScriptId),
no_frame_ranges_(NULL),
@@ -77,25 +69,6 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
children_(CodeEntriesMatch),
id_(tree->next_node_id()) { }
-
-CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
- switch (tag) {
- case GC:
- return gc_entry_;
- case JS:
- case COMPILER:
- // DOM event handlers are reported as OTHER / EXTERNAL entries.
- // To avoid confusing people, let's put all these entries into
- // one bucket.
- case OTHER:
- case EXTERNAL:
- return program_entry_;
- case IDLE:
- return idle_entry_;
- default: return NULL;
- }
-}
-
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/chromium/v8/src/profile-generator.cc b/chromium/v8/src/profile-generator.cc
index 38c1f785d9c..acf54da1c7b 100644
--- a/chromium/v8/src/profile-generator.cc
+++ b/chromium/v8/src/profile-generator.cc
@@ -41,6 +41,12 @@ namespace v8 {
namespace internal {
+bool StringsStorage::StringsMatch(void* key1, void* key2) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
+}
+
+
StringsStorage::StringsStorage(Heap* heap)
: hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}
@@ -57,12 +63,15 @@ StringsStorage::~StringsStorage() {
const char* StringsStorage::GetCopy(const char* src) {
int len = static_cast<int>(strlen(src));
- Vector<char> dst = Vector<char>::New(len + 1);
- OS::StrNCpy(dst, src, len);
- dst[len] = '\0';
- uint32_t hash =
- StringHasher::HashSequentialString(dst.start(), len, hash_seed_);
- return AddOrDisposeString(dst.start(), hash);
+ HashMap::Entry* entry = GetEntry(src, len);
+ if (entry->value == NULL) {
+ Vector<char> dst = Vector<char>::New(len + 1);
+ OS::StrNCpy(dst, src, len);
+ dst[len] = '\0';
+ entry->key = dst.start();
+ entry->value = entry->key;
+ }
+ return reinterpret_cast<const char*>(entry->value);
}
@@ -75,15 +84,16 @@ const char* StringsStorage::GetFormatted(const char* format, ...) {
}
-const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) {
- HashMap::Entry* cache_entry = names_.Lookup(str, hash, true);
- if (cache_entry->value == NULL) {
+const char* StringsStorage::AddOrDisposeString(char* str, int len) {
+ HashMap::Entry* entry = GetEntry(str, len);
+ if (entry->value == NULL) {
// New entry added.
- cache_entry->value = str;
+ entry->key = str;
+ entry->value = str;
} else {
DeleteArray(str);
}
- return reinterpret_cast<const char*>(cache_entry->value);
+ return reinterpret_cast<const char*>(entry->value);
}
@@ -92,11 +102,9 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
int len = OS::VSNPrintF(str, format, args);
if (len == -1) {
DeleteArray(str.start());
- return format;
+ return GetCopy(format);
}
- uint32_t hash = StringHasher::HashSequentialString(
- str.start(), len, hash_seed_);
- return AddOrDisposeString(str.start(), hash);
+ return AddOrDisposeString(str.start(), len);
}
@@ -104,11 +112,11 @@ const char* StringsStorage::GetName(Name* name) {
if (name->IsString()) {
String* str = String::cast(name);
int length = Min(kMaxNameSize, str->length());
+ int actual_length = 0;
SmartArrayPointer<char> data =
- str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
- uint32_t hash = StringHasher::HashSequentialString(
- *data, length, name->GetHeap()->HashSeed());
- return AddOrDisposeString(data.Detach(), hash);
+ str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
+ &actual_length);
+ return AddOrDisposeString(data.Detach(), actual_length);
} else if (name->IsSymbol()) {
return "<symbol>";
}
@@ -121,6 +129,21 @@ const char* StringsStorage::GetName(int index) {
}
+const char* StringsStorage::GetFunctionName(Name* name) {
+ return BeautifyFunctionName(GetName(name));
+}
+
+
+const char* StringsStorage::GetFunctionName(const char* name) {
+ return BeautifyFunctionName(GetCopy(name));
+}
+
+
+const char* StringsStorage::BeautifyFunctionName(const char* name) {
+ return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
+}
+
+
size_t StringsStorage::GetUsedMemorySize() const {
size_t size = sizeof(*this);
size += sizeof(HashMap::Entry) * names_.capacity();
@@ -131,6 +154,12 @@ size_t StringsStorage::GetUsedMemorySize() const {
}
+HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
+ uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
+ return names_.Lookup(const_cast<char*>(str), hash, true);
+}
+
+
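GetEntry() above turns the names_ hash map into a string-interning table: GetCopy() and AddOrDisposeString() now allocate at most one canonical copy per distinct string. A rough standalone analogue using the C++ standard library instead of V8's HashMap (illustrative only):

    #include <string>
    #include <unordered_set>

    class StringInterner {
     public:
      // Equal inputs always return the same canonical pointer; the string
      // is copied only the first time it is seen.
      const char* GetCopy(const char* src) {
        return table_.insert(src).first->c_str();
      }

     private:
      std::unordered_set<std::string> table_;  // Owns the canonical copies.
    };
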
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
@@ -141,15 +170,6 @@ CodeEntry::~CodeEntry() {
}
-void CodeEntry::CopyData(const CodeEntry& source) {
- tag_ = source.tag_;
- name_prefix_ = source.name_prefix_;
- name_ = source.name_;
- resource_name_ = source.resource_name_;
- line_number_ = source.line_number_;
-}
-
-
uint32_t CodeEntry::GetCallUid() const {
uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
if (shared_id_ != 0) {
@@ -546,12 +566,14 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(
const char* name,
const char* name_prefix,
const char* resource_name,
- int line_number) {
+ int line_number,
+ int column_number) {
CodeEntry* code_entry = new CodeEntry(tag,
name,
name_prefix,
resource_name,
- line_number);
+ line_number,
+ column_number);
code_entries_.Add(code_entry);
return code_entry;
}
@@ -660,4 +682,22 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
+CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
+ switch (tag) {
+ case GC:
+ return gc_entry_;
+ case JS:
+ case COMPILER:
+ // DOM event handlers are reported as OTHER / EXTERNAL entries.
+ // To avoid confusing people, let's put all these entries into
+ // one bucket.
+ case OTHER:
+ case EXTERNAL:
+ return program_entry_;
+ case IDLE:
+ return idle_entry_;
+ default: return NULL;
+ }
+}
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/profile-generator.h b/chromium/v8/src/profile-generator.h
index 0a4502cc1b3..6e4758bece7 100644
--- a/chromium/v8/src/profile-generator.h
+++ b/chromium/v8/src/profile-generator.h
@@ -49,20 +49,18 @@ class StringsStorage {
const char* GetVFormatted(const char* format, va_list args);
const char* GetName(Name* name);
const char* GetName(int index);
- inline const char* GetFunctionName(Name* name);
- inline const char* GetFunctionName(const char* name);
+ const char* GetFunctionName(Name* name);
+ const char* GetFunctionName(const char* name);
size_t GetUsedMemorySize() const;
private:
static const int kMaxNameSize = 1024;
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
- const char* AddOrDisposeString(char* str, uint32_t hash);
+ static bool StringsMatch(void* key1, void* key2);
+ const char* BeautifyFunctionName(const char* name);
+ const char* AddOrDisposeString(char* str, int len);
+ HashMap::Entry* GetEntry(const char* str, int len);
- // Mapping of strings by String::Hash to const char* strings.
uint32_t hash_seed_;
HashMap names_;
@@ -73,28 +71,30 @@ class StringsStorage {
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
- INLINE(CodeEntry(Logger::LogEventsAndTags tag,
+ inline CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
- int line_number = v8::CpuProfileNode::kNoLineNumberInfo));
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo);
~CodeEntry();
- INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
- INLINE(const char* name_prefix() const) { return name_prefix_; }
- INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
- INLINE(const char* name() const) { return name_; }
- INLINE(const char* resource_name() const) { return resource_name_; }
- INLINE(int line_number() const) { return line_number_; }
- INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
- INLINE(int script_id() const) { return script_id_; }
- INLINE(void set_script_id(int script_id)) { script_id_ = script_id; }
- INLINE(void set_bailout_reason(const char* bailout_reason)) {
+ bool is_js_function() const { return is_js_function_tag(tag_); }
+ const char* name_prefix() const { return name_prefix_; }
+ bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
+ const char* name() const { return name_; }
+ const char* resource_name() const { return resource_name_; }
+ int line_number() const { return line_number_; }
+ int column_number() const { return column_number_; }
+ void set_shared_id(int shared_id) { shared_id_ = shared_id; }
+ int script_id() const { return script_id_; }
+ void set_script_id(int script_id) { script_id_ = script_id; }
+ void set_bailout_reason(const char* bailout_reason) {
bailout_reason_ = bailout_reason;
}
- INLINE(const char* bailout_reason() const) { return bailout_reason_; }
+ const char* bailout_reason() const { return bailout_reason_; }
- INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
+ static inline bool is_js_function_tag(Logger::LogEventsAndTags tag);
List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
void set_no_frame_ranges(List<OffsetRange>* ranges) {
@@ -104,7 +104,6 @@ class CodeEntry {
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const { return builtin_id_; }
- void CopyData(const CodeEntry& source);
uint32_t GetCallUid() const;
bool IsSameAs(CodeEntry* entry) const;
@@ -119,6 +118,7 @@ class CodeEntry {
const char* name_;
const char* resource_name_;
int line_number_;
+ int column_number_;
int shared_id_;
int script_id_;
List<OffsetRange>* no_frame_ranges_;
@@ -132,27 +132,27 @@ class ProfileTree;
class ProfileNode {
public:
- INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
+ inline ProfileNode(ProfileTree* tree, CodeEntry* entry);
ProfileNode* FindChild(CodeEntry* entry);
ProfileNode* FindOrAddChild(CodeEntry* entry);
- INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
- INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
+ void IncrementSelfTicks() { ++self_ticks_; }
+ void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; }
- INLINE(CodeEntry* entry() const) { return entry_; }
- INLINE(unsigned self_ticks() const) { return self_ticks_; }
- INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
+ CodeEntry* entry() const { return entry_; }
+ unsigned self_ticks() const { return self_ticks_; }
+ const List<ProfileNode*>* children() const { return &children_list_; }
unsigned id() const { return id_; }
void Print(int indent);
private:
- INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
+ static bool CodeEntriesMatch(void* entry1, void* entry2) {
return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
reinterpret_cast<CodeEntry*>(entry2));
}
- INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
+ static uint32_t CodeEntryHash(CodeEntry* entry) {
return entry->GetCallUid();
}
@@ -304,7 +304,8 @@ class CpuProfilesCollection {
const char* name,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
- int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo);
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
@@ -331,7 +332,7 @@ class ProfileGenerator {
void RecordTickSample(const TickSample& sample);
- INLINE(CodeMap* code_map()) { return &code_map_; }
+ CodeMap* code_map() { return &code_map_; }
static const char* const kAnonymousFunctionName;
static const char* const kProgramEntryName;
@@ -342,7 +343,7 @@ class ProfileGenerator {
static const char* const kUnresolvedFunctionName;
private:
- INLINE(CodeEntry* EntryForVMState(StateTag tag));
+ CodeEntry* EntryForVMState(StateTag tag);
CpuProfilesCollection* profiles_;
CodeMap code_map_;
diff --git a/chromium/v8/src/promise.js b/chromium/v8/src/promise.js
new file mode 100644
index 00000000000..30f4f07b4b7
--- /dev/null
+++ b/chromium/v8/src/promise.js
@@ -0,0 +1,305 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Object = global.Object
+// var $WeakMap = global.WeakMap
+
+
+var $Promise = Promise;
+
+
+//-------------------------------------------------------------------
+
+// Core functionality.
+
+// Event queue format: [(value, [(handler, deferred)*])*]
+// I.e., a list of value/tasks pairs, where the value is a resolution value or
+// rejection reason, and the tasks are a respective list of handler/deferred
+// pairs waiting for notification of this value. Each handler is an onResolve or
+// onReject function provided to the same call of 'chain' that produced the
+// associated deferred.
+var promiseEvents = new InternalArray;
+
+// Status values: 0 = pending, +1 = resolved, -1 = rejected
+var promiseStatus = NEW_PRIVATE("Promise#status");
+var promiseValue = NEW_PRIVATE("Promise#value");
+var promiseOnResolve = NEW_PRIVATE("Promise#onResolve");
+var promiseOnReject = NEW_PRIVATE("Promise#onReject");
+var promiseRaw = NEW_PRIVATE("Promise#raw");
+
+function IsPromise(x) {
+ return IS_SPEC_OBJECT(x) && %HasLocalProperty(x, promiseStatus);
+}
+
+function Promise(resolver) {
+ if (resolver === promiseRaw) return;
+ var promise = PromiseInit(this);
+ resolver(function(x) { PromiseResolve(promise, x) },
+ function(r) { PromiseReject(promise, r) });
+ // TODO(rossberg): current draft makes exception from this call asynchronous,
+ // but that's probably a mistake.
+}
+
+function PromiseSet(promise, status, value, onResolve, onReject) {
+ SET_PRIVATE(promise, promiseStatus, status);
+ SET_PRIVATE(promise, promiseValue, value);
+ SET_PRIVATE(promise, promiseOnResolve, onResolve);
+ SET_PRIVATE(promise, promiseOnReject, onReject);
+ return promise;
+}
+
+function PromiseInit(promise) {
+ return PromiseSet(promise, 0, UNDEFINED, new InternalArray, new InternalArray)
+}
+
+function PromiseDone(promise, status, value, promiseQueue) {
+ if (GET_PRIVATE(promise, promiseStatus) !== 0) return;
+ PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue));
+ PromiseSet(promise, status, value);
+}
+
+function PromiseResolve(promise, x) {
+ PromiseDone(promise, +1, x, promiseOnResolve)
+}
+
+function PromiseReject(promise, r) {
+ PromiseDone(promise, -1, r, promiseOnReject)
+}
+
+
+// Convenience.
+
+function PromiseDeferred() {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ var promise = PromiseInit(new Promise(promiseRaw));
+ return {
+ promise: promise,
+ resolve: function(x) { PromiseResolve(promise, x) },
+ reject: function(r) { PromiseReject(promise, r) }
+ };
+ } else {
+ var result = {};
+ result.promise = new this(function(resolve, reject) {
+ result.resolve = resolve;
+ result.reject = reject;
+ })
+ return result;
+ }
+}
+
+function PromiseResolved(x) {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ return PromiseSet(new Promise(promiseRaw), +1, x);
+ } else {
+ return new this(function(resolve, reject) { resolve(x) });
+ }
+}
+
+function PromiseRejected(r) {
+ if (this === $Promise) {
+ // Optimized case, avoid extra closure.
+ return PromiseSet(new Promise(promiseRaw), -1, r);
+ } else {
+ return new this(function(resolve, reject) { reject(r) });
+ }
+}
+
+
+// Simple chaining.
+
+function PromiseIdResolveHandler(x) { return x }
+function PromiseIdRejectHandler(r) { throw r }
+
+function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
+ onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
+ var deferred = %_CallFunction(this.constructor, PromiseDeferred);
+ switch (GET_PRIVATE(this, promiseStatus)) {
+ case UNDEFINED:
+ throw MakeTypeError('not_a_promise', [this]);
+ case 0: // Pending
+ GET_PRIVATE(this, promiseOnResolve).push(onResolve, deferred);
+ GET_PRIVATE(this, promiseOnReject).push(onReject, deferred);
+ break;
+ case +1: // Resolved
+ PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onResolve, deferred]);
+ break;
+ case -1: // Rejected
+ PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onReject, deferred]);
+ break;
+ }
+ return deferred.promise;
+}
+
+function PromiseCatch(onReject) {
+ return this.chain(UNDEFINED, onReject);
+}
+
+function PromiseEnqueue(value, tasks) {
+ promiseEvents.push(value, tasks);
+ %SetMicrotaskPending(true);
+}
+
+function PromiseMicrotaskRunner() {
+ var events = promiseEvents;
+ if (events.length > 0) {
+ promiseEvents = new InternalArray;
+ for (var i = 0; i < events.length; i += 2) {
+ var value = events[i];
+ var tasks = events[i + 1];
+ for (var j = 0; j < tasks.length; j += 2) {
+ var handler = tasks[j];
+ var deferred = tasks[j + 1];
+ try {
+ var result = handler(value);
+ if (result === deferred.promise)
+ throw MakeTypeError('promise_cyclic', [result]);
+ else if (IsPromise(result))
+ result.chain(deferred.resolve, deferred.reject);
+ else
+ deferred.resolve(result);
+ } catch(e) {
+ // TODO(rossberg): perhaps log uncaught exceptions below.
+ try { deferred.reject(e) } catch(e) {}
+ }
+ }
+ }
+ }
+}
+RunMicrotasks.runners.push(PromiseMicrotaskRunner);
+
+
+// Multi-unwrapped chaining with thenable coercion.
+
+function PromiseThen(onResolve, onReject) {
+ onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
+ var that = this;
+ var constructor = this.constructor;
+ return this.chain(
+ function(x) {
+ x = PromiseCoerce(constructor, x);
+ return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) :
+ IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x);
+ },
+ onReject
+ );
+}
+
+PromiseCoerce.table = new $WeakMap;
+
+function PromiseCoerce(constructor, x) {
+ var then;
+ if (IsPromise(x)) {
+ return x;
+ } else if (!IS_NULL_OR_UNDEFINED(x) && %IsCallable(then = x.then)) {
+ if (PromiseCoerce.table.has(x)) {
+ return PromiseCoerce.table.get(x);
+ } else {
+ var deferred = constructor.deferred();
+ PromiseCoerce.table.set(x, deferred.promise);
+ try {
+ %_CallFunction(x, deferred.resolve, deferred.reject, then);
+ } catch(e) {
+ deferred.reject(e);
+ }
+ return deferred.promise;
+ }
+ } else {
+ return x;
+ }
+}
+
+
+// Combinators.
+
+function PromiseCast(x) {
+ // TODO(rossberg): cannot do better until we support @@create.
+ return IsPromise(x) ? x : this.resolved(x);
+}
+
+function PromiseAll(values) {
+ var deferred = this.deferred();
+ var resolutions = [];
+ var count = values.length;
+ if (count === 0) {
+ deferred.resolve(resolutions);
+ } else {
+ for (var i = 0; i < values.length; ++i) {
+ this.cast(values[i]).chain(
+ function(i, x) {
+ resolutions[i] = x;
+ if (--count === 0) deferred.resolve(resolutions);
+ }.bind(UNDEFINED, i), // TODO(rossberg): use let loop once available
+ function(r) {
+ if (count > 0) { count = 0; deferred.reject(r) }
+ }
+ );
+ }
+ }
+ return deferred.promise;
+}
+
+function PromiseOne(values) { // a.k.a. race
+ var deferred = this.deferred();
+ var done = false;
+ for (var i = 0; i < values.length; ++i) {
+ this.cast(values[i]).chain(
+ function(x) { if (!done) { done = true; deferred.resolve(x) } },
+ function(r) { if (!done) { done = true; deferred.reject(r) } }
+ );
+ }
+ return deferred.promise;
+}
+
+//-------------------------------------------------------------------
+
+function SetUpPromise() {
+ %CheckIsBootstrapping()
+ global.Promise = $Promise;
+ InstallFunctions($Promise, DONT_ENUM, [
+ "deferred", PromiseDeferred,
+ "resolved", PromiseResolved,
+ "rejected", PromiseRejected,
+ "all", PromiseAll,
+ "one", PromiseOne,
+ "cast", PromiseCast
+ ]);
+ InstallFunctions($Promise.prototype, DONT_ENUM, [
+ "chain", PromiseChain,
+ "then", PromiseThen,
+ "catch", PromiseCatch
+ ]);
+}
+
+SetUpPromise();
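
PromiseAll above resolves its combined promise via a shared count-down: each resolution decrements the counter, the aggregate resolves exactly once when the counter reaches zero, and the first rejection zeroes the counter so later outcomes are ignored. The same bookkeeping in a minimal C++ sketch (the file itself is JavaScript; this is illustrative only, not part of the patch):

    #include <functional>

    struct CountDown {
      int remaining;                          // Number of still-pending values.
      std::function<void()> on_all_resolved;
      std::function<void()> on_first_rejected;

      void Resolve() {
        if (remaining > 0 && --remaining == 0) on_all_resolved();
      }
      void Reject() {
        if (remaining > 0) { remaining = 0; on_first_rejected(); }
      }
    };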
diff --git a/chromium/v8/src/property-details.h b/chromium/v8/src/property-details.h
index 7f44b79277c..200657f11f0 100644
--- a/chromium/v8/src/property-details.h
+++ b/chromium/v8/src/property-details.h
@@ -82,6 +82,10 @@ class Representation {
public:
enum Kind {
kNone,
+ kInteger8,
+ kUInteger8,
+ kInteger16,
+ kUInteger16,
kSmi,
kInteger32,
kDouble,
@@ -95,6 +99,12 @@ class Representation {
static Representation None() { return Representation(kNone); }
static Representation Tagged() { return Representation(kTagged); }
+ static Representation Integer8() { return Representation(kInteger8); }
+ static Representation UInteger8() { return Representation(kUInteger8); }
+ static Representation Integer16() { return Representation(kInteger16); }
+ static Representation UInteger16() {
+ return Representation(kUInteger16);
+ }
static Representation Smi() { return Representation(kSmi); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
@@ -121,9 +131,15 @@ class Representation {
}
bool is_more_general_than(const Representation& other) const {
+ if (kind_ == kExternal && other.kind_ == kNone) return true;
+ if (kind_ == kExternal && other.kind_ == kExternal) return false;
+ if (kind_ == kNone && other.kind_ == kExternal) return false;
+
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
- if (IsHeapObject()) return other.IsDouble() || other.IsNone();
+ if (IsHeapObject()) return other.IsNone();
+ if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
+ if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
return kind_ > other.kind_;
}
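
With the new Kind values declared in ascending generality, the predicate above mostly reduces to an integer comparison, with the unsigned/signed pairs deliberately incomparable since neither value range contains the other. A sketch of the value-representation cases (HeapObject/External handling omitted; illustrative only):

    #include <assert.h>

    enum Kind { kNone, kInteger8, kUInteger8, kInteger16, kUInteger16,
                kSmi, kInteger32, kDouble };

    static bool IsMoreGeneral(Kind a, Kind b) {
      if (a == kUInteger8 && b == kInteger8) return false;
      if (a == kUInteger16 && b == kInteger16) return false;
      return a > b;
    }

    static void RepresentationLatticeDemo() {
      assert(IsMoreGeneral(kInteger32, kSmi));        // Wider integer range.
      assert(!IsMoreGeneral(kUInteger8, kInteger8));  // Incomparable pair...
      assert(!IsMoreGeneral(kInteger8, kUInteger8));  // ...in both directions.
    }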
@@ -137,8 +153,26 @@ class Representation {
return Representation::Tagged();
}
+ int size() const {
+ ASSERT(!IsNone());
+ if (IsInteger8() || IsUInteger8()) {
+ return sizeof(uint8_t);
+ }
+ if (IsInteger16() || IsUInteger16()) {
+ return sizeof(uint16_t);
+ }
+ if (IsInteger32()) {
+ return sizeof(uint32_t);
+ }
+ return kPointerSize;
+ }
+
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
+ bool IsInteger8() const { return kind_ == kInteger8; }
+ bool IsUInteger8() const { return kind_ == kUInteger8; }
+ bool IsInteger16() const { return kind_ == kInteger16; }
+ bool IsUInteger16() const { return kind_ == kUInteger16; }
bool IsTagged() const { return kind_ == kTagged; }
bool IsSmi() const { return kind_ == kSmi; }
bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
@@ -148,7 +182,9 @@ class Representation {
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble || kind_ == kSmi;
+ return IsInteger8() || IsUInteger8() ||
+ IsInteger16() || IsUInteger16() ||
+ IsSmi() || IsInteger32() || IsDouble();
}
const char* Mnemonic() const;
@@ -162,6 +198,15 @@ class Representation {
};
+static const int kDescriptorIndexBitCount = 10;
+// The maximum number of descriptors we want in a descriptor array (should
+// fit in a page).
+static const int kMaxNumberOfDescriptors =
+ (1 << kDescriptorIndexBitCount) - 2;
+static const int kInvalidEnumCacheSentinel =
+ (1 << kDescriptorIndexBitCount) - 1;
+
+
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
@@ -252,9 +297,14 @@ class PropertyDetails BASE_EMBEDDED {
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
// Bit fields for fast objects.
- class DescriptorPointer: public BitField<uint32_t, 6, 11> {};
- class RepresentationField: public BitField<uint32_t, 17, 3> {};
- class FieldIndexField: public BitField<uint32_t, 20, 11> {};
+ class RepresentationField: public BitField<uint32_t, 6, 4> {};
+ class DescriptorPointer: public BitField<uint32_t, 10,
+ kDescriptorIndexBitCount> {}; // NOLINT
+ class FieldIndexField: public BitField<uint32_t,
+ 10 + kDescriptorIndexBitCount,
+ kDescriptorIndexBitCount> {}; // NOLINT
+ // All bits for fast objects must fit in a smi.
+ STATIC_ASSERT(10 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
static const int kInitialIndex = 1;
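
The fast-object layout above packs 6 shared low bits, a 4-bit representation field, and two kDescriptorIndexBitCount-wide indices into a smi. A quick standalone check of that arithmetic (constants mirror the fields above; illustrative only, not part of the patch):

    static const int kLowBits = 10;              // type/attributes (6) + representation (4).
    static const int kDescriptorIndexBits = 10;  // kDescriptorIndexBitCount.
    static_assert(kLowBits + 2 * kDescriptorIndexBits <= 31,
                  "fast-object PropertyDetails must fit in a 31-bit smi");
    static_assert((1 << kDescriptorIndexBits) - 2 == 1022,
                  "kMaxNumberOfDescriptors, with index 1023 reserved as the "
                  "invalid enum-cache sentinel");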
diff --git a/chromium/v8/src/property.cc b/chromium/v8/src/property.cc
index 83a6a365b87..2f72eec48ec 100644
--- a/chromium/v8/src/property.cc
+++ b/chromium/v8/src/property.cc
@@ -35,6 +35,7 @@ void LookupResult::Iterate(ObjectVisitor* visitor) {
LookupResult* current = this; // Could be NULL.
while (current != NULL) {
visitor->VisitPointer(BitCast<Object**>(&current->holder_));
+ visitor->VisitPointer(BitCast<Object**>(&current->transition_));
current = current->next_;
}
}
@@ -82,13 +83,13 @@ void LookupResult::Print(FILE* out) {
case FIELD:
PrintF(out, " -type = map transition\n");
PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
+ GetTransitionTarget()->Print(out);
PrintF(out, "\n");
return;
case CONSTANT:
PrintF(out, " -type = constant property transition\n");
PrintF(out, " -map:\n");
- GetTransitionMap()->Print(out);
+ GetTransitionTarget()->Print(out);
PrintF(out, "\n");
return;
case CALLBACKS:
diff --git a/chromium/v8/src/property.h b/chromium/v8/src/property.h
index 0f78ba478ec..da772dc86c3 100644
--- a/chromium/v8/src/property.h
+++ b/chromium/v8/src/property.h
@@ -184,6 +184,7 @@ class LookupResult BASE_EMBEDDED {
next_(isolate->top_lookup_result()),
lookup_type_(NOT_FOUND),
holder_(NULL),
+ transition_(NULL),
cacheable_(true),
details_(NONE, NONEXISTENT, Representation::None()) {
isolate->SetTopLookupResult(this);
@@ -201,6 +202,7 @@ class LookupResult BASE_EMBEDDED {
holder_ = holder;
details_ = details;
number_ = number;
+ transition_ = NULL;
}
bool CanHoldValue(Handle<Object> value) {
@@ -209,16 +211,18 @@ class LookupResult BASE_EMBEDDED {
return value->FitsRepresentation(details_.representation());
}
- void TransitionResult(JSObject* holder, int number) {
+ void TransitionResult(JSObject* holder, Map* target) {
lookup_type_ = TRANSITION_TYPE;
details_ = PropertyDetails(NONE, TRANSITION, Representation::None());
holder_ = holder;
- number_ = number;
+ transition_ = target;
+ number_ = 0xAAAA;
}
void DictionaryResult(JSObject* holder, int entry) {
lookup_type_ = DICTIONARY_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = holder->property_dictionary()->DetailsAt(entry);
number_ = entry;
}
@@ -226,6 +230,7 @@ class LookupResult BASE_EMBEDDED {
void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
holder_ = proxy;
+ transition_ = NULL;
details_ = PropertyDetails(NONE, HANDLER, Representation::Tagged());
cacheable_ = false;
}
@@ -233,6 +238,7 @@ class LookupResult BASE_EMBEDDED {
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
+ transition_ = NULL;
details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::Tagged());
}
@@ -248,7 +254,7 @@ class LookupResult BASE_EMBEDDED {
}
JSProxy* proxy() {
- ASSERT(IsFound());
+ ASSERT(IsHandler());
return JSProxy::cast(holder_);
}
@@ -373,42 +379,20 @@ class LookupResult BASE_EMBEDDED {
return NULL;
}
- Map* GetTransitionTarget(Map* map) {
- ASSERT(IsTransition());
- TransitionArray* transitions = map->transitions();
- return transitions->GetTarget(number_);
- }
-
Map* GetTransitionTarget() {
- return GetTransitionTarget(holder()->map());
- }
-
- PropertyDetails GetTransitionDetails(Map* map) {
- ASSERT(IsTransition());
- TransitionArray* transitions = map->transitions();
- return transitions->GetTargetDetails(number_);
+ return transition_;
}
PropertyDetails GetTransitionDetails() {
- return GetTransitionDetails(holder()->map());
- }
-
- bool IsTransitionToField(Map* map) {
- return IsTransition() && GetTransitionDetails(map).type() == FIELD;
+ return transition_->GetLastDescriptorDetails();
}
- bool IsTransitionToConstant(Map* map) {
- return IsTransition() && GetTransitionDetails(map).type() == CONSTANT;
+ bool IsTransitionToField() {
+ return IsTransition() && GetTransitionDetails().type() == FIELD;
}
- Map* GetTransitionMap() {
- ASSERT(IsTransition());
- return Map::cast(GetValue());
- }
-
- Map* GetTransitionMapFromMap(Map* map) {
- ASSERT(IsTransition());
- return map->transitions()->GetTarget(number_);
+ bool IsTransitionToConstant() {
+ return IsTransition() && GetTransitionDetails().type() == CONSTANT;
}
int GetTransitionIndex() {
@@ -501,6 +485,7 @@ class LookupResult BASE_EMBEDDED {
} lookup_type_;
JSReceiver* holder_;
+ Map* transition_;
int number_;
bool cacheable_;
PropertyDetails details_;
diff --git a/chromium/v8/src/proxy.js b/chromium/v8/src/proxy.js
index de9be50ddca..4c03f215389 100644
--- a/chromium/v8/src/proxy.js
+++ b/chromium/v8/src/proxy.js
@@ -40,7 +40,7 @@ function ProxyCreate(handler, proto) {
throw MakeTypeError("handler_non_object", ["create"])
if (IS_UNDEFINED(proto))
proto = null
- else if (!(IS_SPEC_OBJECT(proto) || proto === null))
+ else if (!(IS_SPEC_OBJECT(proto) || IS_NULL(proto)))
throw MakeTypeError("proto_non_object", ["create"])
return %CreateJSProxy(handler, proto)
}
@@ -56,7 +56,7 @@ function ProxyCreateFunction(handler, callTrap, constructTrap) {
// Make sure the trap receives 'undefined' as this.
var construct = constructTrap
constructTrap = function() {
- return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
+ return %Apply(construct, UNDEFINED, arguments, 0, %_ArgumentsLength());
}
} else {
throw MakeTypeError("trap_function_expected",
diff --git a/chromium/v8/src/regexp.js b/chromium/v8/src/regexp.js
index cb11ad107cf..22b08775b32 100644
--- a/chromium/v8/src/regexp.js
+++ b/chromium/v8/src/regexp.js
@@ -189,7 +189,7 @@ function RegExpExec(string) {
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
+ if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return null;
}
@@ -232,7 +232,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
- if (matchIndices === null) {
+ if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
@@ -253,7 +253,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
- if (matchIndices === null) {
+ if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
@@ -384,7 +384,7 @@ function RegExpMakeCaptureGetter(n) {
var lastMatchInfo = new InternalPackedArray(
2, // REGEXP_NUMBER_OF_CAPTURES
"", // Last subject.
- void 0, // Last input - settable with RegExpSetInput.
+ UNDEFINED, // Last input - settable with RegExpSetInput.
0, // REGEXP_FIRST_CAPTURE + 0
0 // REGEXP_FIRST_CAPTURE + 1
);
diff --git a/chromium/v8/src/rewriter.cc b/chromium/v8/src/rewriter.cc
index 06335a80c7b..ba35284d7f9 100644
--- a/chromium/v8/src/rewriter.cc
+++ b/chromium/v8/src/rewriter.cc
@@ -207,6 +207,11 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) {
}
+void Processor::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void Processor::VisitContinueStatement(ContinueStatement* node) {
is_set_ = false;
}
@@ -258,7 +263,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
Variable* result = scope->NewTemporary(
- info->isolate()->factory()->result_string());
+ info->isolate()->factory()->dot_result_string());
Processor processor(result, info->zone());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
@@ -271,13 +276,12 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
// eval('with ({x:1}) x = 1');
// the end position of the function generated for executing the eval code
// coincides with the end of the with scope which is the position of '1'.
- int position = function->end_position();
+ int pos = function->end_position();
VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, result->interface(), position);
+ result->name(), false, result->interface(), pos);
result_proxy->BindTo(result);
Statement* result_statement =
- processor.factory()->NewReturnStatement(result_proxy);
- result_statement->set_statement_pos(position);
+ processor.factory()->NewReturnStatement(result_proxy, pos);
body->Add(result_statement, info->zone());
}
}
diff --git a/chromium/v8/src/runtime-profiler.cc b/chromium/v8/src/runtime-profiler.cc
index 95dcc4f983b..691fc666419 100644
--- a/chromium/v8/src/runtime-profiler.cc
+++ b/chromium/v8/src/runtime-profiler.cc
@@ -33,7 +33,6 @@
#include "bootstrapper.h"
#include "code-stubs.h"
#include "compilation-cache.h"
-#include "deoptimizer.h"
#include "execution.h"
#include "full-codegen.h"
#include "global-handles.h"
@@ -140,8 +139,9 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
}
- if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
- if (FLAG_concurrent_osr &&
+ if (isolate_->concurrent_recompilation_enabled() &&
+ !isolate_->bootstrapper()->IsActive()) {
+ if (isolate_->concurrent_osr_enabled() &&
isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
// Do not attempt regular recompilation if we already queued this for OSR.
// TODO(yangguo): This is necessary so that we don't install optimized
@@ -185,7 +185,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
PrintF("]\n");
}
- Deoptimizer::PatchInterruptCode(isolate_, shared->code());
+ BackEdgeTable::Patch(isolate_, shared->code());
}
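
This hunk, like several later ones (Runtime_ConcurrentRecompile, Runtime_GetOptimizationStatus, Runtime_CompileForOnStackReplacement), replaces direct reads of the global FLAG_concurrent_recompilation / FLAG_concurrent_osr / FLAG_crankshaft variables with per-isolate predicates. A compilable sketch of that refactoring pattern; the member names match the patch, but the internals are assumptions, since the diff does not show the Isolate definitions:

    struct Flags {
      bool concurrent_recompilation;
      bool concurrent_osr;
    };
    Flags FLAG = {true, true};  // stand-in for V8's process-wide flag globals

    class Isolate {
     public:
      // Wrapping flags in isolate-level predicates lets behaviour be decided
      // per isolate (and centralizes any extra conditions) instead of having
      // every call site read the globals directly.
      bool concurrent_recompilation_enabled() const {
        return FLAG.concurrent_recompilation && !recompilation_disabled_;
      }
      bool concurrent_osr_enabled() const {
        // Matches the old call sites, which required both flags at once.
        return concurrent_recompilation_enabled() && FLAG.concurrent_osr;
      }
     private:
      bool recompilation_disabled_ = false;  // hypothetical per-isolate state
    };
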
diff --git a/chromium/v8/src/runtime.cc b/chromium/v8/src/runtime.cc
index 3f7c0b98499..c909f34db17 100644
--- a/chromium/v8/src/runtime.cc
+++ b/chromium/v8/src/runtime.cc
@@ -31,6 +31,7 @@
#include "v8.h"
#include "accessors.h"
+#include "allocation-site-scopes.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
@@ -348,10 +349,8 @@ MaybeObject* TransitionElements(Handle<Object> object,
ElementsKind from_kind =
Handle<JSObject>::cast(object)->map()->elements_kind();
if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- Handle<Object> result = JSObject::TransitionElementsKind(
- Handle<JSObject>::cast(object), to_kind);
- if (result.is_null()) return isolate->ThrowIllegalOperation();
- return *result;
+ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+ return *object;
}
return isolate->ThrowIllegalOperation();
}
@@ -488,44 +487,39 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
// Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- RETURN_IF_EMPTY_HANDLE(isolate, boilerplate);
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return JSObject::cast(*boilerplate)->DeepCopy(isolate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
+ Handle<Object> literal_site(literals->get(literals_index), isolate);
+ Handle<AllocationSite> site;
+ Handle<JSObject> boilerplate;
+ if (*literal_site == isolate->heap()->undefined_value()) {
+ Handle<Object> raw_boilerplate = CreateObjectLiteralBoilerplate(
+ isolate,
+ literals,
+ constant_properties,
+ should_have_fast_elements,
+ has_function_literal);
+ RETURN_IF_EMPTY_HANDLE(isolate, raw_boilerplate);
+ boilerplate = Handle<JSObject>::cast(raw_boilerplate);
+
+ AllocationSiteCreationContext creation_context(isolate);
+ site = creation_context.EnterNewScope();
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::DeepWalk(boilerplate, &creation_context));
+ creation_context.ExitScope(site, boilerplate);
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- RETURN_IF_EMPTY_HANDLE(isolate, boilerplate);
// Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
+ literals->set(literals_index, *site);
+ } else {
+ site = Handle<AllocationSite>::cast(literal_site);
+ boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
+ isolate);
}
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
+
+ AllocationSiteUsageContext usage_context(isolate, site, true);
+ usage_context.EnterNewScope();
+ Handle<Object> copy = JSObject::DeepCopy(boilerplate, &usage_context);
+ usage_context.ExitScope(site, boilerplate);
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
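
The rewritten Runtime_CreateObjectLiteral brackets both the first-time DeepWalk and every subsequent DeepCopy between EnterNewScope and ExitScope calls on an allocation-site context. A reduced, self-contained sketch of that scope protocol — the real AllocationSiteCreationContext/AllocationSiteUsageContext bookkeeping is elided, and Site/SiteContext are illustrative names:

    #include <cassert>

    struct Site {
      int recorded = 0;
    };

    class SiteContext {
     public:
      // EnterNewScope hands out the site the walk should populate;
      // ExitScope checks the pairing, as the runtime functions above do.
      Site* EnterNewScope() {
        ++depth_;
        return &site_;
      }
      void ExitScope(Site* site) {
        assert(site == &site_ && depth_ > 0);
        --depth_;
      }
     private:
      Site site_;
      int depth_ = 0;
    };

    void DeepWalk(Site* site) { ++site->recorded; }  // JSObject::DeepWalk stand-in

    void CreateLiteral(SiteContext& creation_context) {
      Site* site = creation_context.EnterNewScope();
      DeepWalk(site);
      creation_context.ExitScope(site);
    }
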
@@ -541,9 +535,16 @@ static Handle<AllocationSite> GetLiteralAllocationSite(
ASSERT(*elements != isolate->heap()->empty_fixed_array());
Handle<Object> boilerplate =
Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return site;
- site = isolate->factory()->NewAllocationSite();
- site->set_transition_info(*boilerplate);
+ if (boilerplate.is_null()) return Handle<AllocationSite>::null();
+
+ AllocationSiteCreationContext creation_context(isolate);
+ site = creation_context.EnterNewScope();
+ if (JSObject::DeepWalk(Handle<JSObject>::cast(boilerplate),
+ &creation_context).is_null()) {
+ return Handle<AllocationSite>::null();
+ }
+ creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate));
+
literals->set(literals_index, *site);
} else {
site = Handle<AllocationSite>::cast(literal_site);
@@ -553,47 +554,52 @@ static Handle<AllocationSite> GetLiteralAllocationSite(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
-
+static MaybeObject* CreateArrayLiteralImpl(Isolate* isolate,
+ Handle<FixedArray> literals,
+ int literals_index,
+ Handle<FixedArray> elements,
+ int flags) {
Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
literals_index, elements);
RETURN_IF_EMPTY_HANDLE(isolate, site);
- JSObject* boilerplate = JSObject::cast(site->transition_info());
- return boilerplate->DeepCopy(isolate);
+ bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
+ Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
+ AllocationSiteUsageContext usage_context(isolate, site, enable_mementos);
+ usage_context.EnterNewScope();
+ JSObject::DeepCopyHints hints = (flags & ArrayLiteral::kShallowElements) == 0
+ ? JSObject::kNoHints
+ : JSObject::kObjectIsShallowArray;
+ Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context,
+ hints);
+ usage_context.ExitScope(site, boilerplate);
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<AllocationSite> site = GetLiteralAllocationSite(isolate, literals,
- literals_index, elements);
- RETURN_IF_EMPTY_HANDLE(isolate, site);
+ return CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
+ flags);
+}
- JSObject* boilerplate = JSObject::cast(site->transition_info());
- if (boilerplate->elements()->map() ==
- isolate->heap()->fixed_cow_array_map()) {
- isolate->counters()->cow_arrays_created_runtime()->Increment();
- }
- AllocationSiteMode mode = AllocationSite::GetMode(
- boilerplate->GetElementsKind());
- if (mode == TRACK_ALLOCATION_SITE) {
- return isolate->heap()->CopyJSObjectWithAllocationSite(
- boilerplate, *site);
- }
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralStubBailout) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- return isolate->heap()->CopyJSObject(boilerplate);
+ return CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
+ ArrayLiteral::kShallowElements);
}
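
Runtime_CreateArrayLiteral now receives a flags Smi that packs two independent booleans, decoded by the bit tests in CreateArrayLiteralImpl. A standalone decode sketch; the bit values below are illustrative, the real constants are defined on ArrayLiteral:

    #include <cstdio>

    enum ArrayLiteralFlags {
      kShallowElements = 1 << 0,  // a shallow element copy is sufficient
      kDisableMementos = 1 << 1   // do not emit allocation mementos
    };

    void DecodeFlags(int flags) {
      // Same tests as CreateArrayLiteralImpl in the hunk above.
      bool enable_mementos = (flags & kDisableMementos) == 0;
      bool deep_copy = (flags & kShallowElements) == 0;
      std::printf("mementos=%d deep_copy=%d\n", enable_mementos, deep_copy);
    }

    int main() {
      DecodeFlags(0);                                    // deep copy, mementos on
      DecodeFlags(kShallowElements);                     // the stub-bailout path
      DecodeFlags(kShallowElements | kDisableMementos);  // fully shallow, no mementos
      return 0;
    }
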
@@ -610,6 +616,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreatePrivateSymbol) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ Handle<Object> name(args[0], isolate);
+ RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+ Symbol* symbol;
+ MaybeObject* maybe = isolate->heap()->AllocatePrivateSymbol();
+ if (!maybe->To(&symbol)) return maybe;
+ if (name->IsString()) symbol->set_name(*name);
+ return symbol;
+}
+
+
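
Runtime_CreatePrivateSymbol above uses the pre-handlification MaybeObject convention: the allocator returns a MaybeObject*, and To() both extracts the value and signals failure so the caller can return the failure unchanged. A self-contained imitation of just that calling shape (the real MaybeObject encodes retry and failure states this toy does not):

    template <typename T>
    struct Maybe {
      T* value;  // nullptr stands in for an allocation failure
      bool To(T** out) {
        if (value == nullptr) return false;
        *out = value;
        return true;
      }
    };

    struct Symbol {
      const char* name = nullptr;
    };

    Maybe<Symbol> AllocatePrivateSymbol(bool fail) {
      static Symbol the_symbol;
      return Maybe<Symbol>{fail ? nullptr : &the_symbol};
    }

    Maybe<Symbol> CreatePrivateSymbol(const char* name, bool fail) {
      Symbol* symbol;
      Maybe<Symbol> maybe = AllocatePrivateSymbol(fail);
      if (!maybe.To(&symbol)) return maybe;  // propagate failure, as in the hunk
      if (name != nullptr) symbol->name = name;
      return maybe;
    }
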
RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -618,6 +637,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolIsPrivate) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Symbol, symbol, 0);
+ return isolate->heap()->ToBoolean(symbol->is_private());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -695,13 +722,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
void Runtime::FreeArrayBuffer(Isolate* isolate,
JSArrayBuffer* phantom_array_buffer) {
+ if (phantom_array_buffer->should_be_freed()) {
+ ASSERT(phantom_array_buffer->is_external());
+ free(phantom_array_buffer->backing_store());
+ }
if (phantom_array_buffer->is_external()) return;
size_t allocated_length = NumberToSize(
isolate, phantom_array_buffer->byte_length());
isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<intptr_t>(allocated_length));
+ -static_cast<int64_t>(allocated_length));
CHECK(V8::ArrayBufferAllocator() != NULL);
V8::ArrayBufferAllocator()->Free(
phantom_array_buffer->backing_store(),
@@ -822,20 +853,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) {
}
-enum TypedArrayId {
- // arrayIds below should be synchronized with typedarray.js natives.
- ARRAY_ID_UINT8 = 1,
- ARRAY_ID_INT8 = 2,
- ARRAY_ID_UINT16 = 3,
- ARRAY_ID_INT16 = 4,
- ARRAY_ID_UINT32 = 5,
- ARRAY_ID_INT32 = 6,
- ARRAY_ID_FLOAT32 = 7,
- ARRAY_ID_FLOAT64 = 8,
- ARRAY_ID_UINT8C = 9
-};
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ return object->IsJSArrayBufferView()
+ ? isolate->heap()->true_value()
+ : isolate->heap()->false_value();
+}
-static void ArrayIdToTypeAndSize(
+
+void Runtime::ArrayIdToTypeAndSize(
int arrayId, ExternalArrayType* array_type, size_t* element_size) {
switch (arrayId) {
case ARRAY_ID_UINT8:
@@ -897,7 +925,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
holder->set_buffer(*buffer);
holder->set_byte_offset(*byte_offset_object);
@@ -949,7 +977,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
ExternalArrayType array_type = kExternalByteArray; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &element_size);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
size_t length = NumberToSize(isolate, *length_obj);
@@ -962,17 +990,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
}
size_t byte_length = length * element_size;
+ // NOTE: not initializing backing store.
// We assume that the caller of this function will initialize holder
// with the loop
// for(i = 0; i < length; i++) { holder[i] = source[i]; }
+ // We assume that the caller of this function is always a typed array
+ // constructor.
// If source is a typed array, this loop will always run to completion,
// so we are sure that the backing store will be initialized.
- // Otherwise, we do not know (the indexing operation might throw).
- // Hence we require zero initialization unless our source is a typed array.
- bool should_zero_initialize = !source->IsJSTypedArray();
+ // Otherwise, the indexing operation might throw, so the loop will not
+ // run to completion and the typed array might remain partly initialized.
+ // However, we further assume that the caller of this function is a typed
+ // array constructor, and the exception will propagate out of the
+ // constructor, so uninitialized memory will not be accessible to a user
+ // program.
+ //
+ // TODO(dslomov): revise this once we support subclassing.
if (!Runtime::SetupArrayBufferAllocatingData(
- isolate, buffer, byte_length, should_zero_initialize)) {
+ isolate, buffer, byte_length, false)) {
return isolate->Throw(*isolate->factory()->
NewRangeError("invalid_array_buffer_length",
HandleVector<Object>(NULL, 0)));
@@ -1196,7 +1231,10 @@ inline static bool DataViewGetValue(
Handle<Object> byte_offset_obj,
bool is_little_endian,
T* result) {
- size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ size_t byte_offset = 0;
+ if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ return false;
+ }
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
size_t data_view_byte_offset =
@@ -1237,7 +1275,10 @@ static bool DataViewSetValue(
Handle<Object> byte_offset_obj,
bool is_little_endian,
T data) {
- size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+ size_t byte_offset = 0;
+ if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ return false;
+ }
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
size_t data_view_byte_offset =
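
Both DataView hunks replace an unchecked NumberToSize with TryNumberToSize, so an out-of-range byte offset makes the accessor return false instead of proceeding with a bogus size. The diff does not show TryNumberToSize itself, so the range policy below is an assumption; this is only the shape of such a checked conversion:

    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    // Returns false (leaving *out untouched) when d cannot be represented
    // as a size_t: NaN, negative, or too large. Callers bail out, the way
    // DataViewGetValue/DataViewSetValue now return false above.
    bool TryDoubleToSize(double d, size_t* out) {
      if (std::isnan(d) || d < 0.0) return false;
      if (d >= static_cast<double>(SIZE_MAX)) return false;
      *out = static_cast<size_t>(d);
      return true;
    }
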
@@ -1400,7 +1441,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSetAdd(table, key);
+ table = ObjectHashSet::Add(table, key);
holder->set_table(*table);
return isolate->heap()->undefined_value();
}
@@ -1422,7 +1463,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- table = ObjectHashSetRemove(table, key);
+ table = ObjectHashSet::Remove(table, key);
holder->set_table(*table);
return isolate->heap()->undefined_value();
}
@@ -1477,7 +1518,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
- PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value());
holder->set_table(*new_table);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -1490,7 +1531,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
holder->set_table(*new_table);
return isolate->heap()->undefined_value();
}
@@ -1556,7 +1597,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionDelete) {
weak_collection->table()));
Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
- PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
+ ObjectHashTable::Put(table, key, isolate->factory()->the_hole_value());
weak_collection->set_table(*new_table);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -1570,7 +1611,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakCollectionSet) {
Handle<Object> value(args[2], isolate);
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
+ Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
weak_collection->set_table(*new_table);
return isolate->heap()->undefined_value();
}
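
The collection hunks above (SetAdd, SetDelete, MapDelete, MapSet, WeakCollectionDelete, WeakCollectionSet) all switch from free functions to static members such as ObjectHashTable::Put that take a handle and return the table to use afterwards, since an insertion may allocate a replacement table. A toy, GC-free illustration of that return-the-possibly-new-object style:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Table {
      std::vector<int> keys;

      // Put may "reallocate" the table, so the caller must store the
      // returned pointer back into the holder, exactly like
      //   table = ObjectHashTable::Put(table, key, value);
      //   holder->set_table(*table);
      static std::shared_ptr<Table> Put(std::shared_ptr<Table> table, int key) {
        if (table->keys.size() == table->keys.capacity()) {
          auto bigger = std::make_shared<Table>(*table);  // copy into new storage
          bigger->keys.reserve(table->keys.capacity() * 2 + 1);
          table = std::move(bigger);
        }
        table->keys.push_back(key);
        return table;
      }
    };
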
@@ -1586,24 +1627,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
// We don't expect access checks to be needed on JSProxy objects.
ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
do {
if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(JSObject::cast(obj),
- isolate->heap()->proto_string(),
- v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+ !isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj),
+ isolate->factory()->proto_string(),
+ v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheck(JSObject::cast(*obj), v8::ACCESS_GET);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
- obj = obj->GetPrototype(isolate);
+ obj = handle(obj->GetPrototype(isolate), isolate);
} while (obj->IsJSObject() &&
- JSObject::cast(obj)->map()->is_hidden_prototype());
- return obj;
+ JSObject::cast(*obj)->map()->is_hidden_prototype());
+ return *obj;
}
@@ -1633,7 +1674,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
Handle<Object> new_value(
GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
if (!new_value->SameValue(*old_value)) {
- JSObject::EnqueueChangeRecord(obj, "prototype",
+ JSObject::EnqueueChangeRecord(obj, "setPrototype",
isolate->factory()->proto_string(),
old_value);
}
@@ -1662,6 +1703,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
static bool CheckAccessException(Object* callback,
v8::AccessType access_type) {
+ DisallowHeapAllocation no_gc;
if (callback->IsAccessorInfo()) {
AccessorInfo* info = AccessorInfo::cast(callback);
return
@@ -1684,20 +1726,20 @@ static bool CheckAccessException(Object* callback,
template<class Key>
static bool CheckGenericAccess(
- JSObject* receiver,
- JSObject* holder,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
Key key,
v8::AccessType access_type,
- bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) {
+ bool (Isolate::*mayAccess)(Handle<JSObject>, Key, v8::AccessType)) {
Isolate* isolate = receiver->GetIsolate();
- for (JSObject* current = receiver;
+ for (Handle<JSObject> current = receiver;
true;
- current = JSObject::cast(current->GetPrototype())) {
+ current = handle(JSObject::cast(current->GetPrototype()), isolate)) {
if (current->IsAccessCheckNeeded() &&
!(isolate->*mayAccess)(current, key, access_type)) {
return false;
}
- if (current == holder) break;
+ if (current.is_identical_to(holder)) break;
}
return true;
}
@@ -1710,28 +1752,29 @@ enum AccessCheckResult {
};
-static AccessCheckResult CheckPropertyAccess(
- JSObject* obj,
- Name* name,
- v8::AccessType access_type) {
+static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
+ Handle<Name> name,
+ v8::AccessType access_type) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
// TODO(1095): we should traverse hidden prototype hierarchy as well.
if (CheckGenericAccess(
- obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
+ obj, obj, index, access_type, &Isolate::MayIndexedAccessWrapper)) {
return ACCESS_ALLOWED;
}
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ obj->GetIsolate()->ReportFailedAccessCheck(*obj, access_type);
return ACCESS_FORBIDDEN;
}
- LookupResult lookup(obj->GetIsolate());
- obj->LocalLookup(name, &lookup, true);
+ Isolate* isolate = obj->GetIsolate();
+ LookupResult lookup(isolate);
+ obj->LocalLookup(*name, &lookup, true);
if (!lookup.IsProperty()) return ACCESS_ABSENT;
- if (CheckGenericAccess<Object*>(
- obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) {
+ Handle<JSObject> holder(lookup.holder(), isolate);
+ if (CheckGenericAccess<Handle<Object> >(
+ obj, holder, name, access_type, &Isolate::MayNamedAccessWrapper)) {
return ACCESS_ALLOWED;
}
@@ -1748,7 +1791,7 @@ static AccessCheckResult CheckPropertyAccess(
case INTERCEPTOR:
// If the object has an interceptor, try real named properties.
// Overwrite the result to fetch the correct property later.
- lookup.holder()->LookupRealNamedProperty(name, &lookup);
+ holder->LookupRealNamedProperty(*name, &lookup);
if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
return ACCESS_ALLOWED;
@@ -1759,7 +1802,7 @@ static AccessCheckResult CheckPropertyAccess(
break;
}
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ isolate->ReportFailedAccessCheck(*obj, access_type);
return ACCESS_FORBIDDEN;
}
@@ -1777,30 +1820,30 @@ enum PropertyDescriptorIndices {
};
-static MaybeObject* GetOwnProperty(Isolate* isolate,
- Handle<JSObject> obj,
- Handle<Name> name) {
+static Handle<Object> GetOwnProperty(Isolate* isolate,
+ Handle<JSObject> obj,
+ Handle<Name> name) {
Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
// Due to some WebKit tests, we want to make sure that we do not log
// more than one access failure here.
AccessCheckResult access_check_result =
- CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ CheckPropertyAccess(obj, name, v8::ACCESS_HAS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
switch (access_check_result) {
- case ACCESS_FORBIDDEN: return heap->false_value();
+ case ACCESS_FORBIDDEN: return factory->false_value();
case ACCESS_ALLOWED: break;
- case ACCESS_ABSENT: return heap->undefined_value();
+ case ACCESS_ABSENT: return factory->undefined_value();
}
PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
if (attrs == ABSENT) {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->undefined_value();
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->undefined_value();
}
ASSERT(!isolate->has_scheduled_exception());
AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
Handle<AccessorPair> accessors(raw_accessors, isolate);
-
Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
@@ -1810,28 +1853,30 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
// GetProperty does access check.
Handle<Object> value = GetProperty(isolate, obj, name);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<Object>::null());
elms->set(VALUE_INDEX, *value);
} else {
// Access checks are performed for both accessors separately.
// When they fail, the respective field is not set in the descriptor.
- Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
- Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
- if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
+ Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate);
+ Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate);
+
+ if (!getter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_GET)) {
ASSERT(!isolate->has_scheduled_exception());
- elms->set(GETTER_INDEX, getter);
+ elms->set(GETTER_INDEX, *getter);
} else {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
- if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
+
+ if (!setter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_SET)) {
ASSERT(!isolate->has_scheduled_exception());
- elms->set(SETTER_INDEX, setter);
+ elms->set(SETTER_INDEX, *setter);
} else {
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
}
- return *isolate->factory()->NewJSArrayWithElements(elms);
+ return isolate->factory()->NewJSArrayWithElements(elms);
}
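
GetOwnProperty now reports a scheduled exception through an empty handle rather than a MaybeObject failure; its caller in the next hunk checks RETURN_IF_EMPTY_HANDLE before dereferencing. A minimal version of that empty-handle protocol, with a toy Handle that only models null-ness:

    #include <cstdio>

    template <typename T>
    class Handle {
     public:
      Handle() : location_(nullptr) {}                 // empty = exception pending
      explicit Handle(T* location) : location_(location) {}
      bool is_null() const { return location_ == nullptr; }
      T* operator*() const { return location_; }
     private:
      T* location_;
    };

    struct Object { int tag; };

    Handle<Object> GetOwnProperty(bool throws) {
      static Object descriptor = {42};
      if (throws) return Handle<Object>();  // RETURN_HANDLE_IF_SCHEDULED_EXCEPTION analogue
      return Handle<Object>(&descriptor);
    }

    int main() {
      Handle<Object> result = GetOwnProperty(false);
      if (result.is_null()) return 1;       // RETURN_IF_EMPTY_HANDLE analogue
      std::printf("tag=%d\n", (*result)->tag);
      return 0;
    }
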
@@ -1847,15 +1892,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- return GetOwnProperty(isolate, obj, name);
+ Handle<Object> result = GetOwnProperty(isolate, obj, name);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return obj->PreventExtensions();
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ Handle<Object> result = JSObject::PreventExtensions(obj);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -1879,8 +1928,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result =
- RegExpImpl::Compile(re, pattern, flags);
+ Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -2172,7 +2220,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
// Declare the property by setting it to the initial value if provided,
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
- ASSERT(!object->HasLocalProperty(*name));
+ ASSERT(!JSReceiver::HasLocalProperty(object, name));
Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
@@ -2204,7 +2252,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
// args[0] == name
// args[1] == language_mode
// args[2] == value (optional)
@@ -2215,7 +2263,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
bool assign = args.length() == 3;
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- GlobalObject* global = isolate->context()->global_object();
RUNTIME_ASSERT(args[1]->IsSmi());
CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
@@ -2232,28 +2279,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
- Object* object = global;
LookupResult lookup(isolate);
- JSObject::cast(object)->LocalLookup(*name, &lookup, true);
+ isolate->context()->global_object()->LocalLookup(*name, &lookup, true);
if (lookup.IsInterceptor()) {
- HandleScope handle_scope(isolate);
PropertyAttributes intercepted =
lookup.holder()->GetPropertyAttribute(*name);
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
// Found an interceptor that's not read only.
if (assign) {
- return lookup.holder()->SetProperty(
- &lookup, *name, args[2], attributes, strict_mode_flag);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<Object> result = JSObject::SetPropertyForResult(
+ handle(lookup.holder()), &lookup, name, value, attributes,
+ strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
return isolate->heap()->undefined_value();
}
}
}
- // Reload global in case the loop above performed a GC.
- global = isolate->context()->global_object();
if (assign) {
- return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<GlobalObject> global(isolate->context()->global_object());
+ Handle<Object> result = JSReceiver::SetProperty(
+ global, name, value, attributes, strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return isolate->heap()->undefined_value();
}
@@ -2611,6 +2663,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsCallable) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsCallable());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -2909,19 +2969,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
source_shared->set_dont_flush(true);
// Set the code, scope info, formal parameter count, and the length
- // of the target shared function info. Set the source code of the
- // target function to undefined. SetCode is only used for built-in
- // constructors like String, Array, and Object, and some web code
- // doesn't like seeing source code for constructors.
+ // of the target shared function info.
target_shared->ReplaceCode(source_shared->code());
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_formal_parameter_count(
source_shared->formal_parameter_count());
- target_shared->set_script(isolate->heap()->undefined_value());
-
- // Since we don't store the source we should never optimize this.
- target_shared->code()->set_optimizable(false);
+ target_shared->set_script(source_shared->script());
+ target_shared->set_start_position_and_type(
+ source_shared->start_position_and_type());
+ target_shared->set_end_position(source_shared->end_position());
+ bool was_native = target_shared->native();
+ target_shared->set_compiler_hints(source_shared->compiler_hints());
+ target_shared->set_native(was_native);
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
@@ -2953,39 +3013,51 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
CONVERT_SMI_ARG_CHECKED(num, 1);
RUNTIME_ASSERT(num >= 0);
- SetExpectedNofProperties(function, num);
+ // If objects constructed from this function exist then changing
+ // 'estimated_nof_properties' is dangerous since the previous value might
+ // have been compiled into the fast construct stub. Moreover, the inobject
+ // slack tracking logic might have adjusted the previous value, so even
+ // passing the same value is risky.
+ if (!func->shared()->live_objects_may_exist()) {
+ func->shared()->set_expected_nof_properties(num);
+ if (func->has_initial_map()) {
+ Handle<Map> new_initial_map =
+ func->GetIsolate()->factory()->CopyMap(
+ Handle<Map>(func->initial_map()));
+ new_initial_map->set_unused_property_fields(num);
+ func->set_initial_map(*new_initial_map);
+ }
+ }
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- JSFunction* function = frame->function();
+ Handle<JSFunction> function(frame->function());
RUNTIME_ASSERT(function->shared()->is_generator());
- JSGeneratorObject* generator;
+ Handle<JSGeneratorObject> generator;
if (frame->IsConstructor()) {
- generator = JSGeneratorObject::cast(frame->receiver());
+ generator = handle(JSGeneratorObject::cast(frame->receiver()));
} else {
- MaybeObject* maybe_generator =
- isolate->heap()->AllocateJSGeneratorObject(function);
- if (!maybe_generator->To(&generator)) return maybe_generator;
+ generator = isolate->factory()->NewJSGeneratorObject(function);
}
- generator->set_function(function);
+ generator->set_function(*function);
generator->set_context(Context::cast(frame->context()));
generator->set_receiver(frame->receiver());
generator->set_continuation(0);
generator->set_operand_stack(isolate->heap()->empty_fixed_array());
generator->set_stack_handler_index(-1);
- return generator;
+ return *generator;
}
@@ -3098,10 +3170,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- return object->Freeze(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ Handle<Object> result = JSObject::Freeze(object);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -4397,10 +4471,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
isolate->counters()->sub_string_runtime()->Increment();
- if (end - start == 1) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(
- value->Get(start));
- }
return value->SubString(start, end);
}
@@ -4778,6 +4848,19 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
}
+static Handle<Name> ToName(Isolate* isolate, Handle<Object> key) {
+ if (key->IsName()) {
+ return Handle<Name>::cast(key);
+ } else {
+ bool has_pending_exception = false;
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
+ if (has_pending_exception) return Handle<Name>();
+ return Handle<Name>::cast(converted);
+ }
+}
+
+
MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> key) {
@@ -4786,22 +4869,14 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return isolate->heap()->ToBoolean(object->HasElement(index));
+ return isolate->heap()->ToBoolean(JSReceiver::HasElement(object, index));
}
// Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name;
- if (key->IsName()) {
- name = Handle<Name>::cast(key);
- } else {
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- name = Handle<Name>::cast(converted);
- }
+ Handle<Name> name = ToName(isolate, key);
+ RETURN_IF_EMPTY_HANDLE(isolate, name);
- return isolate->heap()->ToBoolean(object->HasProperty(*name));
+ return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name));
}
MaybeObject* Runtime::GetObjectPropertyOrFail(
@@ -4832,16 +4907,8 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
}
// Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name;
- if (key->IsName()) {
- name = Handle<Name>::cast(key);
- } else {
- bool has_pending_exception = false;
- Handle<Object> converted =
- Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- name = Handle<Name>::cast(converted);
- }
+ Handle<Name> name = ToName(isolate, key);
+ RETURN_IF_EMPTY_HANDLE(isolate, name);
// Check if the name is trivially convertible to an index and get
// the element if so.
@@ -5019,12 +5086,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- LookupResult result(isolate);
- js_object->LocalLookupRealNamedProperty(*name, &result);
+ LookupResult lookup(isolate);
+ js_object->LocalLookupRealNamedProperty(*name, &lookup);
// Special case for callback properties.
- if (result.IsPropertyCallbacks()) {
- Object* callback = result.GetCallbackObject();
+ if (lookup.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup.GetCallbackObject(), isolate);
// To be compatible with Safari we do not change the value on API objects
// in Object.defineProperty(). Firefox disagrees here, and actually changes
// the value.
@@ -5035,12 +5102,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// setter to update the value instead.
// TODO(mstarzinger): So far this only works if property attributes don't
// change, this should be fixed once we cleanup the underlying code.
- if (callback->IsForeign() && result.GetAttributes() == attr) {
- return js_object->SetPropertyWithCallback(callback,
- *name,
- *obj_value,
- result.holder(),
- kStrictMode);
+ if (callback->IsForeign() && lookup.GetAttributes() == attr) {
+ Handle<Object> result_object =
+ JSObject::SetPropertyWithCallback(js_object,
+ callback,
+ name,
+ obj_value,
+ handle(lookup.holder()),
+ kStrictMode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result_object);
+ return *result_object;
}
}
@@ -5050,8 +5121,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
// map. The current version of SetObjectProperty does not handle attributes
// correctly in the case where a property is a field and is reset with
// new attributes.
- if (result.IsFound() &&
- (attr != result.GetAttributes() || result.IsPropertyCallbacks())) {
+ if (lookup.IsFound() &&
+ (attr != lookup.GetAttributes() || lookup.IsPropertyCallbacks())) {
// New attributes - normalize to avoid writing to instance descriptor
if (js_object->IsJSGlobalProxy()) {
// Since the result is a property, the prototype will exist so
@@ -5067,11 +5138,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
return *result;
}
- return Runtime::ForceSetObjectProperty(isolate,
- js_object,
- name,
- obj_value,
- attr);
+ Handle<Object> result = Runtime::ForceSetObjectProperty(isolate, js_object,
+ name,
+ obj_value,
+ attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -5105,46 +5177,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
}
-MaybeObject* Runtime::SetObjectPropertyOrFail(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate,
- SetObjectProperty(isolate, object, key, value, attr, strict_mode));
-}
-
-
-MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode) {
+Handle<Object> Runtime::SetObjectProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr,
+ StrictModeFlag strict_mode) {
SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
- HandleScope scope(isolate);
if (object->IsUndefined() || object->IsNull()) {
Handle<Object> args[2] = { key, object };
Handle<Object> error =
isolate->factory()->NewTypeError("non_object_property_store",
HandleVector(args, 2));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
if (object->IsJSProxy()) {
bool has_pending_exception = false;
- Handle<Object> name = key->IsSymbol()
+ Handle<Object> name_object = key->IsSymbol()
? key : Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
- return JSProxy::cast(*object)->SetProperty(
- Name::cast(*name), *value, attr, strict_mode);
+ if (has_pending_exception) return Handle<Object>(); // exception
+ Handle<Name> name = Handle<Name>::cast(name_object);
+ return JSReceiver::SetProperty(Handle<JSProxy>::cast(object), name, value,
+ attr,
+ strict_mode);
}
// If the object isn't a JavaScript object, we ignore the store.
- if (!object->IsJSObject()) return *value;
+ if (!object->IsJSObject()) return value;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
@@ -5159,7 +5221,7 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
// string does nothing with the assignment then we can ignore such
// assignments.
if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
+ return value;
}
js_object->ValidateElements();
@@ -5168,19 +5230,19 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
bool has_exception;
Handle<Object> number =
Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Failure::Exception();
+ if (has_exception) return Handle<Object>(); // exception
value = number;
}
}
- MaybeObject* result = js_object->SetElement(
- index, *value, attr, strict_mode, true, set_mode);
+ Handle<Object> result = JSObject::SetElement(js_object, index, value, attr,
+ strict_mode,
+ true,
+ set_mode);
js_object->ValidateElements();
- if (result->IsFailure()) return result;
- return *value;
+ return result.is_null() ? result : value;
}
if (key->IsName()) {
- MaybeObject* result;
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
if (js_object->HasExternalArrayElements()) {
@@ -5188,43 +5250,41 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
bool has_exception;
Handle<Object> number =
Execution::ToNumber(isolate, value, &has_exception);
- if (has_exception) return Failure::Exception();
+ if (has_exception) return Handle<Object>(); // exception
value = number;
}
}
- result = js_object->SetElement(
- index, *value, attr, strict_mode, true, set_mode);
+ return JSObject::SetElement(js_object, index, value, attr, strict_mode,
+ true,
+ set_mode);
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- result = js_object->SetProperty(*name, *value, attr, strict_mode);
+ return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
}
- if (result->IsFailure()) return result;
- return *value;
}
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
Handle<Object> converted =
Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>(); // exception
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, strict_mode, true, set_mode);
+ return JSObject::SetElement(js_object, index, value, attr, strict_mode,
+ true,
+ set_mode);
} else {
- return js_object->SetProperty(*name, *value, attr, strict_mode);
+ return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
}
}
-MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
- Handle<JSObject> js_object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr) {
- HandleScope scope(isolate);
-
+Handle<Object> Runtime::ForceSetObjectProperty(Isolate* isolate,
+ Handle<JSObject> js_object,
+ Handle<Object> key,
+ Handle<Object> value,
+ PropertyAttributes attr) {
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
@@ -5236,24 +5296,24 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
// string does nothing with the assignment then we can ignore such
// assignments.
if (js_object->IsStringObjectWithCharacterAt(index)) {
- return *value;
+ return value;
}
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ false,
+ DEFINE_PROPERTY);
}
if (key->IsName()) {
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ false,
+ DEFINE_PROPERTY);
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
- js_object, name, value, attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name,
+ value, attr);
}
}
@@ -5261,17 +5321,16 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
bool has_pending_exception = false;
Handle<Object> converted =
Execution::ToString(isolate, key, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>(); // exception
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(
- index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+ return JSObject::SetElement(js_object, index, value, attr, kNonStrictMode,
+ false,
+ DEFINE_PROPERTY);
} else {
- Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
- js_object, name, value, attr);
- RETURN_IF_EMPTY_HANDLE(isolate, result);
- return *result;
+ return JSObject::SetLocalPropertyIgnoreAttributes(js_object, name, value,
+ attr);
}
}
@@ -5320,12 +5379,12 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
RUNTIME_ASSERT(
(unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -5339,12 +5398,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
strict_mode = strict_mode_flag;
}
- return Runtime::SetObjectProperty(isolate,
- object,
- key,
- value,
- attributes,
- strict_mode);
+ Handle<Object> result = Runtime::SetObjectProperty(isolate, object, key,
+ value,
+ attributes,
+ strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -5365,11 +5424,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, object, 0);
+
+ if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(object);
+ func->shared()->set_native(true);
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInlineBuiltinFlag) {
+ SealHandleScope shs(isolate);
+ RUNTIME_ASSERT(args.length() == 1);
+
Handle<Object> object = args.at<Object>(0);
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(*object);
- func->shared()->set_native(true);
+ func->shared()->set_inline_builtin(true);
}
return isolate->heap()->undefined_value();
}
@@ -5512,7 +5585,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> key) {
- if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
+ if (JSReceiver::HasLocalProperty(object, key)) {
+ return isolate->heap()->true_value();
+ }
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
@@ -5529,40 +5604,39 @@ static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ Handle<Object> object = args.at<Object>(0);
uint32_t index;
const bool key_is_array_index = key->AsArrayIndex(&index);
- Object* obj = args[0];
// Only JS objects can have properties.
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
+ if (object->IsJSObject()) {
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
- if (object->HasRealNamedProperty(isolate, key)) {
+ if (JSObject::HasRealNamedProperty(js_obj, key)) {
ASSERT(!isolate->has_scheduled_exception());
return isolate->heap()->true_value();
} else {
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
}
- Map* map = object->map();
+ Map* map = js_obj->map();
if (!key_is_array_index &&
!map->has_named_interceptor() &&
!HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
return isolate->heap()->false_value();
}
// Slow case.
- HandleScope scope(isolate);
return HasLocalPropertyImplementation(isolate,
- Handle<JSObject>(object),
+ Handle<JSObject>(js_obj),
Handle<Name>(key));
- } else if (obj->IsString() && key_is_array_index) {
+ } else if (object->IsString() && key_is_array_index) {
// Well, there is one exception: Handle [] on strings.
- String* string = String::cast(obj);
+ Handle<String> string = Handle<String>::cast(object);
if (index < static_cast<uint32_t>(string->length())) {
return isolate->heap()->true_value();
}
@@ -5572,12 +5646,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- bool result = receiver->HasProperty(key);
+ bool result = JSReceiver::HasProperty(receiver, key);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
@@ -5585,12 +5659,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
- bool result = receiver->HasElement(index);
+ bool result = JSReceiver::HasElement(receiver, index);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
@@ -5931,12 +6005,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Object* object = args[0];
- return (object->IsJSObject() && !object->IsGlobalObject())
- ? JSObject::cast(object)->TransformToFastProperties(0)
- : object;
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ if (object->IsJSObject() && !object->IsGlobalObject()) {
+ JSObject::TransformToFastProperties(Handle<JSObject>::cast(object), 0);
+ }
+ return *object;
}
@@ -6304,6 +6379,29 @@ static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
}
+#ifdef DEBUG
+static bool CheckFastAsciiConvert(char* dst,
+ char* src,
+ int length,
+ bool changed,
+ bool is_to_lower) {
+ bool expected_changed = false;
+ for (int i = 0; i < length; i++) {
+ if (dst[i] == src[i]) continue;
+ expected_changed = true;
+ if (is_to_lower) {
+ ASSERT('A' <= src[i] && src[i] <= 'Z');
+ ASSERT(dst[i] == src[i] + ('a' - 'A'));
+ } else {
+ ASSERT('a' <= src[i] && src[i] <= 'z');
+ ASSERT(dst[i] == src[i] - ('a' - 'A'));
+ }
+ }
+ return (expected_changed == changed);
+}
+#endif
+
+
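
The two hunks around FastAsciiConvert are a pure move: the DEBUG-only checker now precedes the converter so it is declared before the ASSERT that calls it. The same declaration-before-use idiom in a standalone form:

    #include <cassert>
    #include <cctype>

    #define DEBUG 1  // stand-in for V8's debug-build define

    #ifdef DEBUG
    // Must appear before ToLowerAscii, which asserts on it.
    static bool CheckLowered(const char* dst, const char* src, int n) {
      for (int i = 0; i < n; i++) {
        unsigned char c = static_cast<unsigned char>(src[i]);
        if (dst[i] != static_cast<char>(std::tolower(c))) return false;
      }
      return true;
    }
    #endif

    static void ToLowerAscii(char* dst, const char* src, int n) {
      for (int i = 0; i < n; i++) {
        dst[i] = static_cast<char>(
            std::tolower(static_cast<unsigned char>(src[i])));
      }
    #ifdef DEBUG
      assert(CheckLowered(dst, src, n));
    #endif
    }
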
template<class Converter>
static bool FastAsciiConvert(char* dst,
char* src,
@@ -6375,28 +6473,6 @@ static bool FastAsciiConvert(char* dst,
return true;
}
-#ifdef DEBUG
-static bool CheckFastAsciiConvert(char* dst,
- char* src,
- int length,
- bool changed,
- bool is_to_lower) {
- bool expected_changed = false;
- for (int i = 0; i < length; i++) {
- if (dst[i] == src[i]) continue;
- expected_changed = true;
- if (is_to_lower) {
- ASSERT('A' <= src[i] && src[i] <= 'Z');
- ASSERT(dst[i] == src[i] + ('a' - 'A'));
- } else {
- ASSERT('a' <= src[i] && src[i] <= 'z');
- ASSERT(dst[i] == src[i] - ('a' - 'A'));
- }
- }
- return (expected_changed == changed);
-}
-#endif
-
} // namespace
@@ -7629,16 +7705,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- isolate->counters()->math_ceil()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->heap()->NumberFromDouble(ceiling(x));
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -7954,21 +8020,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
// Allocate the elements if needed.
if (length > 0) {
// Allocate the fixed array.
- Object* obj;
- { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ FixedArray* array;
+ { MaybeObject* maybe_obj =
+ isolate->heap()->AllocateUninitializedFixedArray(length);
+ if (!maybe_obj->To(&array)) return maybe_obj;
}
DisallowHeapAllocation no_gc;
- FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
- array->set_length(length);
-
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
- JSObject::cast(result)->set_elements(FixedArray::cast(obj));
+ JSObject::cast(result)->set_elements(array);
}
return result;
}
@@ -8297,7 +8360,7 @@ bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
// If the function is not optimizable or debugger is active continue using the
// code from the full compiler.
- if (!FLAG_crankshaft ||
+ if (!isolate->use_crankshaft() ||
function->shared()->optimization_disabled() ||
isolate->DebuggerHasBreakPoints()) {
if (FLAG_trace_opt) {
@@ -8345,7 +8408,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ConcurrentRecompile) {
return isolate->heap()->undefined_value();
}
function->shared()->code()->set_profiler_ticks(0);
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(isolate->concurrent_recompilation_enabled());
if (!Compiler::RecompileConcurrent(function)) {
function->ReplaceCode(function->shared()->code());
}
@@ -8445,14 +8508,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- SealHandleScope shs(isolate);
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8490,7 +8545,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) {
HandleScope scope(isolate);
- return FLAG_concurrent_recompilation
+ return isolate->concurrent_recompilation_enabled()
? isolate->heap()->true_value() : isolate->heap()->false_value();
}
@@ -8510,8 +8565,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
// Start patching from the currently patched loop nesting level.
int current_level = unoptimized->allow_osr_at_loop_nesting_level();
- ASSERT(Deoptimizer::VerifyInterruptCode(
- isolate, unoptimized, current_level));
+ ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
unoptimized->set_allow_osr_at_loop_nesting_level(i);
isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
@@ -8549,7 +8603,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_concurrent_recompilation && sync_with_compiler_thread) {
+ if (isolate->concurrent_recompilation_enabled() &&
+ sync_with_compiler_thread) {
while (function->IsInRecompileQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
@@ -8569,6 +8624,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) {
+ RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+ isolate->optimizing_compiler_thread()->Unblock();
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8581,7 +8643,7 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function,
Handle<Code> unoptimized) {
// Keep track of whether we've succeeded in optimizing.
- if (!unoptimized->optimizable()) return false;
+ if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
@@ -8620,7 +8682,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
Handle<Code> result = Handle<Code>::null();
BailoutId ast_id = BailoutId::None();
- if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) {
+ if (isolate->concurrent_osr_enabled()) {
if (isolate->optimizing_compiler_thread()->
IsQueuedForOSR(function, pc_offset)) {
// Still waiting for the optimizing compiler thread to finish. Carry on.
@@ -8632,25 +8694,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
return NULL;
}
- OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
+ RecompileJob* job = isolate->optimizing_compiler_thread()->
FindReadyOSRCandidate(function, pc_offset);
- if (compiler == NULL) {
+ if (job == NULL) {
if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
Compiler::RecompileConcurrent(function, pc_offset)) {
if (function->IsMarkedForLazyRecompilation() ||
function->IsMarkedForConcurrentRecompilation()) {
// Prevent regular recompilation if we queue this for OSR.
// TODO(yangguo): remove this as soon as OSR becomes one-shot.
- function->ReplaceCode(function->shared()->code());
+ function->ReplaceCode(*unoptimized);
}
return NULL;
}
// Fall through to the end in case of failure.
} else {
// TODO(titzer): don't install the OSR code into the function.
- ast_id = compiler->info()->osr_ast_id();
- result = Compiler::InstallOptimizedCode(compiler);
+ ast_id = job->info()->osr_ast_id();
+ result = Compiler::InstallOptimizedCode(job);
}
} else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
@@ -8664,8 +8726,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
}
- // Revert the patched interrupt now, regardless of whether OSR succeeds.
- Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+ // Revert the patched back edge table, regardless of whether OSR succeeds.
+ BackEdgeTable::Revert(isolate, *unoptimized);
// Check whether we ended up with usable optimized code.
if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
@@ -9202,7 +9264,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
// property from it.
if (!holder.is_null()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
- ASSERT(object->IsJSProxy() || object->HasProperty(*name));
+ ASSERT(object->IsJSProxy() || JSReceiver::HasProperty(object, name));
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
object->IsGlobalObject()
@@ -9371,6 +9433,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowMessage) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ const char* message = GetBailoutReason(
+ static_cast<BailoutReason>(message_id));
+ Handle<Name> message_handle =
+ isolate->factory()->NewStringFromAscii(CStrVector(message));
+ return isolate->Throw(*message_handle);
+}
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
SealHandleScope shs(isolate);
@@ -9573,6 +9646,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAttachedGlobal) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ Object* global = args[0];
+ if (!global->IsJSGlobalObject()) return isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(
+ !JSGlobalObject::cast(global)->IsDetached());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
HandleScope scope(isolate);
ASSERT_EQ(1, args.length());
@@ -9708,54 +9791,45 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
}
+// Allocate a block of memory in the given space (filled with a filler).
+// Used as a fallback for generated code when the space is full.
static MaybeObject* Allocate(Isolate* isolate,
int size,
+ bool double_align,
AllocationSpace space) {
- // Allocate a block of memory in the given space (filled with a filler).
- // Use as fallback for allocation in generated code when the space
- // is full.
- SealHandleScope shs(isolate);
+ Heap* heap = isolate->heap();
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
- Heap* heap = isolate->heap();
RUNTIME_ASSERT(size <= heap->MaxRegularSpaceAllocationSize());
- Object* allocation;
- { MaybeObject* maybe_allocation;
- if (space == NEW_SPACE) {
- maybe_allocation = heap->new_space()->AllocateRaw(size);
- } else {
- ASSERT(space == OLD_POINTER_SPACE || space == OLD_DATA_SPACE);
- maybe_allocation = heap->paged_space(space)->AllocateRaw(size);
- }
- if (maybe_allocation->ToObject(&allocation)) {
- heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
- }
- return maybe_allocation;
+ HeapObject* allocation;
+ { MaybeObject* maybe_allocation = heap->AllocateRaw(size, space, space);
+ if (!maybe_allocation->To(&allocation)) return maybe_allocation;
}
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromAddress(allocation->address());
+ ASSERT(chunk->owner()->identity() == space);
+#endif
+ heap->CreateFillerObjectAt(allocation->address(), size);
+ return allocation;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- return Allocate(isolate, size_smi->value(), NEW_SPACE);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- return Allocate(isolate, size_smi->value(), OLD_POINTER_SPACE);
+ CONVERT_SMI_ARG_CHECKED(size, 0);
+ return Allocate(isolate, size, false, NEW_SPACE);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInTargetSpace) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
- return Allocate(isolate, size_smi->value(), OLD_DATA_SPACE);
+ ASSERT(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(size, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ AllocationSpace space = AllocateTargetSpace::decode(flags);
+ return Allocate(isolate, size, double_align, space);
}
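
Note on the flags argument: Runtime_AllocateInTargetSpace takes a single Smi that packs both the double-alignment bit and the target space, via the AllocateDoubleAlignFlag and AllocateTargetSpace BitFields declared further down in runtime.h. A minimal standalone sketch of that packing, with an illustrative three-entry enum standing in for V8's real AllocationSpace:

#include <cstdio>

// Stand-in for v8::internal::AllocationSpace; only the shape matters here.
enum AllocationSpace { NEW_SPACE = 0, OLD_POINTER_SPACE = 1, OLD_DATA_SPACE = 2 };

// Mirrors BitField<bool, 0, 1> and BitField<AllocationSpace, 1, 3>:
// bit 0 carries double_align, bits 1-3 carry the space.
static int EncodeAllocateFlags(bool double_align, AllocationSpace space) {
  return (double_align ? 1 : 0) | (static_cast<int>(space) << 1);
}

static bool DecodeDoubleAlign(int flags) { return (flags & 1) != 0; }

static AllocationSpace DecodeTargetSpace(int flags) {
  return static_cast<AllocationSpace>((flags >> 1) & 0x7);
}

int main() {
  int flags = EncodeAllocateFlags(true, OLD_DATA_SPACE);
  std::printf("double_align=%d space=%d\n",
              DecodeDoubleAlign(flags) ? 1 : 0,
              static_cast<int>(DecodeTargetSpace(flags)));
  return 0;
}

Generated code builds this Smi on its allocation slow path, so a single runtime entry replaces the separate AllocateInOldPointerSpace and AllocateInOldDataSpace functions removed elsewhere in this patch.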
@@ -9763,22 +9837,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
// array. Returns true if the element was pushed on the stack and
// false otherwise.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
- CONVERT_ARG_CHECKED(JSReceiver, element, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, element, 1);
RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
for (int i = 0; i < length; i++) {
- if (elements->get(i) == element) return isolate->heap()->false_value();
+ if (elements->get(i) == *element) return isolate->heap()->false_value();
}
- Object* obj;
+
// Strict not needed. Used for cycle detection in Array join implementation.
- { MaybeObject* maybe_obj =
- array->SetFastElement(length, element, kNonStrictMode, true);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ RETURN_IF_EMPTY_HANDLE(isolate, JSObject::SetFastElement(array, length,
+ element,
+ kNonStrictMode,
+ true));
return isolate->heap()->true_value();
}
@@ -10183,7 +10257,7 @@ static bool IterateElements(Isolate* isolate,
Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
+ } else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
element_value = Object::GetElement(isolate, receiver, j);
@@ -10208,7 +10282,7 @@ static bool IterateElements(Isolate* isolate,
Handle<Object> element_value =
isolate->factory()->NewNumber(double_value);
visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
+ } else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
Handle<Object> element_value =
@@ -10501,11 +10575,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
// property.
// Returns the number of non-undefined elements collected.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- return object->PrepareElementsForSort(limit);
+ return *JSObject::PrepareElementsForSort(object, limit);
}
@@ -10596,14 +10670,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_SMI_ARG_CHECKED(flag, 2);
AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
- return JSObject::cast(receiver)->LookupAccessor(name, component);
+ Handle<Object> result =
+ JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -10683,19 +10760,20 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
case CALLBACKS: {
Object* structure = result->GetCallbackObject();
if (structure->IsForeign() || structure->IsAccessorInfo()) {
- MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
- receiver, structure, name);
- if (!maybe_value->ToObject(&value)) {
- if (maybe_value->IsRetryAfterGC()) return maybe_value;
- ASSERT(maybe_value->IsException());
- maybe_value = heap->isolate()->pending_exception();
+ Isolate* isolate = heap->isolate();
+ HandleScope scope(isolate);
+ Handle<Object> value = JSObject::GetPropertyWithCallback(
+ handle(result->holder(), isolate),
+ handle(receiver, isolate),
+ handle(structure, isolate),
+ handle(name, isolate));
+ if (value.is_null()) {
+ MaybeObject* exception = heap->isolate()->pending_exception();
heap->isolate()->clear_pending_exception();
- if (caught_exception != NULL) {
- *caught_exception = true;
- }
- return maybe_value;
+ if (caught_exception != NULL) *caught_exception = true;
+ return exception;
}
- return value;
+ return *value;
} else {
return heap->undefined_value();
}
@@ -10883,7 +10961,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
PropertyAttributes attributes;
- return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
+ Handle<Object> result =
+ JSObject::GetPropertyWithInterceptor(obj, obj, name, &attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -11323,12 +11404,12 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- target,
- Handle<String>(scope_info->ParameterName(i)),
- value,
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(isolate,
+ target,
+ Handle<String>(scope_info->ParameterName(i)),
+ value,
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
}
@@ -11339,12 +11420,13 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- target,
- Handle<String>(scope_info->StackLocalName(i)),
- value,
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(
+ isolate,
+ target,
+ Handle<String>(scope_info->StackLocalName(i)),
+ value,
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
}
@@ -11400,8 +11482,8 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, function_context, target)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, function_context, target)) {
return Handle<JSObject>();
}
@@ -11422,12 +11504,12 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate,
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- target,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(isolate,
+ target,
+ key,
+ GetProperty(isolate, ext, key),
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
}
}
@@ -11524,15 +11606,12 @@ static bool SetLocalVariableValue(Isolate* isolate,
!function_context->IsNativeContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing
// property value.
- SetProperty(isolate,
- ext,
- variable_name,
- new_value,
- NONE,
- kNonStrictMode);
+ Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
+ NONE,
+ kNonStrictMode);
return true;
}
}
@@ -11558,8 +11637,8 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, closure_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, closure_scope)) {
return Handle<JSObject>();
}
@@ -11578,12 +11657,10 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- closure_scope,
- key,
- GetProperty(isolate, ext, key),
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, closure_scope, key,
+ GetProperty(isolate, ext, key),
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
}
}
@@ -11612,14 +11689,11 @@ static bool SetClosureVariableValue(Isolate* isolate,
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
- SetProperty(isolate,
- ext,
- variable_name,
- new_value,
- NONE,
- kNonStrictMode);
+ Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
+ NONE,
+ kNonStrictMode);
return true;
}
}
@@ -11640,12 +11714,9 @@ static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- catch_scope,
- name,
- thrown_object,
- NONE,
- kNonStrictMode),
+ Runtime::SetObjectProperty(isolate, catch_scope, name, thrown_object,
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
return catch_scope;
}
@@ -11679,8 +11750,8 @@ static Handle<JSObject> MaterializeBlockScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, block_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, block_scope)) {
return Handle<JSObject>();
}
@@ -11702,8 +11773,8 @@ static Handle<JSObject> MaterializeModuleScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!scope_info->CopyContextLocalsToScopeObject(
- isolate, context, module_scope)) {
+ if (!ScopeInfo::CopyContextLocalsToScopeObject(
+ scope_info, context, module_scope)) {
return Handle<JSObject>();
}
@@ -12655,19 +12726,19 @@ static Handle<JSObject> MaterializeArgumentsObject(
// Do not materialize the arguments object for eval or top-level code.
// Skip if "arguments" is already taken.
if (!function->shared()->is_function() ||
- target->HasLocalProperty(isolate->heap()->arguments_string())) {
+ JSReceiver::HasLocalProperty(target,
+ isolate->factory()->arguments_string())) {
return target;
}
// FunctionGetArguments can't throw an exception.
Handle<JSObject> arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
- SetProperty(isolate,
- target,
- isolate->factory()->arguments_string(),
- arguments,
- ::NONE,
- kNonStrictMode);
+ Runtime::SetObjectProperty(isolate, target,
+ isolate->factory()->arguments_string(),
+ arguments,
+ ::NONE,
+ kNonStrictMode);
return target;
}
@@ -14270,6 +14341,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AbortJS) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ OS::PrintError("abort: %s\n", *message->ToCString());
+ isolate->PrintStack(stderr);
+ OS::Abort();
+ UNREACHABLE();
+ return NULL;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -14542,30 +14625,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
if (obj->IsJSGlobalProxy()) {
Object* proto = obj->GetPrototype();
if (proto->IsNull()) return isolate->heap()->undefined_value();
ASSERT(proto->IsJSGlobalObject());
- obj = JSReceiver::cast(proto);
+ obj = handle(JSReceiver::cast(proto));
}
if (obj->IsJSProxy())
return isolate->heap()->undefined_value();
ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
- JSObject::cast(obj)->HasFastElements()));
+ Handle<JSObject>::cast(obj)->HasFastElements()));
ASSERT(obj->IsJSObject());
- return JSObject::cast(obj)->SetObserved(isolate);
+ JSObject::SetObserved(Handle<JSObject>::cast(obj));
+ return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetMicrotaskPending) {
SealHandleScope shs(isolate);
- ASSERT(args.length() == 0);
- isolate->set_observer_delivery_pending(true);
- return isolate->heap()->undefined_value();
+ ASSERT(args.length() == 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(new_state, 0);
+ bool old_state = isolate->microtask_pending();
+ isolate->set_microtask_pending(new_state);
+ return isolate->heap()->ToBoolean(old_state);
}
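
The replacement is a swap rather than a plain setter: it installs the new pending state and hands back the previous one, so a caller can set and test the flag in one runtime call. The contract, as a standalone sketch (the file-scope flag stands in for the isolate's microtask-pending field):

#include <cstdio>

static bool microtask_pending = false;  // stand-in for the per-isolate flag

// Same shape as Runtime_SetMicrotaskPending: returns the old state.
static bool SetMicrotaskPending(bool new_state) {
  bool old_state = microtask_pending;
  microtask_pending = new_state;
  return old_state;
}

int main() {
  std::printf("%d\n", SetMicrotaskPending(true) ? 1 : 0);   // 0: was clear
  std::printf("%d\n", SetMicrotaskPending(false) ? 1 : 0);  // 1: was set
  return 0;
}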
@@ -14632,7 +14718,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
- Handle<Object> type_info,
+ Handle<AllocationSite> site,
Arguments* caller_args) {
bool holey = false;
bool can_use_type_feedback = true;
@@ -14654,14 +14740,7 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
JSArray* array;
MaybeObject* maybe_array;
- if (!type_info.is_null() &&
- *type_info != isolate->heap()->undefined_value() &&
- Cell::cast(*type_info)->value()->IsAllocationSite() &&
- can_use_type_feedback) {
- Handle<Cell> cell = Handle<Cell>::cast(type_info);
- Handle<AllocationSite> site = Handle<AllocationSite>(
- AllocationSite::cast(cell->value()), isolate);
- ASSERT(!site->IsLiteralSite());
+ if (!site.is_null() && can_use_type_feedback) {
ElementsKind to_kind = site->GetElementsKind();
if (holey && !IsFastHoleyElementsKind(to_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -14687,8 +14766,17 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
DONT_INITIALIZE_ARRAY_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
+ ElementsKind old_kind = array->GetElementsKind();
maybe_array = ArrayConstructInitializeElements(array, caller_args);
if (maybe_array->IsFailure()) return maybe_array;
+ if (!site.is_null() &&
+ (old_kind != array->GetElementsKind() ||
+ !can_use_type_feedback)) {
+ // The arguments passed in caused a transition. This kind of complexity
+ // can't be dealt with in the inlined Hydrogen array constructor case.
+ // We must mark the allocation site as un-inlinable.
+ site->SetDoNotInlineCall();
+ }
return array;
}
@@ -14696,21 +14784,38 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConstructor) {
HandleScope scope(isolate);
// If we get 2 arguments then they are the stub parameters (constructor, type
- // info). If we get 3, then the first one is a pointer to the arguments
- // passed by the caller.
+ // info). If we get 4, then the first one is a pointer to the arguments
+ // passed by the caller, and the last one is the length of those arguments
+ // (redundant, but useful for an assert that cross-checks the deoptimizer).
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 2;
- ASSERT(no_caller_args || args.length() == 3);
+ ASSERT(no_caller_args || args.length() == 4);
int parameters_start = no_caller_args ? 0 : 1;
Arguments* caller_args = no_caller_args
? &empty_args
: reinterpret_cast<Arguments*>(args[0]);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
CONVERT_ARG_HANDLE_CHECKED(Object, type_info, parameters_start + 1);
+#ifdef DEBUG
+ if (!no_caller_args) {
+ CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 2);
+ ASSERT(arg_count == caller_args->length());
+ }
+#endif
+
+ Handle<AllocationSite> site;
+ if (!type_info.is_null() &&
+ *type_info != isolate->heap()->undefined_value() &&
+ Cell::cast(*type_info)->value()->IsAllocationSite()) {
+ site = Handle<AllocationSite>(
+ AllocationSite::cast(Cell::cast(*type_info)->value()), isolate);
+ ASSERT(!site->SitePointsToLiteral());
+ }
return ArrayConstructorCommon(isolate,
constructor,
- type_info,
+ site,
caller_args);
}
@@ -14719,16 +14824,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 1;
- ASSERT(no_caller_args || args.length() == 2);
+ ASSERT(no_caller_args || args.length() == 3);
int parameters_start = no_caller_args ? 0 : 1;
Arguments* caller_args = no_caller_args
? &empty_args
: reinterpret_cast<Arguments*>(args[0]);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
-
+#ifdef DEBUG
+ if (!no_caller_args) {
+ CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 1);
+ ASSERT(arg_count == caller_args->length());
+ }
+#endif
return ArrayConstructorCommon(isolate,
constructor,
- Handle<Object>::null(),
+ Handle<AllocationSite>::null(),
caller_args);
}
@@ -14800,8 +14910,7 @@ const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
}
-void Runtime::PerformGC(Object* result) {
- Isolate* isolate = Isolate::Current();
+void Runtime::PerformGC(Object* result, Isolate* isolate) {
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
if (isolate->heap()->new_space()->AddFreshPage()) {
diff --git a/chromium/v8/src/runtime.h b/chromium/v8/src/runtime.h
index 1ad9b3d6663..6a0358399d1 100644
--- a/chromium/v8/src/runtime.h
+++ b/chromium/v8/src/runtime.h
@@ -64,6 +64,7 @@ namespace internal {
F(ToFastProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 1, 1) \
+ F(IsCallable, 1, 1) \
F(IsClassicModeFunction, 1, 1) \
F(GetDefaultReceiver, 1, 1) \
\
@@ -91,7 +92,6 @@ namespace internal {
F(TryInstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyStubFailure, 0, 1) \
- F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
@@ -100,12 +100,13 @@ namespace internal {
F(NeverOptimizeFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
F(CompileForOnStackReplacement, 2, 1) \
F(SetAllocationTimeout, 2, 1) \
F(AllocateInNewSpace, 1, 1) \
- F(AllocateInOldPointerSpace, 1, 1) \
- F(AllocateInOldDataSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
F(SetNativeFlag, 1, 1) \
+ F(SetInlineBuiltinFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
@@ -179,7 +180,6 @@ namespace internal {
F(Math_asin, 1, 1) \
F(Math_atan, 1, 1) \
F(Math_atan2, 2, 1) \
- F(Math_ceil, 1, 1) \
F(Math_cos, 1, 1) \
F(Math_exp, 1, 1) \
F(Math_floor, 1, 1) \
@@ -278,6 +278,7 @@ namespace internal {
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
+ F(IsAttachedGlobal, 1, 1) \
F(ResolvePossiblyDirectEval, 5, 2) \
\
F(SetProperty, -1 /* 4 or 5 */, 1) \
@@ -300,9 +301,8 @@ namespace internal {
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
F(CreateObjectLiteral, 4, 1) \
- F(CreateObjectLiteralShallow, 4, 1) \
- F(CreateArrayLiteral, 3, 1) \
- F(CreateArrayLiteralShallow, 3, 1) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralStubBailout, 3, 1) \
\
/* Harmony generators */ \
F(CreateJSGeneratorObject, 0, 1) \
@@ -318,7 +318,9 @@ namespace internal {
\
/* Harmony symbols */ \
F(CreateSymbol, 1, 1) \
+ F(CreatePrivateSymbol, 1, 1) \
F(SymbolName, 1, 1) \
+ F(SymbolIsPrivate, 1, 1) \
\
/* Harmony proxies */ \
F(CreateJSProxy, 2, 1) \
@@ -352,10 +354,12 @@ namespace internal {
F(WeakCollectionDelete, 2, 1) \
F(WeakCollectionSet, 3, 1) \
\
+ /* Harmony events */ \
+ F(SetMicrotaskPending, 1, 1) \
+ \
/* Harmony observe */ \
F(IsObserved, 1, 1) \
F(SetIsObserved, 1, 1) \
- F(SetObserverDeliveryPending, 0, 1) \
F(GetObservationState, 0, 1) \
F(ObservationWeakMapCreate, 0, 1) \
F(UnwrapGlobalProxy, 1, 1) \
@@ -365,6 +369,7 @@ namespace internal {
F(ArrayBufferInitialize, 2, 1)\
F(ArrayBufferGetByteLength, 1, 1)\
F(ArrayBufferSliceImpl, 3, 1) \
+ F(ArrayBufferIsView, 1, 1) \
\
F(TypedArrayInitialize, 5, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
@@ -406,6 +411,7 @@ namespace internal {
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowNotDateError, 0, 1) \
+ F(ThrowMessage, 1, 1) \
F(StackGuard, 0, 1) \
F(Interrupt, 0, 1) \
F(PromoteScheduledException, 0, 1) \
@@ -437,6 +443,7 @@ namespace internal {
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
F(Abort, 2, 1) \
+ F(AbortJS, 1, 1) \
/* Logging */ \
F(Log, 2, 1) \
/* ES5 */ \
@@ -620,18 +627,15 @@ namespace internal {
F(OneByteSeqStringSetChar, 3, 1) \
F(TwoByteSeqStringSetChar, 3, 1) \
F(ObjectEquals, 2, 1) \
- F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
F(IsSpecObject, 1, 1) \
F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
F(MathPow, 2, 1) \
- F(MathSin, 1, 1) \
- F(MathCos, 1, 1) \
- F(MathTan, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathLog, 1, 1) \
+ F(IsMinusZero, 1, 1) \
F(IsRegExpEquivalent, 2, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
@@ -778,15 +782,7 @@ class Runtime : public AllStatic {
Handle<Object> object,
uint32_t index);
- MUST_USE_RESULT static MaybeObject* SetObjectProperty(
- Isolate* isolate,
- Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value,
- PropertyAttributes attr,
- StrictModeFlag strict_mode);
-
- MUST_USE_RESULT static MaybeObject* SetObjectPropertyOrFail(
+ static Handle<Object> SetObjectProperty(
Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -794,7 +790,7 @@ class Runtime : public AllStatic {
PropertyAttributes attr,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
+ static Handle<Object> ForceSetObjectProperty(
Isolate* isolate,
Handle<JSObject> object,
Handle<Object> key,
@@ -838,8 +834,24 @@ class Runtime : public AllStatic {
Isolate* isolate,
JSArrayBuffer* phantom_array_buffer);
+ enum TypedArrayId {
+ // arrayIds below should be synchronized with typedarray.js natives.
+ ARRAY_ID_UINT8 = 1,
+ ARRAY_ID_INT8 = 2,
+ ARRAY_ID_UINT16 = 3,
+ ARRAY_ID_INT16 = 4,
+ ARRAY_ID_UINT32 = 5,
+ ARRAY_ID_INT32 = 6,
+ ARRAY_ID_FLOAT32 = 7,
+ ARRAY_ID_FLOAT64 = 8,
+ ARRAY_ID_UINT8C = 9
+ };
+
+ static void ArrayIdToTypeAndSize(int array_id,
+ ExternalArrayType *type, size_t *element_size);
+
// Helper functions used by stubs.
- static void PerformGC(Object* result);
+ static void PerformGC(Object* result, Isolate* isolate);
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
static Handle<Object> CreateArrayLiteralBoilerplate(
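
The element widths are fixed by the arrayId names above; a hedged sketch of the size half of ArrayIdToTypeAndSize (the ExternalArrayType half is omitted, since those enum constants do not appear in this diff):

#include <cstddef>

// Element sizes in bytes per Runtime::TypedArrayId; ARRAY_ID_UINT8C is the
// clamped uint8 variant and is still one byte wide.
static size_t ElementSizeForArrayId(int array_id) {
  switch (array_id) {
    case 1: case 2: case 9: return 1;  // UINT8, INT8, UINT8C
    case 3: case 4:         return 2;  // UINT16, INT16
    case 5: case 6: case 7: return 4;  // UINT32, INT32, FLOAT32
    case 8:                 return 8;  // FLOAT64
    default:                return 0;  // unknown id
  }
}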
@@ -852,6 +864,9 @@ class Runtime : public AllStatic {
//---------------------------------------------------------------------------
// Constants used by interface to runtime functions.
+class AllocateDoubleAlignFlag: public BitField<bool, 0, 1> {};
+class AllocateTargetSpace: public BitField<AllocationSpace, 1, 3> {};
+
class DeclareGlobalsEvalFlag: public BitField<bool, 0, 1> {};
class DeclareGlobalsNativeFlag: public BitField<bool, 1, 1> {};
class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
diff --git a/chromium/v8/src/runtime.js b/chromium/v8/src/runtime.js
index 5339570ef6e..35bc07a10e8 100644
--- a/chromium/v8/src/runtime.js
+++ b/chromium/v8/src/runtime.js
@@ -361,7 +361,7 @@ function IN(x) {
function INSTANCE_OF(F) {
var V = this;
if (!IS_SPEC_FUNCTION(F)) {
- throw %MakeTypeError('instanceof_function_expected', [V]);
+ throw %MakeTypeError('instanceof_function_expected', [F]);
}
// If V is not an object, return false.
@@ -526,8 +526,8 @@ function ToNumber(x) {
: %StringToNumber(x);
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- if (IS_SYMBOL(x)) return $NaN;
+ if (IS_UNDEFINED(x)) return NAN;
+ if (IS_SYMBOL(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
@@ -537,8 +537,8 @@ function NonNumberToNumber(x) {
: %StringToNumber(x);
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return $NaN;
- if (IS_SYMBOL(x)) return $NaN;
+ if (IS_UNDEFINED(x)) return NAN;
+ if (IS_SYMBOL(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
@@ -606,7 +606,9 @@ function SameValue(x, y) {
if (IS_NUMBER(x)) {
if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
// x is +0 and y is -0 or vice versa.
- if (x === 0 && y === 0 && (1 / x) != (1 / y)) return false;
+ if (x === 0 && y === 0 && %_IsMinusZero(x) != %_IsMinusZero(y)) {
+ return false;
+ }
}
return x === y;
}
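
For context: +0 and -0 compare equal under ===, so the old 1/x trick distinguished them only through the sign of the resulting infinity. A standalone C++ illustration of the same distinction, with std::signbit standing in for the %_IsMinusZero intrinsic:

#include <cmath>
#include <cstdio>

int main() {
  double pz = 0.0, nz = -0.0;
  std::printf("equal under ==: %d\n", pz == nz ? 1 : 0);  // 1
  std::printf("signbits: %d %d\n",
              std::signbit(pz) ? 1 : 0, std::signbit(nz) ? 1 : 0);  // 0 1
  // SameValue-style check: numerically equal and same zero sign.
  bool same = (pz == nz) && (std::signbit(pz) == std::signbit(nz));
  std::printf("SameValue(+0, -0): %d\n", same ? 1 : 0);  // 0
  return 0;
}

The intrinsic form avoids the floating-point division; IsMinusZero is also added to the function list in runtime.h below, presumably so the optimizing pipeline can treat it as an inlineable intrinsic.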
@@ -663,7 +665,7 @@ function DefaultString(x) {
function ToPositiveInteger(x, rangeErrorName) {
var i = TO_INTEGER(x);
- if (i < 0) throw %MakeRangeError(rangeErrorName);
+ if (i < 0) throw MakeRangeError(rangeErrorName);
return i;
}
diff --git a/chromium/v8/src/safepoint-table.cc b/chromium/v8/src/safepoint-table.cc
index b56556572eb..beecb27582d 100644
--- a/chromium/v8/src/safepoint-table.cc
+++ b/chromium/v8/src/safepoint-table.cc
@@ -83,7 +83,7 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
}
-void SafepointTable::PrintEntry(unsigned index) const {
+void SafepointTable::PrintEntry(unsigned index, FILE* out) const {
disasm::NameConverter converter;
SafepointEntry entry = GetEntry(index);
uint8_t* bits = entry.bits();
@@ -93,25 +93,25 @@ void SafepointTable::PrintEntry(unsigned index) const {
ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
int last = entry_size_ - 1;
- for (int i = first; i < last; i++) PrintBits(bits[i], kBitsPerByte);
+ for (int i = first; i < last; i++) PrintBits(out, bits[i], kBitsPerByte);
int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
- PrintBits(bits[last], last_bits);
+ PrintBits(out, bits[last], last_bits);
// Print the registers (if any).
if (!entry.HasRegisters()) return;
for (int j = 0; j < kNumSafepointRegisters; j++) {
if (entry.HasRegisterAt(j)) {
- PrintF(" | %s", converter.NameOfCPURegister(j));
+ PrintF(out, " | %s", converter.NameOfCPURegister(j));
}
}
}
}
-void SafepointTable::PrintBits(uint8_t byte, int digits) {
+void SafepointTable::PrintBits(FILE* out, uint8_t byte, int digits) {
ASSERT(digits >= 0 && digits <= kBitsPerByte);
for (int i = 0; i < digits; i++) {
- PrintF("%c", ((byte & (1 << i)) == 0) ? '0' : '1');
+ PrintF(out, "%c", ((byte & (1 << i)) == 0) ? '0' : '1');
}
}
diff --git a/chromium/v8/src/safepoint-table.h b/chromium/v8/src/safepoint-table.h
index fc8bf7a411a..ea35253ff84 100644
--- a/chromium/v8/src/safepoint-table.h
+++ b/chromium/v8/src/safepoint-table.h
@@ -126,7 +126,7 @@ class SafepointTable BASE_EMBEDDED {
// Returns the entry for the given pc.
SafepointEntry FindEntry(Address pc) const;
- void PrintEntry(unsigned index) const;
+ void PrintEntry(unsigned index, FILE* out = stdout) const;
private:
static const uint8_t kNoRegisters = 0xFF;
@@ -149,7 +149,7 @@ class SafepointTable BASE_EMBEDDED {
return GetPcOffsetLocation(index) + kPcSize;
}
- static void PrintBits(uint8_t byte, int digits);
+ static void PrintBits(FILE* out, uint8_t byte, int digits);
DisallowHeapAllocation no_allocation_;
Code* code_;
diff --git a/chromium/v8/src/sampler.cc b/chromium/v8/src/sampler.cc
index 0aaa1e9b77e..684ef486c7d 100644
--- a/chromium/v8/src/sampler.cc
+++ b/chromium/v8/src/sampler.cc
@@ -216,11 +216,7 @@ class Sampler::PlatformData : public PlatformDataCommon {
class SimulatorHelper {
public:
inline bool Init(Sampler* sampler, Isolate* isolate) {
- ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
- Isolate::PerIsolateThreadData* per_thread_data = isolate->
- FindPerThreadDataForThread(thread_id);
- if (!per_thread_data) return false;
- simulator_ = per_thread_data->simulator();
+ simulator_ = isolate->thread_local_top()->simulator_;
// Check if there is active simulator.
return simulator_ != NULL;
}
diff --git a/chromium/v8/src/scanner.cc b/chromium/v8/src/scanner.cc
index 8b7cb569bdd..26f840b23a5 100644
--- a/chromium/v8/src/scanner.cc
+++ b/chromium/v8/src/scanner.cc
@@ -27,10 +27,14 @@
// Features shared by parsing and pre-parsing scanners.
+#include <cmath>
+
#include "scanner.h"
#include "../include/v8stdint.h"
#include "char-predicates-inl.h"
+#include "conversions-inl.h"
+#include "list-inl.h"
namespace v8 {
namespace internal {
@@ -1108,4 +1112,140 @@ bool Scanner::ScanRegExpFlags() {
return true;
}
+
+int DuplicateFinder::AddAsciiSymbol(Vector<const char> key, int value) {
+ return AddSymbol(Vector<const byte>::cast(key), true, value);
+}
+
+
+int DuplicateFinder::AddUtf16Symbol(Vector<const uint16_t> key, int value) {
+ return AddSymbol(Vector<const byte>::cast(key), false, value);
+}
+
+
+int DuplicateFinder::AddSymbol(Vector<const byte> key,
+ bool is_ascii,
+ int value) {
+ uint32_t hash = Hash(key, is_ascii);
+ byte* encoding = BackupKey(key, is_ascii);
+ HashMap::Entry* entry = map_.Lookup(encoding, hash, true);
+ int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ entry->value =
+ reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+ return old_value;
+}
+
+
+int DuplicateFinder::AddNumber(Vector<const char> key, int value) {
+ ASSERT(key.length() > 0);
+ // Quick check for already being in canonical form.
+ if (IsNumberCanonical(key)) {
+ return AddAsciiSymbol(key, value);
+ }
+
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
+ double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
+ int length;
+ const char* string;
+ if (!std::isfinite(double_value)) {
+ string = "Infinity";
+ length = 8; // strlen("Infinity");
+ } else {
+ string = DoubleToCString(double_value,
+ Vector<char>(number_buffer_, kBufferSize));
+ length = StrLength(string);
+ }
+ return AddSymbol(Vector<const byte>(reinterpret_cast<const byte*>(string),
+ length), true, value);
+}
+
+
+bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) {
+ // Test for a safe approximation of number literals that are already
+ // in canonical form: max 15 digits, no leading zeroes, except an
+ // integer part that is a single zero, and no trailing zeros below
+ // the decimal point.
+ int pos = 0;
+ int length = number.length();
+ if (number.length() > 15) return false;
+ if (number[pos] == '0') {
+ pos++;
+ } else {
+ while (pos < length &&
+ static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++;
+ }
+ if (length == pos) return true;
+ if (number[pos] != '.') return false;
+ pos++;
+ bool invalid_last_digit = true;
+ while (pos < length) {
+ byte digit = number[pos] - '0';
+ if (digit > '9' - '0') return false;
+ invalid_last_digit = (digit == 0);
+ pos++;
+ }
+ return !invalid_last_digit;
+}
+
+
+uint32_t DuplicateFinder::Hash(Vector<const byte> key, bool is_ascii) {
+ // Primitive hash function, almost identical to the one used
+ // for strings (except that it's seeded by the length and ASCII-ness).
+ int length = key.length();
+ uint32_t hash = (length << 1) | (is_ascii ? 1 : 0);
+ for (int i = 0; i < length; i++) {
+ uint32_t c = key[i];
+ hash = (hash + c) * 1025;
+ hash ^= (hash >> 6);
+ }
+ return hash;
+}
+
+
+bool DuplicateFinder::Match(void* first, void* second) {
+ // Decode lengths.
+ // Length + ASCII-bit is encoded as base 128, most significant heptet first,
+ // with the high bit being non-zero while there are more heptets.
+ // The value encodes the number of bytes following, and whether the original
+ // was ASCII.
+ byte* s1 = reinterpret_cast<byte*>(first);
+ byte* s2 = reinterpret_cast<byte*>(second);
+ uint32_t length_ascii_field = 0;
+ byte c1;
+ do {
+ c1 = *s1;
+ if (c1 != *s2) return false;
+ length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f);
+ s1++;
+ s2++;
+ } while ((c1 & 0x80) != 0);
+ int length = static_cast<int>(length_ascii_field >> 1);
+ return memcmp(s1, s2, length) == 0;
+}
+
+
+byte* DuplicateFinder::BackupKey(Vector<const byte> bytes,
+ bool is_ascii) {
+ uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0);
+ backing_store_.StartSequence();
+ // Emit ascii_length as a base-128 encoded number, with the high bit set
+ // on the byte of every heptet except the last, least significant, one.
+ if (ascii_length >= (1 << 7)) {
+ if (ascii_length >= (1 << 14)) {
+ if (ascii_length >= (1 << 21)) {
+ if (ascii_length >= (1 << 28)) {
+ backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u));
+ }
+ backing_store_.Add(static_cast<byte>(ascii_length & 0x7f));
+
+ backing_store_.AddBlock(bytes);
+ return backing_store_.EndSequence().start();
+}
+
} } // namespace v8::internal
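
A usage note on AddNumber: canonicalization makes numeric object-literal keys collide exactly when ToString(ToNumber(literal)) collides, so "0x10", "16" and "16.0" all map to one key. A rough standalone approximation (strtod and "%g" stand in for V8's StringToDouble and DoubleToCString and do not match their output in every case):

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Parse the literal, then print it back: approximates ToString(ToNumber(x)).
static void Canonical(const char* literal, char* out, size_t n) {
  double value = std::strtod(literal, NULL);  // C99 strtod accepts hex too
  std::snprintf(out, n, "%g", value);
}

int main() {
  char a[32], b[32];
  Canonical("0x10", a, sizeof(a));
  Canonical("16.0", b, sizeof(b));
  // Both become "16", so a preparser using DuplicateFinder would flag an
  // object literal containing both keys as having a duplicate property.
  std::printf("%s %s duplicate=%d\n", a, b, std::strcmp(a, b) == 0 ? 1 : 0);
  return 0;
}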
diff --git a/chromium/v8/src/scanner.h b/chromium/v8/src/scanner.h
index d7328085b79..3cefc833ac3 100644
--- a/chromium/v8/src/scanner.h
+++ b/chromium/v8/src/scanner.h
@@ -34,6 +34,8 @@
#include "char-predicates.h"
#include "checks.h"
#include "globals.h"
+#include "hashmap.h"
+#include "list.h"
#include "token.h"
#include "unicode-inl.h"
#include "utils.h"
@@ -121,9 +123,10 @@ class Utf16CharacterStream {
};
-class UnicodeCache {
// ---------------------------------------------------------------------
// Caching predicates used by scanners.
+
+class UnicodeCache {
public:
UnicodeCache() {}
typedef unibrow::Utf8Decoder<512> Utf8Decoder;
@@ -148,6 +151,56 @@ class UnicodeCache {
};
+// ---------------------------------------------------------------------
+// DuplicateFinder discovers duplicate symbols.
+
+class DuplicateFinder {
+ public:
+ explicit DuplicateFinder(UnicodeCache* constants)
+ : unicode_constants_(constants),
+ backing_store_(16),
+ map_(&Match) { }
+
+ int AddAsciiSymbol(Vector<const char> key, int value);
+ int AddUtf16Symbol(Vector<const uint16_t> key, int value);
+ // Add a number literal by converting it (if necessary)
+ // to the string that ToString(ToNumber(literal)) would generate,
+ // and then adding that string with AddAsciiSymbol.
+ // This string is the actual value used as the key in an object literal,
+ // and the one that must be distinct from the other keys.
+ int AddNumber(Vector<const char> key, int value);
+
+ private:
+ int AddSymbol(Vector<const byte> key, bool is_ascii, int value);
+ // Backs up the key and its length in the backing store.
+ // The backup is stored with a base-128 encoding of the
+ // length (plus a bit saying whether the string is ASCII),
+ // followed by the bytes of the key.
+ byte* BackupKey(Vector<const byte> key, bool is_ascii);
+
+ // Compare two encoded keys (both pointing into the backing store)
+ // for having the same base-128 encoded lengths and ASCII-ness,
+ // and then having the same 'length' bytes following.
+ static bool Match(void* first, void* second);
+ // Creates a hash from a sequence of bytes.
+ static uint32_t Hash(Vector<const byte> key, bool is_ascii);
+ // Checks whether a string containing a JS number is its canonical
+ // form.
+ static bool IsNumberCanonical(Vector<const char> key);
+
+ // Size of buffer. Sufficient for using it to call DoubleToCString
+ // from conversions.h.
+ static const int kBufferSize = 100;
+
+ UnicodeCache* unicode_constants_;
+ // Backing store used to store strings used as hashmap keys.
+ SequenceCollector<unsigned char> backing_store_;
+ HashMap map_;
+ // Buffer used for string->number->canonical string conversions.
+ char number_buffer_[kBufferSize];
+};
+
+
// ----------------------------------------------------------------------------
// LiteralBuffer - Collector of chars of literals.
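
The length header that BackupKey writes and Match re-parses is a big-endian base-128 varint of (length << 1 | is_ascii). A standalone encode/decode pair in the same format (the std::vector buffer simplifies away SequenceCollector):

#include <cstdint>
#include <cstdio>
#include <vector>

typedef uint8_t byte;

// Emit (length << 1 | is_ascii) as BackupKey does: most significant heptet
// first, high bit set on every heptet except the last.
static void EncodeLengthHeader(uint32_t length, bool is_ascii,
                               std::vector<byte>* out) {
  uint32_t value = (length << 1) | (is_ascii ? 1 : 0);
  for (int shift = 28; shift > 0; shift -= 7) {
    if (value >= (1u << shift)) {
      out->push_back(static_cast<byte>((value >> shift) | 0x80));
    }
  }
  out->push_back(static_cast<byte>(value & 0x7f));
}

// Walk the header the way Match does, recovering length and ASCII-ness.
static uint32_t DecodeLengthHeader(const byte* p, bool* is_ascii) {
  uint32_t value = 0;
  byte c;
  do {
    c = *p++;
    value = (value << 7) | (c & 0x7f);
  } while ((c & 0x80) != 0);
  *is_ascii = (value & 1) != 0;
  return value >> 1;
}

int main() {
  std::vector<byte> buf;
  EncodeLengthHeader(300, true, &buf);
  bool is_ascii = false;
  uint32_t length = DecodeLengthHeader(&buf[0], &is_ascii);
  std::printf("header=%u bytes, length=%u, ascii=%d\n",
              static_cast<unsigned>(buf.size()), length, is_ascii ? 1 : 0);
  return 0;
}

Two stored keys can then be compared with a flat byte walk: the headers must agree byte for byte (hence same length and ASCII-ness) before a memcmp of the payload settles the match.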
diff --git a/chromium/v8/src/scopeinfo.cc b/chromium/v8/src/scopeinfo.cc
index ba138f2adda..03e69bf3842 100644
--- a/chromium/v8/src/scopeinfo.cc
+++ b/chromium/v8/src/scopeinfo.cc
@@ -32,8 +32,6 @@
#include "scopeinfo.h"
#include "scopes.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -363,26 +361,25 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
}
-bool ScopeInfo::CopyContextLocalsToScopeObject(
- Isolate* isolate,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- int local_count = ContextLocalCount();
+bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
+ Isolate* isolate = scope_info->GetIsolate();
+ int local_count = scope_info->ContextLocalCount();
if (local_count == 0) return true;
// Fill all context locals to the context extension.
- int start = ContextLocalNameEntriesIndex();
+ int start = scope_info->ContextLocalNameEntriesIndex();
int end = start + local_count;
for (int i = start; i < end; ++i) {
int context_index = Context::MIN_CONTEXT_SLOTS + i - start;
- RETURN_IF_EMPTY_HANDLE_VALUE(
+ Handle<Object> result = Runtime::SetObjectProperty(
isolate,
- SetProperty(isolate,
- scope_object,
- Handle<String>(String::cast(get(i))),
- Handle<Object>(context->get(context_index), isolate),
- ::NONE,
- kNonStrictMode),
- false);
+ scope_object,
+ Handle<String>(String::cast(scope_info->get(i))),
+ Handle<Object>(context->get(context_index), isolate),
+ ::NONE,
+ kNonStrictMode);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, false);
}
return true;
}
diff --git a/chromium/v8/src/scopes.cc b/chromium/v8/src/scopes.cc
index ce1741a623a..fefc696d1a6 100644
--- a/chromium/v8/src/scopes.cc
+++ b/chromium/v8/src/scopes.cc
@@ -35,8 +35,6 @@
#include "messages.h"
#include "scopeinfo.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -437,8 +435,8 @@ Variable* Scope::LookupFunctionVar(Handle<String> name,
this, name, mode, true /* is valid LHS */,
Variable::NORMAL, kCreatedInitialized);
VariableProxy* proxy = factory->NewVariableProxy(var);
- VariableDeclaration* declaration =
- factory->NewVariableDeclaration(proxy, mode, this);
+ VariableDeclaration* declaration = factory->NewVariableDeclaration(
+ proxy, mode, this, RelocInfo::kNoPosition);
DeclareFunctionVar(declaration);
var->AllocateTo(Variable::CONTEXT, index);
return var;
@@ -1302,7 +1300,7 @@ void Scope::AllocateParameterLocals() {
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
- ASSERT(!var->IsVariable(isolate_->factory()->result_string()) ||
+ ASSERT(!var->IsVariable(isolate_->factory()->dot_result_string()) ||
!var->IsStackLocal());
if (var->IsUnallocated() && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
diff --git a/chromium/v8/src/serialize.cc b/chromium/v8/src/serialize.cc
index d05dd261227..a0a66f9e9f1 100644
--- a/chromium/v8/src/serialize.cc
+++ b/chromium/v8/src/serialize.cc
@@ -297,15 +297,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
RUNTIME_ENTRY,
1,
"Runtime::PerformGC");
- Add(ExternalReference::fill_heap_number_with_random_function(
- isolate).address(),
- RUNTIME_ENTRY,
- 2,
- "V8::FillHeapNumberWithRandom");
- Add(ExternalReference::random_uint32_function(isolate).address(),
- RUNTIME_ENTRY,
- 3,
- "V8::Random");
Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
RUNTIME_ENTRY,
4,
@@ -325,8 +316,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
7,
"IncrementalMarking::RecordWrite");
-
-
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
UNCLASSIFIED,
@@ -532,55 +521,51 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
52,
"cpu_features");
- Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
+ Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
UNCLASSIFIED,
53,
- "Heap::NewSpaceAllocationTopAddress");
- Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
+ "Runtime::AllocateInNewSpace");
+ Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
UNCLASSIFIED,
54,
- "Heap::NewSpaceAllocationLimitAddress");
- Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
- UNCLASSIFIED,
- 55,
- "Runtime::AllocateInNewSpace");
+ "Runtime::AllocateInTargetSpace");
Add(ExternalReference::old_pointer_space_allocation_top_address(
isolate).address(),
UNCLASSIFIED,
- 56,
+ 55,
"Heap::OldPointerSpaceAllocationTopAddress");
Add(ExternalReference::old_pointer_space_allocation_limit_address(
isolate).address(),
UNCLASSIFIED,
- 57,
+ 56,
"Heap::OldPointerSpaceAllocationLimitAddress");
- Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(),
- UNCLASSIFIED,
- 58,
- "Runtime::AllocateInOldPointerSpace");
Add(ExternalReference::old_data_space_allocation_top_address(
isolate).address(),
UNCLASSIFIED,
- 59,
+ 57,
"Heap::OldDataSpaceAllocationTopAddress");
Add(ExternalReference::old_data_space_allocation_limit_address(
isolate).address(),
UNCLASSIFIED,
- 60,
+ 58,
"Heap::OldDataSpaceAllocationLimitAddress");
- Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(),
- UNCLASSIFIED,
- 61,
- "Runtime::AllocateInOldDataSpace");
Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
address(),
UNCLASSIFIED,
- 62,
+ 59,
"Heap::NewSpaceAllocationLimitAddress");
Add(ExternalReference::allocation_sites_list_address(isolate).address(),
UNCLASSIFIED,
- 63,
+ 60,
"Heap::allocation_sites_list_address()");
+ Add(ExternalReference::address_of_uint32_bias().address(),
+ UNCLASSIFIED,
+ 61,
+ "uint32_bias");
+ Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
+ UNCLASSIFIED,
+ 62,
+ "Code::MarkCodeAsExecuted");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
@@ -591,7 +576,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, entry, "lazy_deopt");
}
}
@@ -809,6 +794,15 @@ Deserializer::Deserializer(SnapshotByteSource* source)
}
+void Deserializer::FlushICacheForNewCodeObjects() {
+ PageIterator it(isolate_->heap()->code_space());
+ while (it.has_next()) {
+ Page* p = it.next();
+ CPU::FlushICache(p->area_start(), p->area_end() - p->area_start());
+ }
+}
+
+
void Deserializer::Deserialize(Isolate* isolate) {
isolate_ = isolate;
ASSERT(isolate_ != NULL);
@@ -835,6 +829,8 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->undefined_value());
}
+ isolate_->heap()->InitializeWeakObjectToCodeTable();
+
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
@@ -843,6 +839,8 @@ void Deserializer::Deserialize(Isolate* isolate) {
}
}
+ FlushICacheForNewCodeObjects();
+
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate_, LogCodeObjects());
LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
@@ -1284,7 +1282,6 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
- ASSERT(isolate_->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
@@ -1317,6 +1314,14 @@ void PartialSerializer::Serialize(Object** object) {
}
+bool Serializer::ShouldBeSkipped(Object** current) {
+ Object** roots = isolate()->heap()->roots_array_start();
+ return current == &roots[Heap::kStoreBufferTopRootIndex]
+ || current == &roots[Heap::kStackLimitRootIndex]
+ || current == &roots[Heap::kRealStackLimitRootIndex];
+}
+
+
void Serializer::VisitPointers(Object** start, Object** end) {
Isolate* isolate = this->isolate();
@@ -1325,8 +1330,7 @@ void Serializer::VisitPointers(Object** start, Object** end) {
root_index_wave_front_ =
Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
}
- if (reinterpret_cast<Address>(current) ==
- isolate->heap()->store_buffer()->TopAddress()) {
+ if (ShouldBeSkipped(current)) {
sink_->Put(kSkip, "Skip");
sink_->PutInt(kPointerSize, "SkipOneWord");
} else if ((*current)->IsSmi()) {
@@ -1656,90 +1660,71 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
- Object** current = rinfo->target_object_address();
-
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
- HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
- serializer_->SerializeObject(*current, representation, kStartOfObject, skip);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ Object* object = rinfo->target_object();
+ serializer_->SerializeObject(object, how_to_code, kStartOfObject, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
- Address* end) {
- Address references_start = reinterpret_cast<Address>(start);
- int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
-
- for (Address* current = start; current < end; current++) {
- sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
- sink_->PutInt(skip, "SkipB4ExternalRef");
- skip = 0;
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
- }
- bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
+void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
+ int skip = OutputRawData(reinterpret_cast<Address>(p),
+ kCanReturnSkipInsteadOfSkipping);
+ sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
+ sink_->PutInt(skip, "SkipB4ExternalRef");
+ Address target = *p;
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
+ bytes_processed_so_far_ += kPointerSize;
}
void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
- Address references_start = rinfo->target_address_address();
- int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping);
-
- Address* current = rinfo->target_reference_address();
- int representation = rinfo->IsCodedSpecially() ?
- kFromCode + kStartOfObject : kPlain + kStartOfObject;
- sink_->Put(kExternalReference + representation, "ExternalRef");
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
- int reference_id = serializer_->EncodeExternalReference(*current);
- sink_->PutInt(reference_id, "reference id");
+ Address target = rinfo->target_reference();
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- Address target_start = rinfo->target_address_address();
- int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
- Address target = rinfo->target_address();
- uint32_t encoding = serializer_->EncodeExternalReference(target);
- CHECK(target == NULL ? encoding == 0 : encoding != 0);
- int representation;
- // Can't use a ternary operator because of gcc.
- if (rinfo->IsCodedSpecially()) {
- representation = kStartOfObject + kFromCode;
- } else {
- representation = kStartOfObject + kPlain;
- }
- sink_->Put(kExternalReference + representation, "ExternalReference");
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
+ sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
- sink_->PutInt(encoding, "reference id");
+ Address target = rinfo->target_address();
+ sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
- CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Address target_start = rinfo->target_address_address();
- int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping);
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- serializer_->SerializeObject(target, kFromCode, kInnerPointer, skip);
+ int skip = OutputRawData(rinfo->target_address_address(),
+ kCanReturnSkipInsteadOfSkipping);
+ Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
bytes_processed_so_far_ += rinfo->target_address_size();
}
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
- Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
- serializer_->SerializeObject(target, kPlain, kInnerPointer, skip);
+ Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
bytes_processed_so_far_ += kPointerSize;
}
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
- ASSERT(rinfo->rmode() == RelocInfo::CELL);
- Cell* cell = Cell::cast(rinfo->target_cell());
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
- serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip);
+ Cell* object = Cell::cast(rinfo->target_cell());
+ serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
}
@@ -1768,10 +1753,29 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
}
+static Code* CloneCodeObject(HeapObject* code) {
+ Address copy = new byte[code->Size()];
+ OS::MemCopy(copy, code->address(), code->Size());
+ return Code::cast(HeapObject::FromAddress(copy));
+}
+
+
+static void WipeOutRelocations(Code* code) {
+ int mode_mask =
+ RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ it.rinfo()->WipeOut();
+ }
+}
+
+
int Serializer::ObjectSerializer::OutputRawData(
Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
Address object_start = object_->address();
- Address base = object_start + bytes_processed_so_far_;
+ int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
int bytes_to_output = to_skip;
@@ -1801,10 +1805,22 @@ int Serializer::ObjectSerializer::OutputRawData(
sink_->Put(kRawData, "RawData");
sink_->PutInt(bytes_to_output, "length");
}
+
+ // To make snapshots reproducible, we need to wipe out all pointers in code.
+ if (code_object_) {
+ Code* code = CloneCodeObject(object_);
+ WipeOutRelocations(code);
+ // We need to wipe out the header fields *after* wiping out the
+ // relocations, because some of these fields are needed for the latter.
+ code->WipeOutHeader();
+ object_start = code->address();
+ }
+
+ const char* description = code_object_ ? "Code" : "Byte";
for (int i = 0; i < bytes_to_output; i++) {
- unsigned int data = base[i];
- sink_->PutSection(data, "Byte");
+ sink_->PutSection(object_start[base + i], description);
}
+ if (code_object_) delete[] object_start;
}
if (to_skip != 0 && return_skip == kIgnoringReturn) {
sink_->Put(kSkip, "Skip");
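// Sketch of the skip bookkeeping in OutputRawData (opcode value assumed): a
// skip is either returned so the caller folds it into its next opcode, or
// emitted as an explicit kSkip record when the caller cannot use it.
#include <utility>
#include <vector>

enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
static const int kSkip = 0x20;  // assumed opcode value

struct ToySink {
  std::vector<std::pair<int, int> > out;  // (opcode, argument) records
  void Put(int op, int arg) { out.push_back(std::make_pair(op, arg)); }
};

int OutputSkip(ToySink* sink, int to_skip, ReturnSkip mode) {
  if (to_skip != 0 && mode == kIgnoringReturn) {
    sink->Put(kSkip, to_skip);  // explicit skip record
    return 0;                   // nothing left for the caller
  }
  return to_skip;               // caller folds the skip into its next opcode
}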
diff --git a/chromium/v8/src/serialize.h b/chromium/v8/src/serialize.h
index 020a744fc0e..ee9df39ad86 100644
--- a/chromium/v8/src/serialize.h
+++ b/chromium/v8/src/serialize.h
@@ -339,10 +339,6 @@ class Deserializer: public SerializerDeserializer {
private:
virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitExternalReferences(Address* start, Address* end) {
- UNREACHABLE();
- }
-
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
UNREACHABLE();
}
@@ -366,6 +362,10 @@ class Deserializer: public SerializerDeserializer {
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
+ HeapProfiler* profiler = isolate_->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->AllocationEvent(address, size);
+ }
return address;
}
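// Sketch of the hook added in Deserializer::Allocate (ToyProfiler is a
// stand-in for V8's HeapProfiler): each bump allocation made while unpacking
// the snapshot is reported, so allocation tracking also sees snapshot objects.
struct ToyProfiler {
  bool tracking;
  ToyProfiler() : tracking(false) {}
  bool is_tracking_allocations() const { return tracking; }
  void AllocationEvent(char* address, int size) { /* record (address, size) */ }
};

char* Allocate(char** high_water, int size, ToyProfiler* profiler) {
  char* address = *high_water;
  *high_water = address + size;  // bump the per-space high-water mark
  if (profiler->is_tracking_allocations()) {
    profiler->AllocationEvent(address, size);
  }
  return address;
}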
@@ -377,6 +377,7 @@ class Deserializer: public SerializerDeserializer {
return HeapObject::FromAddress(high_water_[space] - offset);
}
+ void FlushICacheForNewCodeObjects();
// Cached current isolate.
Isolate* isolate_;
@@ -517,7 +518,7 @@ class Serializer : public SerializerDeserializer {
void Serialize();
void VisitPointers(Object** start, Object** end);
void VisitEmbeddedPointer(RelocInfo* target);
- void VisitExternalReferences(Address* start, Address* end);
+ void VisitExternalReference(Address* p);
void VisitExternalReference(RelocInfo* rinfo);
void VisitCodeTarget(RelocInfo* target);
void VisitCodeEntry(Address entry_address);
@@ -569,6 +570,10 @@ class Serializer : public SerializerDeserializer {
int SpaceAreaSize(int space);
+ // Some roots should not be serialized, because their actual values depend
+ // on absolute addresses and are reset after deserialization anyway.
+ bool ShouldBeSkipped(Object** current);
+
Isolate* isolate_;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references.
diff --git a/chromium/v8/src/snapshot-common.cc b/chromium/v8/src/snapshot-common.cc
index 96034e352bc..4bdf63ceddb 100644
--- a/chromium/v8/src/snapshot-common.cc
+++ b/chromium/v8/src/snapshot-common.cc
@@ -102,10 +102,19 @@ bool Snapshot::Initialize(const char* snapshot_file) {
DeleteArray(str);
return success;
} else if (size_ > 0) {
+ ElapsedTimer timer;
+ if (FLAG_profile_deserialization) {
+ timer.Start();
+ }
SnapshotByteSource source(raw_data_, raw_size_);
Deserializer deserializer(&source);
ReserveSpaceForLinkedInSnapshot(&deserializer);
- return V8::Initialize(&deserializer);
+ bool success = V8::Initialize(&deserializer);
+ if (FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
+ }
+ return success;
}
return false;
}
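// Sketch of the timing pattern added to Snapshot::Initialize, using
// std::chrono in place of V8's ElapsedTimer (the flag name is kept, but here
// it is an ordinary bool):
#include <chrono>
#include <cstdio>

static bool FLAG_profile_deserialization = true;

bool InitializeWithTiming(bool (*init)()) {
  std::chrono::steady_clock::time_point start;
  if (FLAG_profile_deserialization) start = std::chrono::steady_clock::now();
  bool success = init();
  if (FLAG_profile_deserialization) {
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - start).count();
    std::printf("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
  }
  return success;
}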
diff --git a/chromium/v8/src/spaces-inl.h b/chromium/v8/src/spaces-inl.h
index be2ae2a57db..87de29c4a5d 100644
--- a/chromium/v8/src/spaces-inl.h
+++ b/chromium/v8/src/spaces-inl.h
@@ -28,6 +28,7 @@
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
+#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"
@@ -263,11 +264,11 @@ void Page::set_prev_page(Page* page) {
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit) return NULL;
+ if (new_top > allocation_info_.limit()) return NULL;
- allocation_info_.top = new_top;
+ allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
@@ -311,29 +312,29 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
#ifdef DEBUG
// If we are stressing compaction we waste some memory in new space
// in order to get more frequent GCs.
if (FLAG_stress_compaction && !heap()->linear_allocation()) {
- if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+ if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
int filler_size = size_in_bytes * 4;
for (int i = 0; i < filler_size; i += kPointerSize) {
*(reinterpret_cast<Object**>(old_top + i)) =
heap()->one_pointer_filler_map();
}
old_top += filler_size;
- allocation_info_.top += filler_size;
+ allocation_info_.set_top(allocation_info_.top() + filler_size);
}
}
#endif
- if (allocation_info_.limit - old_top < size_in_bytes) {
+ if (allocation_info_.limit() - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes);
}
- Object* obj = HeapObject::FromAddress(old_top);
- allocation_info_.top += size_in_bytes;
+ HeapObject* obj = HeapObject::FromAddress(old_top);
+ allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return obj;
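// A freestanding sketch of the bump-pointer fast path above, using the same
// encapsulated top()/limit() style the patch introduces (toy types, not V8's):
#include <cstddef>

class ToyAllocationInfo {
 public:
  ToyAllocationInfo() : top_(NULL), limit_(NULL) {}
  char* top() const { return top_; }
  char* limit() const { return limit_; }
  void set_top(char* top) { top_ = top; }
  void set_limit(char* limit) { limit_ = limit; }
 private:
  char* top_;    // current allocation top
  char* limit_;  // current allocation limit
};

// Returns NULL when the linear area is exhausted and a slow path is needed.
char* AllocateLinearly(ToyAllocationInfo* info, int size_in_bytes) {
  char* current_top = info->top();
  char* new_top = current_top + size_in_bytes;
  if (new_top > info->limit()) return NULL;
  info->set_top(new_top);
  return current_top;
}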
diff --git a/chromium/v8/src/spaces.cc b/chromium/v8/src/spaces.cc
index 2faf41912e8..ee19a02967d 100644
--- a/chromium/v8/src/spaces.cc
+++ b/chromium/v8/src/spaces.cc
@@ -29,6 +29,7 @@
#include "macro-assembler.h"
#include "mark-compact.h"
+#include "msan.h"
#include "platform.h"
namespace v8 {
@@ -717,6 +718,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
executable,
owner);
result->set_reserved_memory(&reservation);
+ MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
return result;
}
@@ -958,8 +960,8 @@ PagedSpace::PagedSpace(Heap* heap,
* AreaSize();
accounting_stats_.Clear();
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
anchor_.InitializeAsAnchor(this);
}
@@ -988,7 +990,7 @@ void PagedSpace::TearDown() {
size_t PagedSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
PageIterator it(this);
while (it.has_next()) {
@@ -1056,7 +1058,7 @@ intptr_t PagedSpace::SizeOfFirstPage() {
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 64 * kPointerSize * KB;
+ size = 72 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
@@ -1077,7 +1079,12 @@ intptr_t PagedSpace::SizeOfFirstPage() {
// upgraded to handle small pages.
size = AreaSize();
} else {
- size = 384 * KB;
+#if V8_TARGET_ARCH_MIPS
+ // TODO(plind): Investigate larger code stubs size on MIPS.
+ size = 480 * KB;
+#else
+ size = 416 * KB;
+#endif
}
break;
default:
@@ -1115,6 +1122,11 @@ void PagedSpace::ResetFreeListStatistics() {
}
+void PagedSpace::IncreaseCapacity(int size) {
+ accounting_stats_.ExpandSpace(size);
+}
+
+
void PagedSpace::ReleasePage(Page* page, bool unlink) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
@@ -1135,8 +1147,9 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
DecreaseUnsweptFreeBytes(page);
}
- if (Page::FromAllocationTop(allocation_info_.top) == page) {
- allocation_info_.top = allocation_info_.limit = NULL;
+ if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
if (unlink) {
@@ -1163,12 +1176,12 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (was_swept_conservatively_) return;
bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
+ (allocation_info_.top() == allocation_info_.limit());
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
+ if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSweptPrecisely());
@@ -1279,8 +1292,8 @@ void NewSpace::TearDown() {
}
start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
to_space_.TearDown();
from_space_.TearDown();
@@ -1337,23 +1350,15 @@ void NewSpace::Shrink() {
}
}
}
- allocation_info_.limit = to_space_.page_high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::UpdateAllocationInfo() {
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
-
- // Lower limit during incremental marking.
- if (heap()->incremental_marking()->IsMarking() &&
- inline_allocation_limit_step() != 0) {
- Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
- }
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(to_space_.page_low());
+ allocation_info_.set_limit(to_space_.page_high());
+ UpdateInlineAllocationLimit(0);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1370,8 +1375,28 @@ void NewSpace::ResetAllocationInfo() {
}
+void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
+ if (heap()->inline_allocation_disabled()) {
+ // Lowest limit when linear allocation was disabled.
+ Address high = to_space_.page_high();
+ Address new_top = allocation_info_.top() + size_in_bytes;
+ allocation_info_.set_limit(Min(new_top, high));
+ } else if (inline_allocation_limit_step() == 0) {
+ // Normal limit is the end of the current page.
+ allocation_info_.set_limit(to_space_.page_high());
+ } else {
+ // Lower limit during incremental marking.
+ Address high = to_space_.page_high();
+ Address new_top = allocation_info_.top() + size_in_bytes;
+ Address new_limit = new_top + inline_allocation_limit_step_;
+ allocation_info_.set_limit(Min(new_limit, high));
+ }
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
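// Sketch of the three-way limit policy in UpdateInlineAllocationLimit (toy
// types): disabled linear allocation keeps the limit glued to top, a zero
// step uses the whole page, and a nonzero step keeps the limit a fixed
// distance ahead so incremental marking gets called back regularly.
#include <algorithm>

char* ComputeLimit(char* top, char* page_high, int size_in_bytes,
                   long step, bool inline_allocation_disabled) {
  if (inline_allocation_disabled) {
    return std::min(top + size_in_bytes, page_high);  // lowest possible limit
  } else if (step == 0) {
    return page_high;                                 // end of current page
  } else {
    return std::min(top + size_in_bytes + step, page_high);  // lowered limit
  }
}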
bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if (NewSpacePage::IsAtStart(top)) {
// The current page is already empty. Don't try to make another.
@@ -1403,18 +1428,17 @@ bool NewSpace::AddFreshPage() {
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
- Address new_top = old_top + size_in_bytes;
+ Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
- high);
+ if (allocation_info_.limit() < high) {
+ // Either the limit has been lowered because linear allocation was disabled
+ // or because incremental marking wants to get a chance to do a step. Set
+ // the new limit accordingly.
+ Address new_top = old_top + size_in_bytes;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+ UpdateInlineAllocationLimit(size_in_bytes);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
@@ -1502,6 +1526,7 @@ void SemiSpace::SetUp(Address start,
initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
capacity_ = initial_capacity;
maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+ maximum_committed_ = 0;
committed_ = false;
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
@@ -1520,22 +1545,21 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
ASSERT(!is_committed());
int pages = capacity_ / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - pages * Page::kPageSize;
- if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
capacity_,
executable())) {
return false;
}
- NewSpacePage* page = anchor();
- for (int i = 1; i <= pages; i++) {
+ NewSpacePage* current = anchor();
+ for (int i = 0; i < pages; i++) {
NewSpacePage* new_page =
- NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
- new_page->InsertAfter(page);
- page = new_page;
+ NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+ new_page->InsertAfter(current);
+ current = new_page;
}
+ SetCapacity(capacity_);
committed_ = true;
Reset();
return true;
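// Sketch of the layout change in SemiSpace::Commit/GrowTo: pages now live at
// start_ + i * kPageSize counting upward, so growing commits only the tail
// [start_ + capacity_, start_ + new_capacity). Toy values, not V8's.
#include <cstdio>

int main() {
  const long kPageSize = 4096;
  long start = 0x100000;               // assumed base address
  long capacity = 2 * kPageSize;       // currently committed
  long new_capacity = 4 * kPageSize;   // after GrowTo
  std::printf("commit delta: [%#lx, %#lx)\n",
              start + capacity, start + new_capacity);
  for (long i = 0; i < new_capacity / kPageSize; i++) {
    std::printf("page %ld at %#lx\n", i, start + i * kPageSize);
  }
  return 0;
}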
@@ -1577,20 +1601,18 @@ bool SemiSpace::GrowTo(int new_capacity) {
int pages_before = capacity_ / Page::kPageSize;
int pages_after = new_capacity / Page::kPageSize;
- Address end = start_ + maximum_capacity_;
- Address start = end - new_capacity;
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start, delta, executable())) {
+ start_ + capacity_, delta, executable())) {
return false;
}
- capacity_ = new_capacity;
+ SetCapacity(new_capacity);
NewSpacePage* last_page = anchor()->prev_page();
ASSERT(last_page != anchor());
- for (int i = pages_before + 1; i <= pages_after; i++) {
- Address page_address = end - i * Page::kPageSize;
+ for (int i = pages_before; i < pages_after; i++) {
+ Address page_address = start_ + i * Page::kPageSize;
NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
page_address,
this);
@@ -1610,28 +1632,23 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
ASSERT(new_capacity >= initial_capacity_);
ASSERT(new_capacity < capacity_);
if (is_committed()) {
- // Semispaces grow backwards from the end of their allocated capacity,
- // so we find the before and after start addresses relative to the
- // end of the space.
- Address space_end = start_ + maximum_capacity_;
- Address old_start = space_end - capacity_;
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
- if (!allocator->UncommitBlock(old_start, delta)) {
+ if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
return false;
}
int pages_after = new_capacity / Page::kPageSize;
NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+ NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
new_last_page->set_next_page(anchor());
anchor()->set_prev_page(new_last_page);
- ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+ ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
}
- capacity_ = new_capacity;
+ SetCapacity(new_capacity);
return true;
}
@@ -1694,6 +1711,14 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
}
+void SemiSpace::SetCapacity(int new_capacity) {
+ capacity_ = new_capacity;
+ if (capacity_ > maximum_committed_) {
+ maximum_committed_ = capacity_;
+ }
+}
+
+
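// Minimal sketch of the high-water tracking SetCapacity introduces: every
// capacity change funnels through one setter, which records the maximum ever
// committed as a side effect.
struct ToySemiSpace {
  long capacity_;
  long maximum_committed_;
  ToySemiSpace() : capacity_(0), maximum_committed_(0) {}
  void SetCapacity(long new_capacity) {
    capacity_ = new_capacity;
    if (capacity_ > maximum_committed_) maximum_committed_ = capacity_;
  }
};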
void SemiSpace::set_age_mark(Address mark) {
ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
age_mark_ = mark;
@@ -1975,7 +2000,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
size_t NewSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
size += from_space_.CommittedPhysicalMemory();
@@ -2359,7 +2384,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
int new_node_size = 0;
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
- owner_->SetTop(NULL, NULL);
+ owner_->SetTopAndLimit(NULL, NULL);
return NULL;
}
@@ -2384,26 +2409,31 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// a little of this again immediately - see below.
owner_->Allocate(new_node_size);
- if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ if (owner_->heap()->inline_allocation_disabled()) {
+ // Keep the linear allocation area empty if requested to do so; just
+ // return the area to the free list instead.
+ owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+ ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
+ } else if (bytes_left > kThreshold &&
+ owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+ FLAG_incremental_marking_steps) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + size_in_bytes + linear_size);
} else if (bytes_left > 0) {
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
} else {
// TODO(gc) Try not freeing linear allocation region when bytes_left
// are zero.
- owner_->SetTop(NULL, NULL);
+ owner_->SetTopAndLimit(NULL, NULL);
}
return new_node;
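// Sketch of how FreeList::Allocate carves up a node (the incremental-marking
// cap on the linear area is elided): the object takes [node, node + size),
// and the remainder either returns to the free list or becomes the new
// linear allocation area.
#include <cstddef>
#include <utility>

// Returns the new (top, limit) of the linear allocation area.
std::pair<char*, char*> CarveNode(char* node, int node_size, int size_in_bytes,
                                  bool inline_allocation_disabled) {
  int bytes_left = node_size - size_in_bytes;
  if (inline_allocation_disabled || bytes_left == 0) {
    // Remainder, if any, goes straight back to the free list; the linear
    // allocation area stays empty.
    return std::pair<char*, char*>(NULL, NULL);
  }
  return std::make_pair(node + size_in_bytes, node + node_size);
}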
@@ -2489,37 +2519,10 @@ intptr_t FreeList::SumFreeLists() {
// -----------------------------------------------------------------------------
// OldSpace implementation
-bool NewSpace::ReserveSpace(int bytes) {
- // We can't reliably unpack a partial snapshot that needs more new space
- // space than the minimum NewSpace size. The limit can be set lower than
- // the end of new space either because there is more space on the next page
- // or because we have lowered the limit in order to get periodic incremental
- // marking. The most reliable way to ensure that there is linear space is
- // to do the allocation, then rewind the limit.
- ASSERT(bytes <= InitialCapacity());
- MaybeObject* maybe = AllocateRaw(bytes);
- Object* object = NULL;
- if (!maybe->ToObject(&object)) return false;
- HeapObject* allocation = HeapObject::cast(object);
- Address top = allocation_info_.top;
- if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
- return true;
- }
- // There may be a borderline case here where the allocation succeeded, but
- // the limit and top have moved on to a new page. In that case we try again.
- return ReserveSpace(bytes);
-}
-
-
void PagedSpace::PrepareForMarkCompact() {
// We don't have a linear allocation area while sweeping. It will be restored
// on the first allocation after the sweep.
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- int old_linear_size = static_cast<int>(limit() - top());
- Free(top(), old_linear_size);
- SetTop(NULL, NULL);
+ EmptyAllocationInfo();
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
@@ -2546,28 +2549,6 @@ void PagedSpace::PrepareForMarkCompact() {
}
-bool PagedSpace::ReserveSpace(int size_in_bytes) {
- ASSERT(size_in_bytes <= AreaSize());
- ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top;
- Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit) return true;
-
- HeapObject* new_area = free_list_.Allocate(size_in_bytes);
- if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
- if (new_area == NULL) return false;
-
- int old_linear_size = static_cast<int>(limit() - top());
- // Mark the old linear allocation area with a free space so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- Free(top(), old_linear_size);
-
- SetTop(new_area->address(), new_area->address() + size_in_bytes);
- return true;
-}
-
-
intptr_t PagedSpace::SizeOfObjects() {
ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
@@ -2583,15 +2564,6 @@ void PagedSpace::RepairFreeListsAfterBoot() {
}
-// You have to call this last, since the implementation from PagedSpace
-// doesn't know that memory was 'promised' to large object space.
-bool LargeObjectSpace::ReserveSpace(int bytes) {
- return heap()->OldGenerationCapacityAvailable() >= bytes &&
- (!heap()->incremental_marking()->IsStopped() ||
- heap()->OldGenerationSpaceAvailable() >= bytes);
-}
-
-
bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
if (IsLazySweepingComplete()) return true;
@@ -2626,16 +2598,17 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top >= allocation_info_.limit) return;
+ if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+ if (Page::FromAllocationTop(allocation_info_.top())->
+ IsEvacuationCandidate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
- static_cast<int>(allocation_info_.limit - allocation_info_.top);
- heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+ heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
}
@@ -2685,6 +2658,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Try to expand the space and allocate in the new next page.
if (Expand()) {
+ ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
return free_list_.Allocate(size_in_bytes);
}
@@ -2844,23 +2818,6 @@ void PagedSpace::ReportStatistics() {
}
#endif
-// -----------------------------------------------------------------------------
-// FixedSpace implementation
-
-void FixedSpace::PrepareForMarkCompact() {
- // Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact();
-
- // During a non-compacting collection, everything below the linear
- // allocation pointer except wasted top-of-page blocks is considered
- // allocated and we will rediscover available bytes during the
- // collection.
- accounting_stats_.AllocateBytes(free_list_.available());
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
-
// -----------------------------------------------------------------------------
// MapSpace implementation
@@ -2936,6 +2893,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap,
bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
+ maximum_committed_ = 0;
page_count_ = 0;
objects_size_ = 0;
chunk_map_.Clear();
@@ -2982,6 +2940,10 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
page->set_next_page(first_page_);
first_page_ = page;
+ if (size_ > maximum_committed_) {
+ maximum_committed_ = size_;
+ }
+
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
diff --git a/chromium/v8/src/spaces.h b/chromium/v8/src/spaces.h
index 43f44a5c707..7c650a2aca5 100644
--- a/chromium/v8/src/spaces.h
+++ b/chromium/v8/src/spaces.h
@@ -782,6 +782,12 @@ class Page : public MemoryChunk {
// Object area size in bytes.
static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
+ // Maximum object size that fits in a page. Objects larger than that size are
+ // allocated in large object space and are never moved in memory. This also
+ // applies to new space allocation, since objects are never migrated from new
+ // space to large object space. Takes double alignment into account.
+ static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
+
// Maximum object size that fits in a page. Objects larger than that size
// are allocated in large object space and are never moved in memory. This
// also applies to new space allocation, since objects are never migrated
@@ -1317,18 +1323,53 @@ class PageIterator BASE_EMBEDDED {
// space.
class AllocationInfo {
public:
- AllocationInfo() : top(NULL), limit(NULL) {
+ AllocationInfo() : top_(NULL), limit_(NULL) {
+ }
+
+ INLINE(void set_top(Address top)) {
+ SLOW_ASSERT(top == NULL ||
+ (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+ top_ = top;
+ }
+
+ INLINE(Address top()) const {
+ SLOW_ASSERT(top_ == NULL ||
+ (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+ return top_;
+ }
+
+ Address* top_address() {
+ return &top_;
+ }
+
+ INLINE(void set_limit(Address limit)) {
+ SLOW_ASSERT(limit == NULL ||
+ (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+ limit_ = limit;
}
- Address top; // Current allocation top.
- Address limit; // Current allocation limit.
+ INLINE(Address limit()) const {
+ SLOW_ASSERT(limit_ == NULL ||
+ (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+ return limit_;
+ }
+
+ Address* limit_address() {
+ return &limit_;
+ }
#ifdef DEBUG
bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
- && (top <= limit);
+ return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
+ && (top_ <= limit_);
}
#endif
+
+ private:
+ // Current allocation top.
+ Address top_;
+ // Current allocation limit.
+ Address limit_;
};
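// Sketch of the alignment invariant the new SLOW_ASSERTs check: allocation
// top and limit are raw, word-aligned addresses, so the low tag bits must be
// clear (kHeapObjectTagMask assumed to be 3, i.e. a 2-bit tag).
#include <cassert>
#include <cstdint>

static const intptr_t kHeapObjectTagMask = 3;  // assumed tag mask

void AssertUntagged(void* address) {
  assert(address == 0 ||
         (reinterpret_cast<intptr_t>(address) & kHeapObjectTagMask) == 0);
}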
@@ -1353,6 +1394,7 @@ class AllocationStats BASE_EMBEDDED {
// Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
capacity_ = 0;
+ max_capacity_ = 0;
size_ = 0;
waste_ = 0;
}
@@ -1371,6 +1413,7 @@ class AllocationStats BASE_EMBEDDED {
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
+ intptr_t MaxCapacity() { return max_capacity_; }
intptr_t Size() { return size_; }
intptr_t Waste() { return waste_; }
@@ -1380,6 +1423,9 @@ class AllocationStats BASE_EMBEDDED {
void ExpandSpace(int size_in_bytes) {
capacity_ += size_in_bytes;
size_ += size_in_bytes;
+ if (capacity_ > max_capacity_) {
+ max_capacity_ = capacity_;
+ }
ASSERT(size_ >= 0);
}
@@ -1413,6 +1459,7 @@ class AllocationStats BASE_EMBEDDED {
private:
intptr_t capacity_;
+ intptr_t max_capacity_;
intptr_t size_;
intptr_t waste_;
};
@@ -1642,10 +1689,10 @@ class PagedSpace : public Space {
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
- virtual void RepairFreeListsAfterBoot();
+ void RepairFreeListsAfterBoot();
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
+ void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
@@ -1654,6 +1701,9 @@ class PagedSpace : public Space {
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
+ // The maximum amount of memory ever committed for this space.
+ intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
+
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
@@ -1707,19 +1757,23 @@ class PagedSpace : public Space {
virtual intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top; }
- Address limit() { return allocation_info_.limit; }
+ Address top() { return allocation_info_.top(); }
+ Address limit() { return allocation_info_.limit(); }
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
+ // The allocation top address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
- virtual bool ReserveSpace(int bytes);
-
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
@@ -1735,21 +1789,28 @@ class PagedSpace : public Space {
}
// Set space allocation info.
- void SetTop(Address top, Address limit) {
+ void SetTopAndLimit(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = top;
- allocation_info_.limit = limit;
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(top);
+ allocation_info_.set_limit(limit);
+ }
+
+ // Empty space allocation info, returning unused area to free list.
+ void EmptyAllocationInfo() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ int old_linear_size = static_cast<int>(limit() - top());
+ Free(top(), old_linear_size);
+ SetTopAndLimit(NULL, NULL);
}
void Allocate(int bytes) {
accounting_stats_.AllocateBytes(bytes);
}
- void IncreaseCapacity(int size) {
- accounting_stats_.ExpandSpace(size);
- }
+ void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page, bool unlink);
@@ -1866,12 +1927,6 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
- // Bytes of each page that cannot be allocated. Possibly non-zero
- // for pages in spaces with only fixed-size objects. Always zero
- // for pages in spaces with variable sized objects (those pages are
- // padded with free-list nodes).
- int page_extra_;
-
bool was_swept_conservatively_;
// The first page to be swept when the lazy sweeper advances. Is set
@@ -2119,11 +2174,6 @@ class SemiSpace : public Space {
return 0;
}
- virtual bool ReserveSpace(int bytes) {
- UNREACHABLE();
- return false;
- }
-
bool is_committed() { return committed_; }
bool Commit();
bool Uncommit();
@@ -2159,6 +2209,9 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
+ // Returns the maximum amount of memory ever committed by the semi space.
+ size_t MaximumCommittedMemory() { return maximum_committed_; }
+
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
@@ -2167,6 +2220,9 @@ class SemiSpace : public Space {
// Copies the flags into the masked positions on all pages in the space.
void FlipPages(intptr_t flags, intptr_t flag_mask);
+ // Updates Capacity and MaximumCommitted based on new capacity.
+ void SetCapacity(int new_capacity);
+
NewSpacePage* anchor() { return &anchor_; }
// The current and maximum capacity of the space.
@@ -2174,6 +2230,8 @@ class SemiSpace : public Space {
int maximum_capacity_;
int initial_capacity_;
+ intptr_t maximum_committed_;
+
// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
@@ -2359,6 +2417,12 @@ class NewSpace : public Space {
return Capacity();
}
+ // Return the maximum amount of memory ever committed for new space.
+ intptr_t MaximumCommittedMemory() {
+ return to_space_.MaximumCommittedMemory() +
+ from_space_.MaximumCommittedMemory();
+ }
+
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
@@ -2381,9 +2445,15 @@ class NewSpace : public Space {
// Return the address of the allocation pointer in the active semispace.
Address top() {
- ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
- return allocation_info_.top;
+ ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+ return allocation_info_.top();
}
+
+ void set_top(Address top) {
+ ASSERT(to_space_.current_page()->ContainsLimit(top));
+ allocation_info_.set_top(top);
+ }
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
@@ -2408,25 +2478,26 @@ class NewSpace : public Space {
return reinterpret_cast<Address>(index << kPointerSizeLog2);
}
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
+ // The allocation top address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
+ void UpdateInlineAllocationLimit(int size_in_bytes);
void LowerInlineAllocationLimit(intptr_t step) {
inline_allocation_limit_step_ = step;
- if (step == 0) {
- allocation_info_.limit = to_space_.page_high();
- } else {
- allocation_info_.limit = Min(
- allocation_info_.top + inline_allocation_limit_step_,
- allocation_info_.limit);
- }
- top_on_previous_step_ = allocation_info_.top;
+ UpdateInlineAllocationLimit(0);
+ top_on_previous_step_ = allocation_info_.top();
}
// Get the extent of the inactive semispace (for use as a marking stack,
@@ -2460,8 +2531,6 @@ class NewSpace : public Space {
// if successful.
bool AddFreshPage();
- virtual bool ReserveSpace(int bytes);
-
#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify();
@@ -2557,12 +2626,6 @@ class OldSpace : public PagedSpace {
AllocationSpace id,
Executability executable)
: PagedSpace(heap, max_capacity, id, executable) {
- page_extra_ = 0;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->area_end();
}
public:
@@ -2573,49 +2636,19 @@ class OldSpace : public PagedSpace {
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
-
-
-// -----------------------------------------------------------------------------
-// Old space for objects of a fixed size
-
-class FixedSpace : public PagedSpace {
- public:
- FixedSpace(Heap* heap,
- intptr_t max_capacity,
- AllocationSpace id,
- int object_size_in_bytes)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- object_size_in_bytes_(object_size_in_bytes) {
- page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->area_end() - page_extra_;
- }
-
- int object_size_in_bytes() { return object_size_in_bytes_; }
-
- // Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact();
-
- private:
- // The size of objects in this space.
- int object_size_in_bytes_;
-};
+ SLOW_ASSERT((space).page_low() <= (info).top() \
+ && (info).top() <= (space).page_high() \
+ && (info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------
// Old space for all map objects
-class MapSpace : public FixedSpace {
+class MapSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Map::kSize),
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
max_map_space_pages_(kMaxMapPageIndex - 1) {
}
@@ -2652,12 +2685,12 @@ class MapSpace : public FixedSpace {
// -----------------------------------------------------------------------------
// Old space for simple property cell objects
-class CellSpace : public FixedSpace {
+class CellSpace : public PagedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Cell::kSize)
- {}
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
+ }
virtual int RoundSizeDownToObjectAlignment(int size) {
if (IsPowerOf2(Cell::kSize)) {
@@ -2678,13 +2711,13 @@ class CellSpace : public FixedSpace {
// -----------------------------------------------------------------------------
// Old space for all global object property cell objects
-class PropertyCellSpace : public FixedSpace {
+class PropertyCellSpace : public PagedSpace {
public:
// Creates a property cell space object with a maximum capacity.
PropertyCellSpace(Heap* heap, intptr_t max_capacity,
AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, PropertyCell::kSize)
- {}
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
+ }
virtual int RoundSizeDownToObjectAlignment(int size) {
if (IsPowerOf2(PropertyCell::kSize)) {
@@ -2741,6 +2774,10 @@ class LargeObjectSpace : public Space {
return objects_size_;
}
+ intptr_t MaximumCommittedMemory() {
+ return maximum_committed_;
+ }
+
intptr_t CommittedMemory() {
return Size();
}
@@ -2770,11 +2807,6 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_page_ == NULL; }
- // See the comments for ReserveSpace in the Space class. This has to be
- // called after ReserveSpace has been called on the paged spaces, since they
- // may use some memory, leaving less for large objects.
- virtual bool ReserveSpace(int bytes);
-
LargePage* first_page() { return first_page_; }
#ifdef VERIFY_HEAP
@@ -2792,6 +2824,7 @@ class LargeObjectSpace : public Space {
private:
intptr_t max_capacity_;
+ intptr_t maximum_committed_;
// The head of the linked list of large object chunks.
LargePage* first_page_;
intptr_t size_; // allocated bytes
diff --git a/chromium/v8/src/store-buffer-inl.h b/chromium/v8/src/store-buffer-inl.h
index e1fcdee6618..7e5432c8413 100644
--- a/chromium/v8/src/store-buffer-inl.h
+++ b/chromium/v8/src/store-buffer-inl.h
@@ -41,6 +41,7 @@ Address StoreBuffer::TopAddress() {
void StoreBuffer::Mark(Address addr) {
ASSERT(!heap_->cell_space()->Contains(addr));
ASSERT(!heap_->code_space()->Contains(addr));
+ ASSERT(!heap_->old_data_space()->Contains(addr));
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
*top++ = addr;
heap_->public_set_store_buffer_top(top);
diff --git a/chromium/v8/src/store-buffer.cc b/chromium/v8/src/store-buffer.cc
index 22a546742c8..e89eb1bfed4 100644
--- a/chromium/v8/src/store-buffer.cc
+++ b/chromium/v8/src/store-buffer.cc
@@ -224,7 +224,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
}
int old_counter = containing_chunk->store_buffer_counter();
- if (old_counter == threshold) {
+ if (old_counter >= threshold) {
containing_chunk->set_scan_on_scavenge(true);
created_new_scan_on_scavenge_pages = true;
}
diff --git a/chromium/v8/src/string-stream.cc b/chromium/v8/src/string-stream.cc
index 45b675fa8ba..e2d15f54056 100644
--- a/chromium/v8/src/string-stream.cc
+++ b/chromium/v8/src/string-stream.cc
@@ -30,8 +30,6 @@
#include "factory.h"
#include "string-stream.h"
-#include "allocation-inl.h"
-
namespace v8 {
namespace internal {
@@ -299,8 +297,7 @@ Handle<String> StringStream::ToString(Isolate* isolate) {
void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
- isolate->set_string_stream_debug_object_cache(
- new List<HeapObject*, PreallocatedStorageAllocationPolicy>(0));
+ isolate->set_string_stream_debug_object_cache(new DebugObjectCache(0));
}
isolate->string_stream_debug_object_cache()->Clear();
}
diff --git a/chromium/v8/src/string.js b/chromium/v8/src/string.js
index cb82c166346..14b44ca41f3 100644
--- a/chromium/v8/src/string.js
+++ b/chromium/v8/src/string.js
@@ -28,7 +28,6 @@
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $String = global.String;
-// var $NaN = 0/0;
// -------------------------------------------------------------------
@@ -574,7 +573,7 @@ function StringSlice(start, end) {
var s_len = s.length;
var start_i = TO_INTEGER(start);
var end_i = s_len;
- if (end !== void 0) {
+ if (!IS_UNDEFINED(end)) {
end_i = TO_INTEGER(end);
}
@@ -699,7 +698,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
%_CallFunction(result, %_SubString(subject, start, end),
ArrayPushBuiltin);
} else {
- %_CallFunction(result, void 0, ArrayPushBuiltin);
+ %_CallFunction(result, UNDEFINED, ArrayPushBuiltin);
}
if (result.length === limit) break outer_loop;
}
@@ -756,7 +755,7 @@ function StringSubstr(start, n) {
// Correct n: If not given, set to string length; if explicitly
// set to undefined, zero, or negative, returns empty string.
- if (n === void 0) {
+ if (IS_UNDEFINED(n)) {
len = s.length;
} else {
len = TO_INTEGER(n);
@@ -765,7 +764,7 @@ function StringSubstr(start, n) {
// Correct start: If not given (or undefined), set to zero; otherwise
// convert to integer and handle negative case.
- if (start === void 0) {
+ if (IS_UNDEFINED(start)) {
start = 0;
} else {
start = TO_INTEGER(start);
diff --git a/chromium/v8/src/stub-cache.cc b/chromium/v8/src/stub-cache.cc
index bb8a76609d1..689eeaef153 100644
--- a/chromium/v8/src/stub-cache.cc
+++ b/chromium/v8/src/stub-cache.cc
@@ -99,434 +99,104 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
}
-Handle<JSObject> StubCache::StubHolder(Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*receiver, *holder);
- return Handle<JSObject>(IC::GetCodeCacheHolder(
- isolate_, *receiver, cache_holder));
-}
-
-
-Handle<Code> StubCache::FindIC(Handle<Name> name,
- Handle<Map> stub_holder_map,
- Code::Kind kind,
- Code::StubType type,
- Code::ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state, type);
- Handle<Object> probe(stub_holder_map->FindInCodeCache(*name, flags),
- isolate_);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
- return Handle<Code>::null();
-}
-
-
Handle<Code> StubCache::FindIC(Handle<Name> name,
- Handle<JSObject> stub_holder,
+ Handle<Map> stub_holder,
Code::Kind kind,
- Code::StubType type,
- Code::ExtraICState extra_ic_state) {
- return FindIC(name, Handle<Map>(stub_holder->map()), kind,
- type, extra_ic_state);
-}
-
-
-Handle<Code> StubCache::FindLoadHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type) {
- Code::ExtraICState extra_ic_state = Code::ComputeExtraICState(
- receiver.is_identical_to(stub_holder) ? Code::OWN_STUB
- : Code::PROTOTYPE_STUB);
- ASSERT(type != Code::NORMAL);
+ ExtraICState extra_state,
+ InlineCacheHolderFlag cache_holder) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STUB, extra_ic_state, type, kind);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
+ kind, extra_state, cache_holder);
+ Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
return Handle<Code>::null();
}
-Handle<Code> StubCache::FindStoreHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Code::Kind kind,
- Code::StubType type,
- StrictModeFlag strict_mode) {
- Code::ExtraICState extra_ic_state = Code::ComputeExtraICState(
- STANDARD_STORE, strict_mode);
- ASSERT(type != Code::NORMAL);
+Handle<Code> StubCache::FindHandler(Handle<Name> name,
+ Handle<Map> stub_holder,
+ Code::Kind kind,
+ InlineCacheHolderFlag cache_holder) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STUB, extra_ic_state, type, kind);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Code::HANDLER, kNoExtraICState, cache_holder, Code::NORMAL, kind);
+
+ Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
return Handle<Code>::null();
}
-Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(name, map, Code::LOAD_IC, handler->type());
- if (!ic.is_null()) return ic;
-
- LoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
-
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC(
- Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(name, map, Code::KEYED_LOAD_IC, handler->type());
- if (!ic.is_null()) return ic;
-
- KeyedLoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
-
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(
- name, map, Code::STORE_IC, handler->type(), strict_mode);
- if (!ic.is_null()) return ic;
-
- StoreStubCompiler ic_compiler(isolate(), strict_mode);
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
-
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicKeyedStoreIC(
- Handle<HeapObject> receiver,
- Handle<Code> handler,
+Handle<Code> StubCache::ComputeMonomorphicIC(
Handle<Name> name,
- StrictModeFlag strict_mode) {
- Handle<Map> map(receiver->map());
- Handle<Code> ic = FindIC(
- name, map, Code::KEYED_STORE_IC, handler->type(), strict_mode);
- if (!ic.is_null()) return ic;
+ Handle<Type> type,
+ Handle<Code> handler,
+ ExtraICState extra_ic_state) {
+ Code::Kind kind = handler->handler_kind();
+ InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
+
+ Handle<Map> stub_holder;
+ Handle<Code> ic;
+ // There are multiple string maps that all use the same prototype. That
+ // prototype cannot hold multiple handlers, one for each of the string maps,
+ // for a single name. Hence, turn off caching of the IC.
+ bool can_be_cached = !type->Is(Type::String());
+ if (can_be_cached) {
+ stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
+ ic = FindIC(name, stub_holder, kind, extra_ic_state, flag);
+ if (!ic.is_null()) return ic;
+ }
- KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
- ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+ if (kind == Code::LOAD_IC) {
+ LoadStubCompiler ic_compiler(isolate(), flag);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
+ } else if (kind == Code::KEYED_LOAD_IC) {
+ KeyedLoadStubCompiler ic_compiler(isolate(), flag);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
+ } else if (kind == Code::STORE_IC) {
+ StoreStubCompiler ic_compiler(isolate(), extra_ic_state);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
+ } else {
+ ASSERT(kind == Code::KEYED_STORE_IC);
+ ASSERT(STANDARD_STORE ==
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
+ KeyedStoreStubCompiler ic_compiler(isolate(), extra_ic_state);
+ ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
+ }
- HeapObject::UpdateMapCodeCache(receiver, name, ic);
+ if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
return ic;
}
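// Sketch of the find-or-compile-then-cache shape of ComputeMonomorphicIC
// above (a std::map stands in for the map's code cache; "compiling" is a
// stub), including the can_be_cached escape hatch for string maps:
#include <map>
#include <string>

typedef std::map<std::string, int> ToyCodeCache;  // name -> compiled IC id

int FindOrCompileIC(ToyCodeCache* cache, const std::string& name,
                    bool can_be_cached) {
  if (can_be_cached) {
    ToyCodeCache::const_iterator it = cache->find(name);  // FindIC
    if (it != cache->end()) return it->second;            // cache hit
  }
  int ic = 42;                                            // "compile" the IC
  if (can_be_cached) (*cache)[name] = ic;                 // UpdateCodeCache
  return ic;
}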
Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
- Handle<JSObject> receiver) {
- // If no global objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map
- // and we use the empty string for the map cache in that case. If
- // there are global objects involved, we need to check global
- // property cells in the stub and therefore the stub will be
- // specific to the name.
- Handle<Name> cache_name = factory()->empty_string();
- Handle<JSObject> current;
- Handle<Object> next = receiver;
- Handle<JSGlobalObject> global;
- do {
- current = Handle<JSObject>::cast(next);
- next = Handle<Object>(current->GetPrototype(), isolate_);
- if (current->IsJSGlobalObject()) {
- global = Handle<JSGlobalObject>::cast(current);
- cache_name = name;
- } else if (!current->HasFastProperties()) {
- cache_name = name;
- }
- } while (!next->IsNull());
+ Handle<Type> type) {
+ InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
+ Handle<Map> stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
+ // If no dictionary mode objects are present in the prototype chain, the load
+ // nonexistent IC stub can be shared for all names for a given map and we use
+ // the empty string for the map cache in that case. If there are dictionary
+ // mode objects involved, we need to do negative lookups in the stub and
+ // therefore the stub will be specific to the name.
+ Handle<Map> current_map = stub_holder;
+ Handle<Name> cache_name = current_map->is_dictionary_map()
+ ? name : Handle<Name>::cast(isolate()->factory()->empty_string());
+ Handle<Object> next(current_map->prototype(), isolate());
+ Handle<JSObject> last = Handle<JSObject>::null();
+ while (!next->IsNull()) {
+ last = Handle<JSObject>::cast(next);
+ next = handle(current_map->prototype(), isolate());
+ current_map = handle(Handle<HeapObject>::cast(next)->map());
+ if (current_map->is_dictionary_map()) cache_name = name;
+ }
// Compile the stub that is either shared for all names or
// name specific if there are dictionary mode objects involved.
- Handle<Code> handler = FindLoadHandler(
- cache_name, receiver, receiver, Code::LOAD_IC, Code::NONEXISTENT);
+ Handle<Code> handler = FindHandler(
+ cache_name, stub_holder, Code::LOAD_IC, flag);
if (!handler.is_null()) return handler;
- LoadStubCompiler compiler(isolate_);
- handler =
- compiler.CompileLoadNonexistent(receiver, current, cache_name, global);
- HeapObject::UpdateMapCodeCache(receiver, cache_name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
- if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- return stub.GetCode(isolate());
- }
-
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::FIELD);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field, representation);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, callback);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadViaGetter(receiver, holder, name, getter);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<Object> value) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::CONSTANT);
- if (!handler.is_null()) return handler;
-
- LoadStubCompiler compiler(isolate_);
- handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
-
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadInterceptor(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::LOAD_IC, Code::INTERCEPTOR);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeLoadNormal(Handle<Name> name,
- Handle<JSObject> receiver) {
- return isolate_->builtins()->LoadIC_Normal();
-}
-
-
-Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- bool is_dont_delete) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindIC(name, stub_holder, Code::LOAD_IC, Code::NORMAL);
- if (!stub.is_null()) return stub;
-
- LoadStubCompiler compiler(isolate_);
- Handle<Code> ic =
- compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- HeapObject::UpdateMapCodeCache(stub_holder, name, ic);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
- if (receiver.is_identical_to(holder)) {
- // TODO(titzer): this should use an HObjectAccess
- KeyedLoadFieldStub stub(field.is_inobject(holder),
- field.translate(holder),
- representation);
- return stub.GetCode(isolate());
- }
-
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::FIELD);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field, representation);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<Object> value) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> handler = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC,
- Code::CONSTANT);
- if (!handler.is_null()) return handler;
-
- KeyedLoadStubCompiler compiler(isolate_);
- handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadInterceptor(receiver, holder, name);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, callback);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization) {
- Handle<JSObject> stub_holder = StubHolder(receiver, holder);
- Handle<Code> stub = FindLoadHandler(
- name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
- if (!stub.is_null()) return stub;
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> handler =
- compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
- HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreField(Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::FIELD, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreTransition(Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler =
- compiler.CompileStoreTransition(receiver, lookup, transition, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
+ LoadStubCompiler compiler(isolate_, flag);
+ handler = compiler.CompileLoadNonexistent(type, last, cache_name);
+ Map::UpdateCodeCache(stub_holder, cache_name, handler);
return handler;
}
@@ -551,8 +221,8 @@ Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Map> receiver_map,
StrictModeFlag strict_mode,
KeyedAccessStoreMode store_mode) {
- Code::ExtraICState extra_state =
- Code::ComputeExtraICState(store_mode, strict_mode);
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::KEYED_STORE_IC, extra_state);
@@ -566,167 +236,29 @@ Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, store_mode);
+ KeyedStoreStubCompiler compiler(isolate(), extra_state);
Handle<Code> code = compiler.CompileStoreElement(receiver_map);
Map::UpdateCodeCache(receiver_map, name, code);
- ASSERT(Code::GetKeyedAccessStoreMode(code->extra_ic_state()) == store_mode);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
- return (strict_mode == kStrictMode)
- ? isolate_->builtins()->Builtins::StoreIC_Normal_Strict()
- : isolate_->builtins()->Builtins::StoreIC_Normal();
-}
-
-
-Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
- Handle<GlobalObject> receiver,
- Handle<PropertyCell> cell,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- Isolate* isolate = cell->GetIsolate();
- Handle<Type> union_type(PropertyCell::UpdateType(cell, value), isolate);
- bool is_constant = union_type->IsConstant();
- StoreGlobalStub stub(strict_mode, is_constant);
-
- Handle<Code> code = FindIC(
- name, Handle<JSObject>::cast(receiver),
- Code::STORE_IC, Code::NORMAL, stub.GetExtraICState());
- if (!code.is_null()) return code;
-
- // Replace the placeholder cell and global object map with the actual global
- // cell and receiver map.
- Handle<Map> meta_map(isolate_->heap()->meta_map());
- Handle<Object> receiver_map(receiver->map(), isolate_);
- code = stub.GetCodeCopyFromTemplate(isolate_);
- code->ReplaceNthObject(1, *meta_map, *receiver_map);
- Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
- code->ReplaceNthObject(1, *cell_map, *cell);
-
- HeapObject::UpdateMapCodeCache(receiver, name, code);
-
+ ASSERT(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state())
+ == store_mode);
return code;
}
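
// KeyedStoreIC::ComputeExtraICState above packs the strict-mode flag and the
// keyed-access store mode into a single integer, so the pair can travel
// inside the Code::Flags value used as a cache key. A sketch of that style of
// bit-field packing; the field layout below is illustrative, not V8's actual
// encoding:

#include <cstdint>

enum Strictness : uint32_t { kSloppy = 0, kStrict = 1 };
enum StoreMode : uint32_t { kStandardStore = 0, kGrowArray = 1, kHandleCow = 2 };

using ExtraState = uint32_t;

constexpr ExtraState EncodeExtraState(Strictness s, StoreMode m) {
  return static_cast<ExtraState>(s) | (static_cast<ExtraState>(m) << 1);
}
constexpr Strictness DecodeStrictness(ExtraState state) {
  return static_cast<Strictness>(state & 1u);
}
constexpr StoreMode DecodeStoreMode(ExtraState state) {
  return static_cast<StoreMode>(state >> 1);
}

static_assert(DecodeStoreMode(EncodeExtraState(kStrict, kGrowArray)) ==
                  kGrowArray,
              "encoding must round-trip");
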
-Handle<Code> StubCache::ComputeStoreCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback,
- StrictModeFlag strict_mode) {
- ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreCallback(
- receiver, holder, name, callback);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreCallback(
- Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreCallback(
- receiver, holder, name, call_optimization);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreViaSetter(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreViaSetter(
- receiver, holder, name, setter);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name,
- Handle<JSObject> receiver,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
- if (!stub.is_null()) return stub;
-
- StoreStubCompiler compiler(isolate_, strict_mode);
- Handle<Code> handler = compiler.CompileStoreInterceptor(receiver, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::KEYED_STORE_IC, Code::FIELD, strict_mode);
- if (!stub.is_null()) return stub;
-
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
- Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedStoreTransition(
- Handle<Name> name,
- Handle<JSObject> receiver,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode) {
- Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
-
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
- Handle<Code> handler =
- compiler.CompileStoreTransition(receiver, lookup, transition, name);
- HeapObject::UpdateMapCodeCache(receiver, name, handler);
- return handler;
-}
-
-
#define CALL_LOGGER_TAG(kind, type) \
(kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
Handle<Code> StubCache::ComputeCallConstant(int argc,
Code::Kind kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
Handle<JSFunction> function) {
// Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object);
+ Handle<HeapObject> stub_holder(IC::GetCodeCacheHolder(
isolate_, *object, cache_holder));
// Compute check type based on receiver/holder.
@@ -750,7 +282,7 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
}
Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::CONSTANT, argc, cache_holder);
+ kind, extra_state, cache_holder, Code::FAST, argc);
Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -773,15 +305,14 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
Handle<Code> StubCache::ComputeCallField(int argc,
Code::Kind kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
PropertyIndex index) {
// Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object);
+ Handle<HeapObject> stub_holder(IC::GetCodeCacheHolder(
isolate_, *object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
@@ -793,7 +324,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
}
Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::FIELD, argc, cache_holder);
+ kind, extra_state, cache_holder, Code::FAST, argc);
Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -813,14 +344,13 @@ Handle<Code> StubCache::ComputeCallField(int argc,
Handle<Code> StubCache::ComputeCallInterceptor(int argc,
Code::Kind kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder) {
// Compute the check type and the map.
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object);
+ Handle<HeapObject> stub_holder(IC::GetCodeCacheHolder(
isolate_, *object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
@@ -832,7 +362,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
}
Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::INTERCEPTOR, argc, cache_holder);
+ kind, extra_state, cache_holder, Code::FAST, argc);
Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -852,23 +382,19 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
Handle<Code> StubCache::ComputeCallGlobal(int argc,
Code::Kind kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<JSObject> receiver,
Handle<GlobalObject> holder,
Handle<PropertyCell> cell,
Handle<JSFunction> function) {
- InlineCacheHolderFlag cache_holder =
- IC::GetCodeCacheForObject(*receiver, *holder);
- Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
- isolate_, *receiver, cache_holder));
Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, Code::NORMAL, argc, cache_holder);
- Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ kind, extra_state, OWN_MAP, Code::NORMAL, argc);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
+ CallStubCompiler compiler(isolate(), argc, kind, extra_state);
Handle<Code> code =
compiler.CompileCallGlobal(receiver, holder, cell, function, name);
ASSERT(flags == code->flags());
@@ -876,7 +402,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
if (CallStubCompiler::CanBeCached(function)) {
- HeapObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
}
return code;
}
@@ -894,9 +420,10 @@ static void FillCache(Isolate* isolate, Handle<Code> code) {
Code* StubCache::FindCallInitialize(int argc,
RelocInfo::Mode mode,
Code::Kind kind) {
- Code::ExtraICState extra_state =
+ ExtraICState extra_state =
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
+ CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT
+ ? CONTEXTUAL : NOT_CONTEXTUAL);
Code::Flags flags =
Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
UnseededNumberDictionary* dictionary =
@@ -913,9 +440,10 @@ Code* StubCache::FindCallInitialize(int argc,
Handle<Code> StubCache::ComputeCallInitialize(int argc,
RelocInfo::Mode mode,
Code::Kind kind) {
- Code::ExtraICState extra_state =
+ ExtraICState extra_state =
CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
- CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
+ CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT
+ ? CONTEXTUAL : NOT_CONTEXTUAL);
Code::Flags flags =
Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
@@ -944,7 +472,7 @@ Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
Handle<Code> StubCache::ComputeCallPreMonomorphic(
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
Code::Flags flags =
Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
@@ -961,7 +489,7 @@ Handle<Code> StubCache::ComputeCallPreMonomorphic(
Handle<Code> StubCache::ComputeCallNormal(int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
Code::Flags flags =
Code::ComputeFlags(kind, MONOMORPHIC, extra_state, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
@@ -979,7 +507,7 @@ Handle<Code> StubCache::ComputeCallNormal(int argc,
Handle<Code> StubCache::ComputeCallArguments(int argc) {
Code::Flags flags =
Code::ComputeFlags(Code::KEYED_CALL_IC, MEGAMORPHIC,
- Code::kNoExtraICState, Code::NORMAL, argc);
+ kNoExtraICState, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -995,7 +523,7 @@ Handle<Code> StubCache::ComputeCallArguments(int argc) {
Handle<Code> StubCache::ComputeCallMegamorphic(
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
Code::Flags flags =
Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
Code::NORMAL, argc);
@@ -1013,7 +541,7 @@ Handle<Code> StubCache::ComputeCallMegamorphic(
Handle<Code> StubCache::ComputeCallMiss(int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
// and monomorphic stubs are not mixed up together in the stub cache.
Code::Flags flags =
@@ -1036,7 +564,7 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
Handle<String> name(isolate_->heap()->empty_string());
if (!receiver_map->is_shared()) {
Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC,
- Code::NORMAL, stub.GetExtraICState());
+ stub.GetExtraICState());
if (!cached_ic.is_null()) return cached_ic;
}
@@ -1051,6 +579,7 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
}
+// TODO(verwaest): Change this method so it takes in a TypeHandleList.
Handle<Code> StubCache::ComputeLoadElementPolymorphic(
MapHandleList* receiver_maps) {
Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
@@ -1059,12 +588,15 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
Handle<Object> probe = cache->Lookup(receiver_maps, flags);
if (probe->IsCode()) return Handle<Code>::cast(probe);
+ TypeHandleList types(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); i++) {
+ types.Add(handle(Type::Class(receiver_maps->at(i)), isolate()));
+ }
CodeHandleList handlers(receiver_maps->length());
KeyedLoadStubCompiler compiler(isolate_);
compiler.CompileElementHandlers(receiver_maps, &handlers);
Handle<Code> code = compiler.CompilePolymorphicIC(
- receiver_maps, &handlers, factory()->empty_string(),
- Code::NORMAL, ELEMENT);
+ &types, &handlers, factory()->empty_string(), Code::NORMAL, ELEMENT);
isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
@@ -1073,30 +605,28 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic(
}
-Handle<Code> StubCache::ComputePolymorphicLoadIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name) {
- LoadStubCompiler ic_compiler(isolate_);
- Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
- : Code::NORMAL;
- Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
- receiver_maps, handlers, name, type, PROPERTY);
- return ic;
-}
-
-
-Handle<Code> StubCache::ComputePolymorphicStoreIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode) {
- StoreStubCompiler ic_compiler(isolate_, strict_mode);
- Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
- : Code::NORMAL;
- Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
- receiver_maps, handlers, name, type, PROPERTY);
- return ic;
+Handle<Code> StubCache::ComputePolymorphicIC(
+ TypeHandleList* types,
+ CodeHandleList* handlers,
+ int number_of_valid_types,
+ Handle<Name> name,
+ ExtraICState extra_ic_state) {
+ Handle<Code> handler = handlers->at(0);
+ Code::Kind kind = handler->handler_kind();
+ Code::StubType type = number_of_valid_types == 1 ? handler->type()
+ : Code::NORMAL;
+ if (kind == Code::LOAD_IC) {
+ LoadStubCompiler ic_compiler(isolate_);
+ return ic_compiler.CompilePolymorphicIC(
+ types, handlers, name, type, PROPERTY);
+ } else {
+ ASSERT(kind == Code::STORE_IC);
+ StrictModeFlag strict_mode = StoreIC::GetStrictMode(extra_ic_state);
+ StoreStubCompiler ic_compiler(isolate_, strict_mode);
+ return ic_compiler.CompilePolymorphicIC(
+ types, handlers, name, type, PROPERTY);
+ }
}
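
// The unified ComputePolymorphicIC above derives everything from its handler
// list: the first handler's kind selects the load or store compiler, and the
// stub type stays precise only while there is exactly one valid type. A
// sketch of that selection rule (enums and types are stand-ins):

#include <cassert>
#include <vector>

enum class IcKind { kLoad, kStore };
enum class StubType { kFast, kNormal };

struct HandlerInfo { IcKind kind; StubType type; };

StubType SelectStubType(const std::vector<HandlerInfo>& handlers,
                        int number_of_valid_types) {
  assert(!handlers.empty());
  // A single valid type keeps the handler's precise stub type; any more and
  // the IC degrades to the generic NORMAL type.
  return number_of_valid_types == 1 ? handlers.front().type
                                    : StubType::kNormal;
}
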
@@ -1110,14 +640,14 @@ Handle<Code> StubCache::ComputeStoreElementPolymorphic(
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
Handle<PolymorphicCodeCache> cache =
isolate_->factory()->polymorphic_code_cache();
- Code::ExtraICState extra_state = Code::ComputeExtraICState(store_mode,
- strict_mode);
+ ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState(
+ strict_mode, store_mode);
Code::Flags flags =
Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
Handle<Object> probe = cache->Lookup(receiver_maps, flags);
if (probe->IsCode()) return Handle<Code>::cast(probe);
- KeyedStoreStubCompiler compiler(isolate_, strict_mode, store_mode);
+ KeyedStoreStubCompiler compiler(isolate_, extra_state);
Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps);
PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
return code;
@@ -1300,12 +830,12 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
+ HandleScope scope(isolate);
IC ic(IC::NO_EXTRA_FRAME, isolate);
- ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
+ ASSERT(ic.IsLoadStub());
if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value();
// Throw a reference error.
- HandleScope scope(isolate);
Handle<Name> name_handle(name);
Handle<Object> error =
isolate->factory()->NewReferenceError("not_defined",
@@ -1314,8 +844,8 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
}
-static MaybeObject* LoadWithInterceptor(Arguments* args,
- PropertyAttributes* attrs) {
+static Handle<Object> LoadWithInterceptor(Arguments* args,
+ PropertyAttributes* attrs) {
ASSERT(args->length() == StubCache::kInterceptorArgsLength);
Handle<Name> name_handle =
args->at<Name>(StubCache::kInterceptorArgsNameIndex);
@@ -1329,9 +859,10 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
Isolate* isolate = receiver_handle->GetIsolate();
// TODO(rossberg): Support symbols in the API.
- if (name_handle->IsSymbol())
- return holder_handle->GetPropertyPostInterceptor(
- *receiver_handle, *name_handle, attrs);
+ if (name_handle->IsSymbol()) {
+ return JSObject::GetPropertyPostInterceptor(
+ holder_handle, receiver_handle, name_handle, attrs);
+ }
Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
@@ -1344,24 +875,21 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
*receiver_handle,
*holder_handle);
{
- // Use the interceptor getter.
HandleScope scope(isolate);
+ // Use the interceptor getter.
v8::Handle<v8::Value> r =
callback_args.Call(getter, v8::Utils::ToLocal(name));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!r.IsEmpty()) {
*attrs = NONE;
Handle<Object> result = v8::Utils::OpenHandle(*r);
result->VerifyApiCallResultType();
- return *result;
+ return scope.CloseAndEscape(result);
}
}
- MaybeObject* result = holder_handle->GetPropertyPostInterceptor(
- *receiver_handle,
- *name_handle,
- attrs);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ Handle<Object> result = JSObject::GetPropertyPostInterceptor(
+ holder_handle, receiver_handle, name_handle, attrs);
return result;
}
@@ -1372,40 +900,41 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
*/
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
PropertyAttributes attr = NONE;
- Object* result;
- { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ HandleScope scope(isolate);
+ Handle<Object> result = LoadWithInterceptor(&args, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// If the property is present, return it.
- if (attr != ABSENT) return result;
+ if (attr != ABSENT) return *result;
return ThrowReferenceError(isolate, Name::cast(args[0]));
}
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
PropertyAttributes attr;
- MaybeObject* result = LoadWithInterceptor(&args, &attr);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ HandleScope scope(isolate);
+ Handle<Object> result = LoadWithInterceptor(&args, &attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
// This is a call IC. In this case, we simply return the undefined result,
// which will lead to an exception when the caller tries to invoke the
// result as a function.
- return result;
+ return *result;
}
RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
- ASSERT(args.length() == 4);
- JSObject* recv = JSObject::cast(args[0]);
- Name* name = Name::cast(args[1]);
- Object* value = args[2];
- ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
- StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
- ASSERT(recv->HasNamedInterceptor());
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<Object> value = args.at<Object>(2);
+ ASSERT(receiver->HasNamedInterceptor());
PropertyAttributes attr = NONE;
- MaybeObject* result = recv->SetPropertyWithInterceptor(
- name, value, attr, strict_mode);
- return result;
+ Handle<Object> result = JSObject::SetPropertyWithInterceptor(
+ receiver, name, value, attr, ic.strict_mode());
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
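
// The runtime entries above were handlified: instead of threading raw
// MaybeObject* results through ToObject checks, failures now surface as empty
// handles that are tested once at the call site. A simplified sketch of that
// convention, with std::optional standing in for a handle that may be empty
// (this is not the real V8 macro):

#include <optional>
#include <string>

using ObjectHandle = std::optional<std::string>;  // empty == exception pending

// Mirrors the role of RETURN_IF_EMPTY_HANDLE: bail out on the failure
// sentinel before touching the result.
#define RETURN_IF_EMPTY(handle) \
  do {                          \
    if (!(handle)) return {};   \
  } while (false)

ObjectHandle LoadWithSideEffects(bool fail) {
  if (fail) return std::nullopt;  // an exception was "scheduled"
  return std::string("value");
}

ObjectHandle RuntimeEntry(bool fail) {
  ObjectHandle result = LoadWithSideEffects(fail);
  RETURN_IF_EMPTY(result);
  return result;  // success path: safe to use *result
}
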
@@ -1420,7 +949,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateInitialize(masm(), argc, extra_state);
} else {
@@ -1441,7 +970,7 @@ Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
// The code of the PreMonomorphic stub is the same as the code
// of the Initialized stub. They just differ on the code object flags.
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateInitialize(masm(), argc, extra_state);
} else {
@@ -1481,7 +1010,7 @@ Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateMegamorphic(masm(), argc, extra_state);
} else {
@@ -1513,7 +1042,7 @@ Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
- Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
if (kind == Code::CALL_IC) {
CallIC::GenerateMiss(masm(), argc, extra_state);
} else {
@@ -1548,7 +1077,7 @@ Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
// For the debugger extra ic state is irrelevant.
- CallIC::GenerateMiss(masm(), argc, Code::kNoExtraICState);
+ CallIC::GenerateMiss(masm(), argc, kNoExtraICState);
} else {
KeyedCallIC::GenerateMiss(masm(), argc);
}
@@ -1571,6 +1100,9 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
CodeDesc desc;
masm_.GetCode(&desc);
Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
+ if (code->has_major_key()) {
+ code->set_major_key(CodeStub::NoCache);
+ }
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) code->Disassemble(name);
#endif
@@ -1599,89 +1131,196 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
#define __ ACCESS_MASM(masm())
-Register BaseLoadStubCompiler::HandlerFrontendHeader(
- Handle<JSObject> object,
+CallKind CallStubCompiler::call_kind() {
+ return CallICBase::Contextual::decode(extra_state())
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+}
+
+
+void CallStubCompiler::HandlerFrontendFooter(Label* miss) {
+ __ bind(miss);
+ GenerateMissBranch();
+}
+
+
+void CallStubCompiler::GenerateJumpFunctionIgnoreReceiver(
+ Handle<JSFunction> function) {
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind());
+}
+
+
+void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
+ Handle<JSFunction> function) {
+ PatchGlobalProxy(object);
+ GenerateJumpFunctionIgnoreReceiver(function);
+}
+
+
+void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
+ Register actual_closure,
+ Handle<JSFunction> function) {
+ PatchGlobalProxy(object);
+ ParameterCount expected(function);
+ __ InvokeFunction(actual_closure, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), call_kind());
+}
+
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<Cell>::null(),
+ function, Handle<String>::cast(name),
+ Code::FAST);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label miss;
+ HandlerFrontendHeader(object, holder, name, check, &miss);
+ GenerateJumpFunction(object, function);
+ HandlerFrontendFooter(&miss);
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
+Register LoadStubCompiler::HandlerFrontendHeader(
+ Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss) {
- return CheckPrototypes(object, object_reg, holder,
- scratch1(), scratch2(), scratch3(),
- name, miss, SKIP_RECEIVER);
+ PrototypeCheckType check_type = CHECK_ALL_MAPS;
+ int function_index = -1;
+ if (type->Is(Type::String())) {
+ function_index = Context::STRING_FUNCTION_INDEX;
+ } else if (type->Is(Type::Symbol())) {
+ function_index = Context::SYMBOL_FUNCTION_INDEX;
+ } else if (type->Is(Type::Number())) {
+ function_index = Context::NUMBER_FUNCTION_INDEX;
+ } else if (type->Is(Type::Boolean())) {
+ // Booleans use the generic oddball map, so an additional check is needed to
+ // ensure the receiver is really a boolean.
+ GenerateBooleanCheck(object_reg, miss);
+ function_index = Context::BOOLEAN_FUNCTION_INDEX;
+ } else {
+ check_type = SKIP_RECEIVER;
+ }
+
+ if (check_type == CHECK_ALL_MAPS) {
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), function_index, scratch1(), miss);
+ Object* function = isolate()->native_context()->get(function_index);
+ Object* prototype = JSFunction::cast(function)->instance_prototype();
+ type = IC::CurrentTypeOf(handle(prototype, isolate()), isolate());
+ object_reg = scratch1();
+ }
+
+ // Check that the maps starting from the prototype haven't changed.
+ return CheckPrototypes(
+ type, object_reg, holder, scratch1(), scratch2(), scratch3(),
+ name, miss, check_type);
}
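
// HandlerFrontendHeader above routes primitive receivers through the native
// context index of their wrapper constructor, so a load such as "abc".length
// is checked against String.prototype's map chain rather than against the
// string value itself. A sketch of that type-to-index routing (the indices
// are illustrative placeholders):

#include <optional>

enum class ReceiverType { kString, kSymbol, kNumber, kBoolean, kObject };

// Returns the native-context function index whose instance prototype should
// anchor the prototype-chain check, or nullopt for ordinary JS receivers.
std::optional<int> WrapperFunctionIndex(ReceiverType type) {
  switch (type) {
    case ReceiverType::kString:  return 0;  // e.g. STRING_FUNCTION_INDEX
    case ReceiverType::kSymbol:  return 1;  // e.g. SYMBOL_FUNCTION_INDEX
    case ReceiverType::kNumber:  return 2;  // e.g. NUMBER_FUNCTION_INDEX
    case ReceiverType::kBoolean: return 3;  // e.g. BOOLEAN_FUNCTION_INDEX
    case ReceiverType::kObject:  return std::nullopt;  // check receiver maps
  }
  return std::nullopt;
}
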
// HandlerFrontend for store uses the name register. It has to be restored
// before a miss.
-Register BaseStoreStubCompiler::HandlerFrontendHeader(
- Handle<JSObject> object,
+Register StoreStubCompiler::HandlerFrontendHeader(
+ Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss) {
- return CheckPrototypes(object, object_reg, holder,
- this->name(), scratch1(), scratch2(),
- name, miss, SKIP_RECEIVER);
+ return CheckPrototypes(type, object_reg, holder, this->name(),
+ scratch1(), scratch2(), name, miss, SKIP_RECEIVER);
}
-Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<JSObject> object,
+bool BaseLoadStoreStubCompiler::IncludesNumberType(TypeHandleList* types) {
+ for (int i = 0; i < types->length(); ++i) {
+ if (types->at(i)->Is(Type::Number())) return true;
+ }
+ return false;
+}
+
+
+Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
- Handle<Name> name,
- Label* success) {
+ Handle<Name> name) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
+
+ HandlerFrontendFooter(name, &miss);
- HandlerFrontendFooter(name, success, &miss);
return reg;
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<JSGlobalObject> global) {
+void LoadStubCompiler::NonexistentHandlerFrontend(Handle<Type> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
Label miss;
- Register holder =
- HandlerFrontendHeader(object, receiver(), last, name, &miss);
+ Register holder;
+ Handle<Map> last_map;
+ if (last.is_null()) {
+ holder = receiver();
+ last_map = IC::TypeToMap(*type, isolate());
+ // If |type| has null as its prototype, |last| is Handle<JSObject>::null().
+ ASSERT(last_map->prototype() == isolate()->heap()->null_value());
+ } else {
+ holder = HandlerFrontendHeader(type, receiver(), last, name, &miss);
+ last_map = handle(last->map());
+ }
- if (!last->HasFastProperties() &&
- !last->IsJSGlobalObject() &&
- !last->IsJSGlobalProxy()) {
+ if (last_map->is_dictionary_map() &&
+ !last_map->IsJSGlobalObjectMap() &&
+ !last_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(last->property_dictionary()->FindEntry(*name) ==
- NameDictionary::kNotFound);
+ ASSERT(last.is_null() ||
+ last->property_dictionary()->FindEntry(*name) ==
+ NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), &miss, holder, name,
scratch2(), scratch3());
}
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
- if (!global.is_null()) {
+ if (last_map->IsJSGlobalObjectMap()) {
+ Handle<JSGlobalObject> global = last.is_null()
+ ? Handle<JSGlobalObject>::cast(type->AsConstant())
+ : Handle<JSGlobalObject>::cast(last);
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
}
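
// NonexistentHandlerFrontend above proves a name is absent along the whole
// prototype chain: a dictionary-mode holder needs an explicit negative
// dictionary lookup, and a global holder additionally needs an empty property
// cell for the name. A sketch of that decision with plain containers as
// stand-ins for V8's dictionaries and cells:

#include <set>
#include <string>

struct HolderState {
  bool is_dictionary_map;
  bool is_global;
  std::set<std::string> dictionary;    // names present in dictionary mode
  std::set<std::string> global_cells;  // names with a live property cell
};

// True when a fast "property is missing" handler may be installed.
bool CanCacheNonexistent(const HolderState& holder, const std::string& name) {
  if (holder.is_dictionary_map && !holder.is_global &&
      holder.dictionary.count(name)) {
    return false;  // the negative dictionary lookup would fail
  }
  if (holder.is_global && holder.global_cells.count(name)) {
    return false;  // a global cell could materialize the property
  }
  return true;
}
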
-Handle<Code> BaseLoadStubCompiler::CompileLoadField(
- Handle<JSObject> object,
+Handle<Code> LoadStubCompiler::CompileLoadField(
+ Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
PropertyIndex field,
Representation representation) {
Label miss;
- Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, receiver(), holder, name, &miss);
GenerateLoadField(reg, holder, field, representation);
@@ -1689,87 +1328,74 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadField(
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::FIELD, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
- Handle<JSObject> object,
+Handle<Code> LoadStubCompiler::CompileLoadConstant(
+ Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(type, receiver(), holder, name);
GenerateLoadConstant(value);
// Return the generated code.
- return GetCode(kind(), Code::CONSTANT, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
- Handle<JSObject> object,
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
-
Register reg = CallbackHandlerFrontend(
- object, receiver(), holder, name, &success, callback);
- __ bind(&success);
+ type, receiver(), holder, name, callback);
GenerateLoadCallback(reg, callback);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
- Handle<JSObject> object,
+Handle<Code> LoadStubCompiler::CompileLoadCallback(
+ Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
ASSERT(call_optimization.is_simple_api_call());
- Label success;
-
Handle<JSFunction> callback = call_optimization.constant_function();
- CallbackHandlerFrontend(
- object, receiver(), holder, name, &success, callback);
- __ bind(&success);
+ CallbackHandlerFrontend(type, receiver(), holder, name, callback);
GenerateLoadCallback(call_optimization);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> object,
+Handle<Code> LoadStubCompiler::CompileLoadInterceptor(
+ Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name) {
- Label success;
-
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- Register reg = HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ Register reg = HandlerFrontend(type, receiver(), holder, name);
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(reg, object, holder, &lookup, name);
+ GenerateLoadInterceptor(reg, type, holder, &lookup, name);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
-void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
+void LoadStubCompiler::GenerateLoadPostInterceptor(
Register interceptor_reg,
Handle<JSObject> interceptor_holder,
Handle<Name> name,
LookupResult* lookup) {
- Label success;
Handle<JSObject> holder(lookup->holder());
if (lookup->IsField()) {
PropertyIndex field = lookup->GetFieldIndex();
@@ -1780,8 +1406,8 @@ void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
Register reg = HandlerFrontend(
- interceptor_holder, interceptor_reg, holder, name, &success);
- __ bind(&success);
+ IC::CurrentTypeOf(interceptor_holder, isolate()),
+ interceptor_reg, holder, name);
GenerateLoadField(
reg, holder, field, lookup->representation());
}
@@ -1794,43 +1420,40 @@ void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
ASSERT(callback->getter() != NULL);
Register reg = CallbackHandlerFrontend(
- interceptor_holder, interceptor_reg, holder, name, &success, callback);
- __ bind(&success);
+ IC::CurrentTypeOf(interceptor_holder, isolate()),
+ interceptor_reg, holder, name, callback);
GenerateLoadCallback(reg, callback);
}
}
Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
- Handle<Map> receiver_map,
+ Handle<Type> type,
Handle<Code> handler,
Handle<Name> name) {
- MapHandleList receiver_maps(1);
- receiver_maps.Add(receiver_map);
+ TypeHandleList types(1);
CodeHandleList handlers(1);
+ types.Add(type);
handlers.Add(handler);
- Code::StubType type = handler->type();
- return CompilePolymorphicIC(&receiver_maps, &handlers, name, type, PROPERTY);
+ Code::StubType stub_type = handler->type();
+ return CompilePolymorphicIC(&types, &handlers, name, stub_type, PROPERTY);
}
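
// CompileMonomorphicIC above is the one-element case of the polymorphic
// compiler: it wraps the single (type, handler) pair in lists and reuses the
// same code path rather than keeping a separate monomorphic pipeline. A
// sketch of that degenerate-case reuse (types are stand-ins):

#include <string>
#include <vector>

std::string CompilePolymorphic(const std::vector<int>& types,
                               const std::vector<std::string>& handlers) {
  return "ic(" + std::to_string(types.size()) + " cases)";
}

std::string CompileMonomorphic(int type, const std::string& handler) {
  // One case == a polymorphic IC of length 1; no separate code path needed.
  return CompilePolymorphic({type}, {handler});
}
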
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<JSObject> object,
+ Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> getter) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
-
- __ bind(&success);
- GenerateLoadViaGetter(masm(), getter);
+ HandlerFrontend(type, receiver(), holder, name);
+ GenerateLoadViaGetter(masm(), receiver(), getter);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
+Handle<Code> StoreStubCompiler::CompileStoreTransition(
Handle<JSObject> object,
LookupResult* lookup,
Handle<Map> transition,
@@ -1854,8 +1477,8 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
} while (holder->GetPrototype()->IsJSObject());
}
- Register holder_reg =
- HandlerFrontendHeader(object, receiver(), holder, name, &miss);
+ Register holder_reg = HandlerFrontendHeader(
+ IC::CurrentTypeOf(object, isolate()), receiver(), holder, name, &miss);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
@@ -1883,16 +1506,17 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
TailCallBuiltin(masm(), SlowBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::MAP_TRANSITION, name);
+ return GetCode(kind(), Code::FAST, name);
}
-Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Name> name) {
+Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name) {
Label miss;
- HandlerFrontendHeader(object, receiver(), object, name, &miss);
+ HandlerFrontendHeader(IC::CurrentTypeOf(object, isolate()),
+ receiver(), object, name, &miss);
// Generate store field code.
GenerateStoreField(masm(),
@@ -1906,7 +1530,7 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::FIELD, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -1915,13 +1539,11 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> holder,
Handle<Name> name,
Handle<JSFunction> setter) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
-
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
GenerateStoreViaSetter(masm(), setter);
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -1935,8 +1557,9 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
elements_kind).GetCode(isolate());
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
} else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
+ Handle<Code> stub = FLAG_compiled_keyed_dictionary_loads
+ ? KeyedLoadDictionaryElementStub().GetCode(isolate())
+ : KeyedLoadDictionaryElementPlatformStub().GetCode(isolate());
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
}
@@ -1957,11 +1580,11 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
stub = KeyedStoreFastElementStub(
is_jsarray,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
} else {
stub = KeyedStoreElementStub(is_jsarray,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
}
__ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
@@ -1982,23 +1605,33 @@ void StubCompiler::TailCallBuiltin(MacroAssembler* masm, Builtins::Name name) {
}
-void LoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-}
-
-
-void KeyedLoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-}
-
-
-void StoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
+void BaseLoadStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ GDBJITInterface::CodeTag tag;
+ if (kind_ == Code::LOAD_IC) {
+ tag = GDBJITInterface::LOAD_IC;
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ tag = GDBJITInterface::KEYED_LOAD_IC;
+ } else if (kind_ == Code::STORE_IC) {
+ tag = GDBJITInterface::STORE_IC;
+ } else {
+ tag = GDBJITInterface::KEYED_STORE_IC;
+ }
+ GDBJIT(AddCode(tag, *name, *code));
+#endif
}
-void KeyedStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
+void BaseLoadStoreStubCompiler::InitializeRegisters() {
+ if (kind_ == Code::LOAD_IC) {
+ registers_ = LoadStubCompiler::registers();
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ registers_ = KeyedLoadStubCompiler::registers();
+ } else if (kind_ == Code::STORE_IC) {
+ registers_ = StoreStubCompiler::registers();
+ } else {
+ registers_ = KeyedStoreStubCompiler::registers();
+ }
}
@@ -2006,8 +1639,7 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name,
InlineCacheState state) {
- Code::Flags flags = Code::ComputeFlags(
- kind, state, extra_state(), type);
+ Code::Flags flags = Code::ComputeFlags(kind, state, extra_state(), type);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -2015,25 +1647,11 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
}
-Handle<Code> BaseLoadStubCompiler::GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name) {
- ASSERT(type != Code::NORMAL);
+Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name) {
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState, type, kind);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
- JitEvent(name, code);
- return code;
-}
-
-
-Handle<Code> BaseStoreStubCompiler::GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name) {
- ASSERT(type != Code::NORMAL);
- Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, extra_state(), type, kind);
+ Code::HANDLER, MONOMORPHIC, extra_state(), type, kind, cache_holder_);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
JitEvent(name, code);
@@ -2049,6 +1667,8 @@ void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
if ((receiver_map->instance_type() & kNotStringTag) == 0) {
cached_stub = isolate()->builtins()->KeyedLoadIC_String();
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
} else {
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
ElementsKind elements_kind = receiver_map->elements_kind();
@@ -2092,19 +1712,21 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
elements_kind,
transitioned_map->elements_kind(),
is_js_array,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
if (receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements()) {
cached_stub = KeyedStoreFastElementStub(
is_js_array,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
} else {
cached_stub = KeyedStoreElementStub(
is_js_array,
elements_kind,
- store_mode_).GetCode(isolate());
+ store_mode()).GetCode(isolate());
}
}
ASSERT(!cached_stub.is_null());
@@ -2129,12 +1751,11 @@ void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
CallStubCompiler::CallStubCompiler(Isolate* isolate,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
InlineCacheHolderFlag cache_holder)
- : StubCompiler(isolate),
+ : StubCompiler(isolate, extra_state),
arguments_(argc),
kind_(kind),
- extra_state_(extra_state),
cache_holder_(cache_holder) {
}
@@ -2201,11 +1822,8 @@ Handle<Code> CallStubCompiler::CompileCustomCall(
Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
Handle<Name> name) {
int argc = arguments_.immediate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
- extra_state_,
- type,
- argc,
- cache_holder_);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind_, extra_state(), cache_holder_, type, argc);
return GetCodeWithFlags(flags, name);
}
@@ -2215,7 +1833,7 @@ Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
if (function->shared()->name()->IsString()) {
function_name = Handle<String>(String::cast(function->shared()->name()));
}
- return GetCode(Code::CONSTANT, function_name);
+ return GetCode(Code::FAST, function_name);
}
@@ -2243,12 +1861,12 @@ int CallOptimization::GetPrototypeDepthOfExpectedType(
if (expected_receiver_type_.is_null()) return 0;
int depth = 0;
while (!object.is_identical_to(holder)) {
- if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
+ if (expected_receiver_type_->IsTemplateFor(object->map())) return depth;
object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
++depth;
}
- if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
+ if (expected_receiver_type_->IsTemplateFor(holder->map())) return depth;
return kInvalidProtoDepth;
}
diff --git a/chromium/v8/src/stub-cache.h b/chromium/v8/src/stub-cache.h
index 12d4fc5266a..ebf0bd3c917 100644
--- a/chromium/v8/src/stub-cache.h
+++ b/chromium/v8/src/stub-cache.h
@@ -83,172 +83,20 @@ class StubCache {
Handle<Code> FindIC(Handle<Name> name,
Handle<Map> stub_holder_map,
Code::Kind kind,
- Code::StubType type,
- Code::ExtraICState extra_state = Code::kNoExtraICState);
+ ExtraICState extra_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP);
- Handle<Code> FindIC(Handle<Name> name,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type,
- Code::ExtraICState extra_state = Code::kNoExtraICState);
-
- Handle<Code> FindLoadHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Handle<JSObject> stub_holder,
- Code::Kind kind,
- Code::StubType type);
-
- Handle<Code> FindStoreHandler(Handle<Name> name,
- Handle<JSObject> receiver,
- Code::Kind kind,
- Code::StubType type,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name);
-
- Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name);
-
- Handle<Code> ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<HeapObject> receiver,
- Handle<Code> handler,
- Handle<Name> name,
- StrictModeFlag strict_mode);
-
- // Computes the right stub matching. Inserts the result in the
- // cache before returning. This might compile a stub if needed.
- Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
- Handle<JSObject> object);
-
- Handle<Code> ComputeLoadField(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex field_index,
- Representation representation);
-
- Handle<Code> ComputeLoadCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> ComputeLoadCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization);
-
- Handle<Code> ComputeLoadViaGetter(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> getter);
-
- Handle<Code> ComputeLoadConstant(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Object> value);
-
- Handle<Code> ComputeLoadInterceptor(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder);
-
- Handle<Code> ComputeLoadNormal(Handle<Name> name,
- Handle<JSObject> object);
-
- Handle<Code> ComputeLoadGlobal(Handle<Name> name,
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- bool is_dont_delete);
-
- // ---
-
- Handle<Code> ComputeKeyedLoadField(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex field_index,
- Representation representation);
-
- Handle<Code> ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> ComputeKeyedLoadCallback(
- Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- const CallOptimization& call_optimization);
-
- Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Object> value);
-
- Handle<Code> ComputeKeyedLoadInterceptor(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder);
-
- // ---
-
- Handle<Code> ComputeStoreField(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreTransition(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreGlobal(Handle<Name> name,
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Object> value,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreCallback(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
-                                    const CallOptimization& call_optimization,
- StrictModeFlag strict_mode);
-
- Handle<Code> ComputeStoreViaSetter(Handle<Name> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> setter,
- StrictModeFlag strict_mode);
+ Handle<Code> FindHandler(Handle<Name> name,
+ Handle<Map> map,
+ Code::Kind kind,
+ InlineCacheHolderFlag cache_holder = OWN_MAP);
- Handle<Code> ComputeStoreInterceptor(Handle<Name> name,
- Handle<JSObject> object,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeMonomorphicIC(Handle<Name> name,
+ Handle<Type> type,
+ Handle<Code> handler,
+ ExtraICState extra_ic_state);
- // ---
-
- Handle<Code> ComputeKeyedStoreField(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- StrictModeFlag strict_mode);
- Handle<Code> ComputeKeyedStoreTransition(Handle<Name> name,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeLoadNonexistent(Handle<Name> name, Handle<Type> type);
Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
@@ -256,11 +104,9 @@ class StubCache {
StrictModeFlag strict_mode,
KeyedAccessStoreMode store_mode);
- // ---
-
Handle<Code> ComputeCallField(int argc,
Code::Kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
@@ -268,7 +114,7 @@ class StubCache {
Handle<Code> ComputeCallConstant(int argc,
Code::Kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder,
@@ -276,14 +122,14 @@ class StubCache {
Handle<Code> ComputeCallInterceptor(int argc,
Code::Kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<Object> object,
Handle<JSObject> holder);
Handle<Code> ComputeCallGlobal(int argc,
Code::Kind,
- Code::ExtraICState extra_state,
+ ExtraICState extra_state,
Handle<Name> name,
Handle<JSObject> object,
Handle<GlobalObject> holder,
@@ -298,21 +144,21 @@ class StubCache {
Handle<Code> ComputeCallPreMonomorphic(int argc,
Code::Kind kind,
- Code::ExtraICState extra_state);
+ ExtraICState extra_state);
Handle<Code> ComputeCallNormal(int argc,
Code::Kind kind,
- Code::ExtraICState state);
+ ExtraICState state);
Handle<Code> ComputeCallArguments(int argc);
Handle<Code> ComputeCallMegamorphic(int argc,
Code::Kind kind,
- Code::ExtraICState state);
+ ExtraICState state);
Handle<Code> ComputeCallMiss(int argc,
Code::Kind kind,
- Code::ExtraICState state);
+ ExtraICState state);
// ---
@@ -326,16 +172,11 @@ class StubCache {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
- Handle<Code> ComputePolymorphicLoadIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name);
-
- Handle<Code> ComputePolymorphicStoreIC(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputePolymorphicIC(TypeHandleList* types,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ ExtraICState extra_ic_state);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
@@ -523,8 +364,10 @@ enum IcCheckType { ELEMENT, PROPERTY };
// The stub compilers compile stubs for the stub cache.
class StubCompiler BASE_EMBEDDED {
public:
- explicit StubCompiler(Isolate* isolate)
- : isolate_(isolate), masm_(isolate, NULL, 256), failure_(NULL) { }
+ explicit StubCompiler(Isolate* isolate,
+ ExtraICState extra_ic_state = kNoExtraICState)
+ : isolate_(isolate), extra_ic_state_(extra_ic_state),
+ masm_(isolate, NULL, 256), failure_(NULL) { }
// Functions to compile either CallIC or KeyedCallIC. The specific kind
// is extracted from the code flags.
@@ -584,8 +427,7 @@ class StubCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss_label,
- bool support_wrappers);
+ Label* miss_label);
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
@@ -602,15 +444,6 @@ class StubCompiler BASE_EMBEDDED {
Register scratch,
Label* miss);
- // Calls GenerateCheckPropertyCell for each global object in the prototype
- // chain from object to (but not including) holder.
- static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss);
-
static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
// Generates code that verifies that the property holder has not changed
@@ -628,7 +461,7 @@ class StubCompiler BASE_EMBEDDED {
// The function can optionally (when save_at_depth !=
// kInvalidProtoDepth) save the object at the given depth by moving
// it to [esp + kPointerSize].
- Register CheckPrototypes(Handle<JSObject> object,
+ Register CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -637,11 +470,11 @@ class StubCompiler BASE_EMBEDDED {
Handle<Name> name,
Label* miss,
PrototypeCheckType check = CHECK_ALL_MAPS) {
- return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
+ return CheckPrototypes(type, object_reg, holder, holder_reg, scratch1,
scratch2, name, kInvalidProtoDepth, miss, check);
}
- Register CheckPrototypes(Handle<JSObject> object,
+ Register CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -652,11 +485,14 @@ class StubCompiler BASE_EMBEDDED {
Label* miss,
PrototypeCheckType check = CHECK_ALL_MAPS);
+ void GenerateBooleanCheck(Register object, Label* miss);
protected:
Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
+ ExtraICState extra_state() { return extra_ic_state_; }
+
MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }
@@ -672,6 +508,7 @@ class StubCompiler BASE_EMBEDDED {
private:
Isolate* isolate_;
+ const ExtraICState extra_ic_state_;
MacroAssembler masm_;
Failure* failure_;
};
@@ -682,15 +519,22 @@ enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
class BaseLoadStoreStubCompiler: public StubCompiler {
public:
- BaseLoadStoreStubCompiler(Isolate* isolate, Register* registers)
- : StubCompiler(isolate), registers_(registers) { }
+ BaseLoadStoreStubCompiler(Isolate* isolate,
+ Code::Kind kind,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP)
+ : StubCompiler(isolate, extra_ic_state),
+ kind_(kind),
+ cache_holder_(cache_holder) {
+ InitializeRegisters();
+ }
virtual ~BaseLoadStoreStubCompiler() { }
- Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
+ Handle<Code> CompileMonomorphicIC(Handle<Type> type,
Handle<Code> handler,
Handle<Name> name);
- Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
+ Handle<Code> CompilePolymorphicIC(TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -712,94 +556,136 @@ class BaseLoadStoreStubCompiler: public StubCompiler {
}
protected:
- virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ virtual Register HandlerFrontendHeader(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss) = 0;
- virtual void HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) = 0;
+ virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss) = 0;
- Register HandlerFrontend(Handle<JSObject> object,
+ Register HandlerFrontend(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
- Handle<Name> name,
- Label* success);
+ Handle<Name> name);
+
+ Handle<Code> GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name);
Handle<Code> GetICCode(Code::Kind kind,
Code::StubType type,
Handle<Name> name,
InlineCacheState state = MONOMORPHIC);
+ Code::Kind kind() { return kind_; }
+
+ Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+ if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
+ if (kind_ == Code::LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind_ == Code::KEYED_LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind_ == Code::STORE_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG;
+ } else {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
+ }
+ }
+ void JitEvent(Handle<Name> name, Handle<Code> code);
- virtual Code::ExtraICState extra_state() { return Code::kNoExtraICState; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0;
- virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0;
- virtual Code::Kind kind() = 0;
virtual Register receiver() = 0;
virtual Register name() = 0;
virtual Register scratch1() = 0;
virtual Register scratch2() = 0;
virtual Register scratch3() = 0;
+ void InitializeRegisters();
+
+ bool IncludesNumberType(TypeHandleList* types);
+
+ Code::Kind kind_;
+ InlineCacheHolderFlag cache_holder_;
Register* registers_;
};
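
The new log_kind() above folds four per-subclass virtual overrides into one
dispatch: the logger tag is a pure function of the IC kind and whether the
code object is monomorphic. A standalone sketch of that mapping, using a
stand-in enum and tag strings rather than the real Code/Logger types:

    #include <cassert>
    #include <cstring>

    enum Kind { LOAD_IC, KEYED_LOAD_IC, STORE_IC, KEYED_STORE_IC };

    // Rows follow the Kind enum; column 0 is polymorphic, column 1 monomorphic.
    static const char* LogTag(Kind kind, bool monomorphic) {
      static const char* const kTags[4][2] = {
          {"LOAD_POLYMORPHIC_IC_TAG",        "LOAD_IC_TAG"},
          {"KEYED_LOAD_POLYMORPHIC_IC_TAG",  "KEYED_LOAD_IC_TAG"},
          {"STORE_POLYMORPHIC_IC_TAG",       "STORE_IC_TAG"},
          {"KEYED_STORE_POLYMORPHIC_IC_TAG", "KEYED_STORE_IC_TAG"}};
      return kTags[kind][monomorphic ? 1 : 0];
    }

    int main() {
      assert(strcmp(LogTag(LOAD_IC, true), "LOAD_IC_TAG") == 0);
      assert(strcmp(LogTag(KEYED_STORE_IC, false),
                    "KEYED_STORE_POLYMORPHIC_IC_TAG") == 0);
      return 0;
    }
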
-class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
+class LoadStubCompiler: public BaseLoadStoreStubCompiler {
public:
- BaseLoadStubCompiler(Isolate* isolate, Register* registers)
- : BaseLoadStoreStubCompiler(isolate, registers) { }
- virtual ~BaseLoadStubCompiler() { }
-
- Handle<Code> CompileLoadField(Handle<JSObject> object,
+ LoadStubCompiler(Isolate* isolate,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP,
+ Code::Kind kind = Code::LOAD_IC)
+ : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state,
+ cache_holder) { }
+ virtual ~LoadStubCompiler() { }
+
+ Handle<Code> CompileLoadField(Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
PropertyIndex index,
Representation representation);
- Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+ Handle<Code> CompileLoadCallback(Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
- Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+ Handle<Code> CompileLoadCallback(Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization);
- Handle<Code> CompileLoadConstant(Handle<JSObject> object,
+ Handle<Code> CompileLoadConstant(Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value);
- Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
+ Handle<Code> CompileLoadInterceptor(Handle<Type> type,
Handle<JSObject> holder,
Handle<Name> name);
+ Handle<Code> CompileLoadViaGetter(Handle<Type> type,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<JSFunction> getter);
+
+ static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
+ Handle<JSFunction> getter);
+
+ Handle<Code> CompileLoadNonexistent(Handle<Type> type,
+ Handle<JSObject> last,
+ Handle<Name> name);
+
+ Handle<Code> CompileLoadGlobal(Handle<Type> type,
+ Handle<GlobalObject> holder,
+ Handle<PropertyCell> cell,
+ Handle<Name> name,
+ bool is_dont_delete);
+
+ static Register* registers();
+
protected:
- virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ virtual Register HandlerFrontendHeader(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss);
- virtual void HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss);
+ virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss);
- Register CallbackHandlerFrontend(Handle<JSObject> object,
+ Register CallbackHandlerFrontend(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback);
- void NonexistentHandlerFrontend(Handle<JSObject> object,
+ void NonexistentHandlerFrontend(Handle<Type> type,
Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<JSGlobalObject> global);
+ Handle<Name> name);
void GenerateLoadField(Register reg,
Handle<JSObject> holder,
@@ -810,7 +696,7 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<ExecutableAccessorInfo> callback);
void GenerateLoadCallback(const CallOptimization& call_optimization);
void GenerateLoadInterceptor(Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> holder,
LookupResult* lookup,
Handle<Name> name);
@@ -819,10 +705,6 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
LookupResult* lookup);
- Handle<Code> GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name);
-
virtual Register receiver() { return registers_[0]; }
virtual Register name() { return registers_[1]; }
virtual Register scratch1() { return registers_[2]; }
@@ -832,46 +714,13 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
};
-class LoadStubCompiler: public BaseLoadStubCompiler {
- public:
- explicit LoadStubCompiler(Isolate* isolate)
- : BaseLoadStubCompiler(isolate, registers()) { }
-
- Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global);
-
- static void GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadViaGetter(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<PropertyCell> cell,
- Handle<Name> name,
- bool is_dont_delete);
-
- private:
- static Register* registers();
- virtual Code::Kind kind() { return Code::LOAD_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
-};
-
-
-class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
+class KeyedLoadStubCompiler: public LoadStubCompiler {
public:
- explicit KeyedLoadStubCompiler(Isolate* isolate)
- : BaseLoadStubCompiler(isolate, registers()) { }
+ KeyedLoadStubCompiler(Isolate* isolate,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ InlineCacheHolderFlag cache_holder = OWN_MAP)
+ : LoadStubCompiler(isolate, extra_ic_state, cache_holder,
+ Code::KEYED_LOAD_IC) { }
Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
@@ -880,30 +729,25 @@ class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
- private:
+ protected:
static Register* registers();
- virtual Code::Kind kind() { return Code::KEYED_LOAD_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
+
+ private:
virtual void GenerateNameCheck(Handle<Name> name,
Register name_reg,
Label* miss);
+ friend class BaseLoadStoreStubCompiler;
};
-class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
+class StoreStubCompiler: public BaseLoadStoreStubCompiler {
public:
- BaseStoreStubCompiler(Isolate* isolate,
- StrictModeFlag strict_mode,
- Register* registers)
- : BaseLoadStoreStubCompiler(isolate, registers),
- strict_mode_(strict_mode) { }
+ StoreStubCompiler(Isolate* isolate,
+ ExtraICState extra_ic_state,
+ Code::Kind kind = Code::STORE_IC)
+ : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state) {}
- virtual ~BaseStoreStubCompiler() { }
+ virtual ~StoreStubCompiler() { }
Handle<Code> CompileStoreTransition(Handle<JSObject> object,
LookupResult* lookup,
@@ -944,16 +788,27 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
Register scratch2,
Label* miss_label);
- static Builtins::Name MissBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
- case Code::STORE_IC: return Builtins::kStoreIC_Miss;
- case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss;
- case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss;
- default: UNREACHABLE();
- }
- return Builtins::kLoadIC_Miss;
- }
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
+
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
+
+ static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<JSFunction> setter);
+
+ Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<JSFunction> setter);
+
+ Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
+ Handle<Name> name);
+
static Builtins::Name SlowBuiltin(Code::Kind kind) {
switch (kind) {
case Code::STORE_IC: return Builtins::kStoreIC_Slow;
@@ -964,19 +819,13 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
}
protected:
- virtual Register HandlerFrontendHeader(Handle<JSObject> object,
+ virtual Register HandlerFrontendHeader(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
Label* miss);
- virtual void HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss);
- Handle<Code> GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name);
-
+ virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss);
void GenerateRestoreName(MacroAssembler* masm,
Label* label,
Handle<Name> name);
@@ -987,60 +836,20 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler {
virtual Register scratch1() { return registers_[3]; }
virtual Register scratch2() { return registers_[4]; }
virtual Register scratch3() { return registers_[5]; }
- StrictModeFlag strict_mode() { return strict_mode_; }
- virtual Code::ExtraICState extra_state() { return strict_mode_; }
-
- private:
- StrictModeFlag strict_mode_;
-};
-
-
-class StoreStubCompiler: public BaseStoreStubCompiler {
- public:
- StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
- : BaseStoreStubCompiler(isolate, strict_mode, registers()) { }
-
-
- Handle<Code> CompileStoreCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> CompileStoreCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization);
-
- static void GenerateStoreViaSetter(MacroAssembler* masm,
- Handle<JSFunction> setter);
-
- Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Handle<JSFunction> setter);
- Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
- Handle<Name> name);
+ protected:
+ static Register* registers();
private:
- static Register* registers();
- virtual Code::Kind kind() { return Code::STORE_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
+ friend class BaseLoadStoreStubCompiler;
};
-class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
+class KeyedStoreStubCompiler: public StoreStubCompiler {
public:
KeyedStoreStubCompiler(Isolate* isolate,
- StrictModeFlag strict_mode,
- KeyedAccessStoreMode store_mode)
- : BaseStoreStubCompiler(isolate, strict_mode, registers()),
- store_mode_(store_mode) { }
+ ExtraICState extra_ic_state)
+ : StoreStubCompiler(isolate, extra_ic_state, Code::KEYED_STORE_IC) {}
Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
@@ -1053,8 +862,10 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
protected:
- virtual Code::ExtraICState extra_state() {
- return Code::ComputeExtraICState(store_mode_, strict_mode());
+ static Register* registers();
+
+ KeyedAccessStoreMode store_mode() {
+ return KeyedStoreIC::GetKeyedAccessStoreMode(extra_state());
}
private:
@@ -1062,18 +873,10 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
return registers()[3];
}
- static Register* registers();
- virtual Code::Kind kind() { return Code::KEYED_STORE_IC; }
- virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
- }
- virtual void JitEvent(Handle<Name> name, Handle<Code> code);
virtual void GenerateNameCheck(Handle<Name> name,
Register name_reg,
Label* miss);
- KeyedAccessStoreMode store_mode_;
+ friend class BaseLoadStoreStubCompiler;
};
@@ -1099,21 +902,37 @@ class CallStubCompiler: public StubCompiler {
CallStubCompiler(Isolate* isolate,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state,
- InlineCacheHolderFlag cache_holder);
+ ExtraICState extra_state,
+ InlineCacheHolderFlag cache_holder = OWN_MAP);
Handle<Code> CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
Handle<Name> name);
- void CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success);
+ // Patch the global proxy over the global object if the global object is the
+ // receiver.
+ void PatchGlobalProxy(Handle<Object> object);
- void CompileHandlerBackend(Handle<JSFunction> function);
+ // Returns the register containing the holder of |name|.
+ Register HandlerFrontendHeader(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* miss);
+ void HandlerFrontendFooter(Label* miss);
+
+ void GenerateJumpFunctionIgnoreReceiver(Handle<JSFunction> function);
+ void GenerateJumpFunction(Handle<Object> object,
+ Handle<JSFunction> function);
+ void GenerateJumpFunction(Handle<Object> object,
+ Register function,
+ Label* miss);
+  // Used to call |actual_closure|, a closure with the same shared function
+  // info as |function|.
+ void GenerateJumpFunction(Handle<Object> object,
+ Register actual_closure,
+ Handle<JSFunction> function);
Handle<Code> CompileCallConstant(Handle<Object> object,
Handle<JSObject> holder,
@@ -1162,6 +981,8 @@ class CallStubCompiler: public StubCompiler {
Handle<JSFunction> function,
Handle<String> name);
+ CallKind call_kind();
+
Handle<Code> GetCode(Code::StubType type, Handle<Name> name);
Handle<Code> GetCode(Handle<JSFunction> function);
@@ -1169,23 +990,19 @@ class CallStubCompiler: public StubCompiler {
void GenerateNameCheck(Handle<Name> name, Label* miss);
- void GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss);
-
// Generates code to load the function from the cell checking that
// it still contains the same function.
void GenerateLoadFunctionFromCell(Handle<Cell> cell,
Handle<JSFunction> function,
Label* miss);
+ void GenerateFunctionCheck(Register function, Register scratch, Label* miss);
+
// Generates a jump to CallIC miss stub.
void GenerateMissBranch();
const ParameterCount arguments_;
const Code::Kind kind_;
- const Code::ExtraICState extra_state_;
const InlineCacheHolderFlag cache_holder_;
};
@@ -1228,7 +1045,7 @@ class CallOptimization BASE_EMBEDDED {
bool IsCompatibleReceiver(Object* receiver) {
ASSERT(is_simple_api_call());
if (expected_receiver_type_.is_null()) return true;
- return receiver->IsInstanceOf(*expected_receiver_type_);
+ return expected_receiver_type_->IsTemplateFor(receiver);
}
private:
diff --git a/chromium/v8/src/sweeper-thread.cc b/chromium/v8/src/sweeper-thread.cc
index 58c684a54f6..6f3baed11f8 100644
--- a/chromium/v8/src/sweeper-thread.cc
+++ b/chromium/v8/src/sweeper-thread.cc
@@ -105,4 +105,14 @@ void SweeperThread::StartSweeping() {
void SweeperThread::WaitForSweeperThread() {
end_sweeping_semaphore_.Wait();
}
+
+
+int SweeperThread::NumberOfThreads(int max_available) {
+ if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
+ if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
+ if (FLAG_concurrent_sweeping) return max_available - 1;
+ ASSERT(FLAG_parallel_sweeping);
+ return max_available;
+}
+
} } // namespace v8::internal
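
The new NumberOfThreads() encodes a simple precedence among the sweeping
flags: with both modes off there are no helper threads, an explicit
--sweeper_threads count always wins, concurrent sweeping leaves one core free
for the mutator, and parallel sweeping claims every available core. A
self-contained sketch with the flags passed as plain parameters (stand-ins
for the FLAG_* globals, not the real flag machinery):

    #include <cassert>

    // Mirrors the precedence in SweeperThread::NumberOfThreads() above.
    static int NumberOfThreads(bool concurrent, bool parallel,
                               int explicit_threads, int max_available) {
      if (!concurrent && !parallel) return 0;             // sweeping disabled
      if (explicit_threads > 0) return explicit_threads;  // explicit count wins
      if (concurrent) return max_available - 1;           // keep a core for the mutator
      return max_available;                               // parallel: use them all
    }

    int main() {
      assert(NumberOfThreads(false, false, 0, 4) == 0);
      assert(NumberOfThreads(true,  false, 0, 4) == 3);
      assert(NumberOfThreads(true,  false, 2, 4) == 2);
      assert(NumberOfThreads(false, true,  0, 4) == 4);
      return 0;
    }
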
diff --git a/chromium/v8/src/sweeper-thread.h b/chromium/v8/src/sweeper-thread.h
index c36cfc39a29..96255a0972a 100644
--- a/chromium/v8/src/sweeper-thread.h
+++ b/chromium/v8/src/sweeper-thread.h
@@ -51,6 +51,8 @@ class SweeperThread : public Thread {
void WaitForSweeperThread();
intptr_t StealMemory(PagedSpace* space);
+ static int NumberOfThreads(int max_available);
+
private:
Isolate* isolate_;
Heap* heap_;
diff --git a/chromium/v8/src/token.h b/chromium/v8/src/token.h
index 992adaa77c2..39bcc24074a 100644
--- a/chromium/v8/src/token.h
+++ b/chromium/v8/src/token.h
@@ -213,6 +213,10 @@ class Token {
return COMMA <= op && op <= MOD;
}
+ static bool IsTruncatingBinaryOp(Value op) {
+ return BIT_OR <= op && op <= ROR;
+ }
+
static bool IsCompareOp(Value op) {
return EQ <= op && op <= IN;
}
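
Like IsBinaryOp() and IsCompareOp() around it, the new IsTruncatingBinaryOp()
relies on the token enum declaring the bitwise and shift operators as one
contiguous run from BIT_OR to ROR, so membership reduces to a range check. A
sketch with a stand-in enum in that order (the real ordering comes from the
TOKEN_LIST macro):

    enum Value { COMMA, OR, AND, BIT_OR, BIT_XOR, BIT_AND,
                 SHL, SAR, SHR, ROR, ADD, SUB, MUL, DIV, MOD };

    // True exactly for the operators whose operands truncate to int32.
    static bool IsTruncatingBinaryOp(Value op) {
      return BIT_OR <= op && op <= ROR;
    }

    int main() {
      return (IsTruncatingBinaryOp(SHR) && !IsTruncatingBinaryOp(ADD)) ? 0 : 1;
    }
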
diff --git a/chromium/v8/src/transitions-inl.h b/chromium/v8/src/transitions-inl.h
index c4825fcf734..5c7c28b6e5d 100644
--- a/chromium/v8/src/transitions-inl.h
+++ b/chromium/v8/src/transitions-inl.h
@@ -162,9 +162,7 @@ void TransitionArray::SetTarget(int transition_number, Map* value) {
PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
Map* map = GetTarget(transition_number);
- DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = map->LastAdded();
- return descriptors->GetDetails(descriptor);
+ return map->GetLastDescriptorDetails();
}
diff --git a/chromium/v8/src/trig-table.h b/chromium/v8/src/trig-table.h
new file mode 100644
index 00000000000..081c0389ae2
--- /dev/null
+++ b/chromium/v8/src/trig-table.h
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TRIG_TABLE_H_
+#define V8_TRIG_TABLE_H_
+
+
+namespace v8 {
+namespace internal {
+
+class TrigonometricLookupTable : public AllStatic {
+ public:
+ // Casting away const-ness to use as argument for typed array constructor.
+ static void* sin_table() {
+ return const_cast<double*>(&kSinTable[0]);
+ }
+
+ static void* cos_x_interval_table() {
+ return const_cast<double*>(&kCosXIntervalTable[0]);
+ }
+
+ static double samples_over_pi_half() { return kSamplesOverPiHalf; }
+ static int samples() { return kSamples; }
+ static int table_num_bytes() { return kTableSize * sizeof(*kSinTable); }
+ static int table_size() { return kTableSize; }
+
+ private:
+ static const double kSinTable[];
+ static const double kCosXIntervalTable[];
+ static const int kSamples;
+ static const int kTableSize;
+ static const double kSamplesOverPiHalf;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_TRIG_TABLE_H_
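
The header only exposes the raw tables: kSinTable holds sampled sine values
and kCosXIntervalTable the per-interval slopes used for interpolation. A
rough sketch of how a sampled table plus linear interpolation approximates
sin(x) on [0, pi/2] — illustrative constants and table construction, not
V8's actual lookup code:

    #include <cmath>
    #include <vector>

    static const double kPiHalf = 1.5707963267948966;
    static const int kSamples = 1800;  // illustrative sampling density
    static const double kSamplesOverPiHalf = kSamples / kPiHalf;

    // table[i] approximates sin(i / kSamplesOverPiHalf).
    static double TableSin(const std::vector<double>& table, double x) {
      double scaled = x * kSamplesOverPiHalf;  // map [0, pi/2] onto [0, kSamples]
      int i = static_cast<int>(scaled);
      double t = scaled - i;                   // fractional position in interval i
      return table[i] + t * (table[i + 1] - table[i]);
    }

    int main() {
      std::vector<double> table(kSamples + 2);
      for (int i = 0; i < kSamples + 2; i++)
        table[i] = std::sin(i / kSamplesOverPiHalf);
      double err = TableSin(table, 0.5) - std::sin(0.5);  // ~1e-7 at this density
      return std::fabs(err) < 1e-6 ? 0 : 1;
    }
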
diff --git a/chromium/v8/src/type-info.cc b/chromium/v8/src/type-info.cc
index 190eb3e6fff..eed54ce2bcd 100644
--- a/chromium/v8/src/type-info.cc
+++ b/chromium/v8/src/type-info.cc
@@ -99,77 +99,48 @@ Handle<Cell> TypeFeedbackOracle::GetInfoCell(
}
-bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsMap()) return false;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+bool TypeFeedbackOracle::LoadIsUninitialized(TypeFeedbackId id) {
+ Handle<Object> maybe_code = GetInfo(id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
return code->is_inline_cache_stub() && code->ic_state() == UNINITIALIZED;
}
return false;
}
-bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool preliminary_checks = code->is_keyed_load_stub() &&
- code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
- if (!preliminary_checks) return false;
- Map* map = code->FindFirstMap();
- if (map == NULL) return false;
- map = map->CurrentMapForDeprecated();
- return map != NULL && !CanRetainOtherContext(map, *native_context_);
- }
- return false;
-}
-
-
-bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- return code->is_keyed_load_stub() && code->ic_state() == POLYMORPHIC;
+bool TypeFeedbackOracle::LoadIsPreMonomorphic(TypeFeedbackId id) {
+ Handle<Object> maybe_code = GetInfo(id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
+ return code->is_inline_cache_stub() && code->ic_state() == PREMONOMORPHIC;
}
return false;
}
bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsMap()) return false;
- if (!map_or_code->IsCode()) return false;
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Handle<Object> maybe_code = GetInfo(ast_id);
+ if (!maybe_code->IsCode()) return false;
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
return code->ic_state() == UNINITIALIZED;
}
-bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsMap()) return true;
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool preliminary_checks =
- code->is_keyed_store_stub() &&
- code->ic_state() == MONOMORPHIC &&
- Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
- if (!preliminary_checks) return false;
- Map* map = code->FindFirstMap();
- if (map == NULL) return false;
- map = map->CurrentMapForDeprecated();
- return map != NULL && !CanRetainOtherContext(map, *native_context_);
+bool TypeFeedbackOracle::StoreIsPreMonomorphic(TypeFeedbackId ast_id) {
+ Handle<Object> maybe_code = GetInfo(ast_id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
+ return code->ic_state() == PREMONOMORPHIC;
}
return false;
}
bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Handle<Object> maybe_code = GetInfo(ast_id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
return code->is_keyed_store_stub() &&
code->ic_state() == POLYMORPHIC;
}
@@ -177,118 +148,68 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- Handle<Object> value = GetInfo(expr->CallFeedbackId());
- return value->IsMap() || value->IsAllocationSite() || value->IsJSFunction() ||
- value->IsSmi();
+bool TypeFeedbackOracle::CallIsMonomorphic(TypeFeedbackId id) {
+ Handle<Object> value = GetInfo(id);
+ return value->IsAllocationSite() || value->IsJSFunction() || value->IsSmi() ||
+ (value->IsCode() && Handle<Code>::cast(value)->ic_state() == MONOMORPHIC);
}
-bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
- return info->IsAllocationSite() || info->IsJSFunction();
+bool TypeFeedbackOracle::KeyedArrayCallIsHoley(TypeFeedbackId id) {
+ Handle<Object> value = GetInfo(id);
+ Handle<Code> code = Handle<Code>::cast(value);
+ return KeyedArrayCallStub::IsHoley(code);
}
-bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
- ObjectLiteral::Property* prop) {
- Handle<Object> map_or_code = GetInfo(prop->key()->LiteralFeedbackId());
- return map_or_code->IsMap();
+bool TypeFeedbackOracle::CallNewIsMonomorphic(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
+ return info->IsAllocationSite() || info->IsJSFunction();
}
-byte TypeFeedbackOracle::ForInType(ForInStatement* stmt) {
- Handle<Object> value = GetInfo(stmt->ForInFeedbackId());
+byte TypeFeedbackOracle::ForInType(TypeFeedbackId id) {
+ Handle<Object> value = GetInfo(id);
return value->IsSmi() &&
Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker
? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
}
-Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
- ASSERT(LoadIsMonomorphicNormal(expr));
- Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
- return map == NULL || CanRetainOtherContext(map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(map);
- }
- return Handle<Map>::cast(map_or_code);
-}
-
-
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
- TypeFeedbackId ast_id) {
- ASSERT(StoreIsMonomorphicNormal(ast_id));
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
- Map* map = code->FindFirstMap()->CurrentMapForDeprecated();
- return map == NULL || CanRetainOtherContext(map, *native_context_)
- ? Handle<Map>::null()
- : Handle<Map>(map);
- }
- return Handle<Map>::cast(map_or_code);
-}
-
-
KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode(
TypeFeedbackId ast_id) {
- Handle<Object> map_or_code = GetInfo(ast_id);
- if (map_or_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Handle<Object> maybe_code = GetInfo(ast_id);
+ if (maybe_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(maybe_code);
if (code->kind() == Code::KEYED_STORE_IC) {
- return Code::GetKeyedAccessStoreMode(code->extra_ic_state());
+ return KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state());
}
}
return STANDARD_STORE;
}
-void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
- Handle<String> name,
- SmallMapList* types) {
- Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::LOAD_IC);
- CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
-}
-
-
-void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
- Handle<String> name,
- SmallMapList* types) {
- Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, Code::STORE_IC);
- CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
-}
-
-
-void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
+void TypeFeedbackOracle::CallReceiverTypes(TypeFeedbackId id,
Handle<String> name,
+ int arity,
CallKind call_kind,
SmallMapList* types) {
- int arity = expr->arguments()->length();
-
// Note: Currently we do not take string extra ic data into account
// here.
- Code::ExtraICState extra_ic_state =
- CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
+ ContextualMode contextual_mode = call_kind == CALL_AS_FUNCTION
+ ? CONTEXTUAL
+ : NOT_CONTEXTUAL;
+ ExtraICState extra_ic_state =
+ CallIC::Contextual::encode(contextual_mode);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- extra_ic_state,
- Code::NORMAL,
- arity,
- OWN_MAP);
- CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::CALL_IC, extra_ic_state, OWN_MAP, Code::NORMAL, arity);
+ CollectReceiverTypes(id, name, flags, types);
}
-CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
- Handle<Object> value = GetInfo(expr->CallFeedbackId());
+CheckType TypeFeedbackOracle::GetCallCheckType(TypeFeedbackId id) {
+ Handle<Object> value = GetInfo(id);
if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
ASSERT(check != RECEIVER_MAP_CHECK);
@@ -296,8 +217,8 @@ CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
- Handle<Object> info = GetInfo(expr->CallFeedbackId());
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate_->global_context()->array_function());
} else {
@@ -306,8 +227,8 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(TypeFeedbackId id) {
+ Handle<Object> info = GetInfo(id);
if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate_->global_context()->array_function());
} else {
@@ -316,26 +237,20 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
}
-Handle<Cell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(CallNew* expr) {
- return GetInfoCell(expr->CallNewFeedbackId());
+Handle<Cell> TypeFeedbackOracle::GetCallNewAllocationInfoCell(
+ TypeFeedbackId id) {
+ return GetInfoCell(id);
}
-Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
- ObjectLiteral::Property* prop) {
- ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
- return Handle<Map>::cast(GetInfo(prop->key()->LiteralFeedbackId()));
+bool TypeFeedbackOracle::LoadIsBuiltin(
+ TypeFeedbackId id, Builtins::Name builtin) {
+ return *GetInfo(id) == isolate_->builtins()->builtin(builtin);
}
-bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
- return *GetInfo(expr->PropertyFeedbackId()) ==
- isolate_->builtins()->builtin(id);
-}
-
-
-bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) {
- Handle<Object> object = GetInfo(expr->PropertyFeedbackId());
+bool TypeFeedbackOracle::LoadIsStub(TypeFeedbackId id, ICStub* stub) {
+ Handle<Object> object = GetInfo(id);
if (!object->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_load_stub()) return false;
@@ -359,9 +274,9 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
Handle<Map> map;
Map* raw_map = code->FindFirstMap();
if (raw_map != NULL) {
- raw_map = raw_map->CurrentMapForDeprecated();
- if (raw_map != NULL && !CanRetainOtherContext(raw_map, *native_context_)) {
- map = handle(raw_map, isolate_);
+ map = Map::CurrentMapForDeprecated(handle(raw_map));
+ if (!map.is_null() && CanRetainOtherContext(*map, *native_context_)) {
+ map = Handle<Map>::null();
}
}
@@ -381,20 +296,27 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg) {
+ Maybe<int>* fixed_right_arg,
+ Token::Value op) {
Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
- // For some binary ops we don't have ICs, e.g. Token::COMMA.
+ // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
+ // operations covered by the BinaryOpIC we should always have them.
+ ASSERT(op < BinaryOpIC::State::FIRST_TOKEN ||
+ op > BinaryOpIC::State::LAST_TOKEN);
*left = *right = *result = handle(Type::None(), isolate_);
+ *fixed_right_arg = Maybe<int>();
return;
}
Handle<Code> code = Handle<Code>::cast(object);
- ASSERT(code->is_binary_op_stub());
+ ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
+ BinaryOpIC::State state(code->extended_extra_ic_state());
+ ASSERT_EQ(op, state.op());
- int minor_key = code->stub_info();
- BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate());
- *fixed_right_arg =
- BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key);
+ *left = state.GetLeftType(isolate());
+ *right = state.GetRightType(isolate());
+ *result = state.GetResultType(isolate());
+ *fixed_right_arg = state.fixed_right_arg();
}
@@ -410,53 +332,69 @@ Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) {
}
-TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
- Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
+Handle<Type> TypeFeedbackOracle::CountType(TypeFeedbackId id) {
+ Handle<Object> object = GetInfo(id);
+ if (!object->IsCode()) return handle(Type::None(), isolate_);
Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_binary_op_stub()) return unknown;
-
- BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &unused_result_type);
- // CountOperations should always have +1 or -1 as their right input.
- ASSERT(right_type == BinaryOpIC::SMI ||
- right_type == BinaryOpIC::UNINITIALIZED);
-
- switch (left_type) {
- case BinaryOpIC::UNINITIALIZED:
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
+ ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
+ BinaryOpIC::State state(code->extended_extra_ic_state());
+ return state.GetLeftType(isolate());
+}
+
+
+void TypeFeedbackOracle::PropertyReceiverTypes(
+ TypeFeedbackId id, Handle<String> name,
+ SmallMapList* receiver_types, bool* is_prototype) {
+ receiver_types->Clear();
+ FunctionPrototypeStub proto_stub(Code::LOAD_IC);
+ *is_prototype = LoadIsStub(id, &proto_stub);
+ if (!*is_prototype) {
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
+ Code::NORMAL, Code::LOAD_IC);
+ CollectReceiverTypes(id, name, flags, receiver_types);
}
- UNREACHABLE();
- return unknown;
}
-void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
- SmallMapList* types) {
- MapHandleList maps;
- code->FindAllMaps(&maps);
- types->Reserve(maps.length(), zone());
- for (int i = 0; i < maps.length(); i++) {
- Handle<Map> map(maps.at(i));
- if (!CanRetainOtherContext(*map, *native_context_)) {
- types->AddMapIfMissing(map, zone());
- }
+void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
+ TypeFeedbackId id, SmallMapList* receiver_types, bool* is_string) {
+ receiver_types->Clear();
+ *is_string = false;
+ if (LoadIsBuiltin(id, Builtins::kKeyedLoadIC_String)) {
+ *is_string = true;
+ } else {
+ CollectReceiverTypes(id, receiver_types);
}
}
+void TypeFeedbackOracle::AssignmentReceiverTypes(
+ TypeFeedbackId id, Handle<String> name, SmallMapList* receiver_types) {
+ receiver_types->Clear();
+ Code::Flags flags = Code::ComputeFlags(
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
+ Code::NORMAL, Code::STORE_IC);
+ CollectReceiverTypes(id, name, flags, receiver_types);
+}
+
+
+void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
+ TypeFeedbackId id, SmallMapList* receiver_types,
+ KeyedAccessStoreMode* store_mode) {
+ receiver_types->Clear();
+ CollectReceiverTypes(id, receiver_types);
+ *store_mode = GetStoreMode(id);
+}
+
+
+void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types) {
+ receiver_types->Clear();
+ CollectReceiverTypes(id, receiver_types);
+}
+
+
void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<String> name,
Code::Flags flags,
@@ -464,24 +402,16 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<Object> object = GetInfo(ast_id);
if (object->IsUndefined() || object->IsSmi()) return;
- if (object.is_identical_to(isolate_->builtins()->StoreIC_GlobalProxy())) {
- // TODO(fschneider): We could collect the maps and signal that
- // we need a generic store (or load) here.
- ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
- } else if (object->IsMap()) {
- types->AddMapIfMissing(Handle<Map>::cast(object), zone());
- } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC ||
- Handle<Code>::cast(object)->ic_state() == MONOMORPHIC) {
- CollectPolymorphicMaps(Handle<Code>::cast(object), types);
- } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
- Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+ ASSERT(object->IsCode());
+ Handle<Code> code(Handle<Code>::cast(object));
+
+ if (FLAG_collect_megamorphic_maps_from_stub_cache &&
+ code->ic_state() == MEGAMORPHIC) {
types->Reserve(4, zone());
- ASSERT(object->IsCode());
- isolate_->stub_cache()->CollectMatchingMaps(types,
- name,
- flags,
- native_context_,
- zone());
+ isolate_->stub_cache()->CollectMatchingMaps(
+ types, name, flags, native_context_, zone());
+ } else {
+ CollectReceiverTypes(ast_id, types);
}
}
@@ -520,26 +450,26 @@ bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
}
-void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types) {
+void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
+ SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
if (!object->IsCode()) return;
Handle<Code> code = Handle<Code>::cast(object);
- if (code->kind() == Code::KEYED_LOAD_IC ||
- code->kind() == Code::KEYED_STORE_IC) {
- CollectPolymorphicMaps(code, types);
+ MapHandleList maps;
+ if (code->ic_state() == MONOMORPHIC) {
+ Map* map = code->FindFirstMap();
+ if (map != NULL) maps.Add(handle(map));
+ } else if (code->ic_state() == POLYMORPHIC) {
+ code->FindAllMaps(&maps);
+ } else {
+ return;
}
-}
-
-
-void TypeFeedbackOracle::CollectPolymorphicStoreReceiverTypes(
- TypeFeedbackId ast_id,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (!object->IsCode()) return;
- Handle<Code> code = Handle<Code>::cast(object);
- if (code->kind() == Code::STORE_IC && code->ic_state() == POLYMORPHIC) {
- CollectPolymorphicMaps(code, types);
+ types->Reserve(maps.length(), zone());
+ for (int i = 0; i < maps.length(); i++) {
+ Handle<Map> map(maps.at(i));
+ if (!CanRetainOtherContext(*map, *native_context_)) {
+ types->AddMapIfMissing(map, zone());
+ }
}
}
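
This single CollectReceiverTypes() replaces the two special-purpose
collectors removed below: a MONOMORPHIC stub contributes the one map from
FindFirstMap(), a POLYMORPHIC stub everything FindAllMaps() returns, and any
other state contributes nothing, with foreign-context maps still filtered
out. The shape of that logic with stand-in types:

    #include <cstddef>
    #include <vector>

    enum State { UNINITIALIZED, MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC };
    struct Map {};

    // |first| and |all| stand in for Code::FindFirstMap()/FindAllMaps().
    static void CollectReceiverMaps(State state, Map* first,
                                    const std::vector<Map*>& all,
                                    std::vector<Map*>* out) {
      if (state == MONOMORPHIC) {
        if (first != NULL) out->push_back(first);      // single-map fast case
      } else if (state == POLYMORPHIC) {
        out->insert(out->end(), all.begin(), all.end());
      }                                                // other states: nothing
    }

    int main() {
      Map m;
      std::vector<Map*> all, out;
      CollectReceiverMaps(MONOMORPHIC, &m, all, &out);
      return out.size() == 1 ? 0 : 1;
    }
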
@@ -609,37 +539,17 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
TypeFeedbackId(static_cast<unsigned>((*infos)[i].data()));
Code* target = Code::GetCodeFromTargetAddress(target_address);
switch (target->kind()) {
+ case Code::CALL_IC:
+ if (target->ic_state() == MONOMORPHIC &&
+ target->check_type() != RECEIVER_MAP_CHECK) {
+ SetInfo(ast_id, Smi::FromInt(target->check_type()));
+ break;
+ }
case Code::LOAD_IC:
case Code::STORE_IC:
- case Code::CALL_IC:
case Code::KEYED_CALL_IC:
- if (target->ic_state() == MONOMORPHIC) {
- if (target->kind() == Code::CALL_IC &&
- target->check_type() != RECEIVER_MAP_CHECK) {
- SetInfo(ast_id, Smi::FromInt(target->check_type()));
- } else {
- Object* map = target->FindFirstMap();
- if (map == NULL) {
- SetInfo(ast_id, static_cast<Object*>(target));
- } else if (!CanRetainOtherContext(Map::cast(map),
- *native_context_)) {
- Map* feedback = Map::cast(map)->CurrentMapForDeprecated();
- if (feedback != NULL) SetInfo(ast_id, feedback);
- }
- }
- } else {
- SetInfo(ast_id, target);
- }
- break;
-
case Code::KEYED_LOAD_IC:
case Code::KEYED_STORE_IC:
- if (target->ic_state() == MONOMORPHIC ||
- target->ic_state() == POLYMORPHIC) {
- SetInfo(ast_id, target);
- }
- break;
-
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC:
diff --git a/chromium/v8/src/type-info.h b/chromium/v8/src/type-info.h
index 4b376c84bdc..0ff99e994d9 100644
--- a/chromium/v8/src/type-info.h
+++ b/chromium/v8/src/type-info.h
@@ -218,20 +218,9 @@ enum StringStubFeedback {
// Forward declarations.
-// TODO(rossberg): these should all go away eventually.
-class Assignment;
-class Call;
-class CallNew;
-class CaseClause;
class CompilationInfo;
-class CountOperation;
-class Expression;
-class ForInStatement;
class ICStub;
-class Property;
class SmallMapList;
-class ObjectLiteral;
-class ObjectLiteralProperty;
class TypeFeedbackOracle: public ZoneObject {
@@ -241,55 +230,58 @@ class TypeFeedbackOracle: public ZoneObject {
Isolate* isolate,
Zone* zone);
- bool LoadIsMonomorphicNormal(Property* expr);
- bool LoadIsUninitialized(Property* expr);
- bool LoadIsPolymorphic(Property* expr);
- bool StoreIsUninitialized(TypeFeedbackId ast_id);
- bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
- bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id);
- bool CallIsMonomorphic(Call* expr);
- bool CallNewIsMonomorphic(CallNew* expr);
- bool ObjectLiteralStoreIsMonomorphic(ObjectLiteralProperty* prop);
+ bool LoadIsUninitialized(TypeFeedbackId id);
+ bool LoadIsPreMonomorphic(TypeFeedbackId id);
+ bool StoreIsUninitialized(TypeFeedbackId id);
+ bool StoreIsPreMonomorphic(TypeFeedbackId id);
+ bool StoreIsKeyedPolymorphic(TypeFeedbackId id);
+  bool CallIsMonomorphic(TypeFeedbackId id);
+ bool KeyedArrayCallIsHoley(TypeFeedbackId id);
+ bool CallNewIsMonomorphic(TypeFeedbackId id);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
// to various cycles in our headers.
- byte ForInType(ForInStatement* expr);
+ // TODO(rossberg): once all oracle access is removed from ast.cc, it should
+ // be possible.
+ byte ForInType(TypeFeedbackId id);
- Handle<Map> LoadMonomorphicReceiverType(Property* expr);
- Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId id);
+ KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id);
- KeyedAccessStoreMode GetStoreMode(TypeFeedbackId ast_id);
-
- void LoadReceiverTypes(Property* expr,
- Handle<String> name,
- SmallMapList* types);
- void StoreReceiverTypes(Assignment* expr,
- Handle<String> name,
- SmallMapList* types);
- void CallReceiverTypes(Call* expr,
+ void CallReceiverTypes(TypeFeedbackId id,
Handle<String> name,
+ int arity,
CallKind call_kind,
SmallMapList* types);
- void CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types);
- void CollectPolymorphicStoreReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types);
+ void PropertyReceiverTypes(TypeFeedbackId id,
+ Handle<String> name,
+ SmallMapList* receiver_types,
+ bool* is_prototype);
+ void KeyedPropertyReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types,
+ bool* is_string);
+ void AssignmentReceiverTypes(TypeFeedbackId id,
+ Handle<String> name,
+ SmallMapList* receiver_types);
+ void KeyedAssignmentReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types,
+ KeyedAccessStoreMode* store_mode);
+ void CountReceiverTypes(TypeFeedbackId id,
+ SmallMapList* receiver_types);
+
+ void CollectReceiverTypes(TypeFeedbackId id,
+ SmallMapList* types);
static bool CanRetainOtherContext(Map* map, Context* native_context);
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
- void CollectPolymorphicMaps(Handle<Code> code, SmallMapList* types);
+ CheckType GetCallCheckType(TypeFeedbackId id);
+ Handle<JSFunction> GetCallTarget(TypeFeedbackId id);
+ Handle<JSFunction> GetCallNewTarget(TypeFeedbackId id);
+ Handle<Cell> GetCallNewAllocationInfoCell(TypeFeedbackId id);
- CheckType GetCallCheckType(Call* expr);
- Handle<JSFunction> GetCallTarget(Call* expr);
- Handle<JSFunction> GetCallNewTarget(CallNew* expr);
- Handle<Cell> GetCallNewAllocationInfoCell(CallNew* expr);
-
- Handle<Map> GetObjectLiteralStoreMap(ObjectLiteralProperty* prop);
-
- bool LoadIsBuiltin(Property* expr, Builtins::Name id);
- bool LoadIsStub(Property* expr, ICStub* stub);
+ bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
+ bool LoadIsStub(TypeFeedbackId id, ICStub* stub);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
@@ -301,27 +293,28 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg);
+ Maybe<int>* fixed_right_arg,
+ Token::Value operation);
void CompareType(TypeFeedbackId id,
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* combined);
- Handle<Type> ClauseType(TypeFeedbackId id);
+ Handle<Type> CountType(TypeFeedbackId id);
- TypeInfo IncrementType(CountOperation* expr);
+ Handle<Type> ClauseType(TypeFeedbackId id);
Zone* zone() const { return zone_; }
Isolate* isolate() const { return isolate_; }
private:
- void CollectReceiverTypes(TypeFeedbackId ast_id,
+ void CollectReceiverTypes(TypeFeedbackId id,
Handle<String> name,
Code::Flags flags,
SmallMapList* types);
- void SetInfo(TypeFeedbackId ast_id, Object* target);
+ void SetInfo(TypeFeedbackId id, Object* target);
void BuildDictionary(Handle<Code> code);
void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
@@ -334,10 +327,10 @@ class TypeFeedbackOracle: public ZoneObject {
// Returns an element from the backing store. Returns undefined if
// there is no information.
- Handle<Object> GetInfo(TypeFeedbackId ast_id);
+ Handle<Object> GetInfo(TypeFeedbackId id);
// Return the cell that contains type feedback.
- Handle<Cell> GetInfoCell(TypeFeedbackId ast_id);
+ Handle<Cell> GetInfoCell(TypeFeedbackId id);
private:
Handle<Context> native_context_;
diff --git a/chromium/v8/src/typedarray.js b/chromium/v8/src/typedarray.js
index ec9849df699..21dd9c82d14 100644
--- a/chromium/v8/src/typedarray.js
+++ b/chromium/v8/src/typedarray.js
@@ -30,78 +30,105 @@
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $Array = global.Array;
-
+var $ArrayBuffer = global.ArrayBuffer;
// --------------- Typed Arrays ---------------------
+macro TYPED_ARRAYS(FUNCTION)
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+FUNCTION(1, Uint8Array, 1)
+FUNCTION(2, Int8Array, 1)
+FUNCTION(3, Uint16Array, 2)
+FUNCTION(4, Int16Array, 2)
+FUNCTION(5, Uint32Array, 4)
+FUNCTION(6, Int32Array, 4)
+FUNCTION(7, Float32Array, 4)
+FUNCTION(8, Float64Array, 8)
+FUNCTION(9, Uint8ClampedArray, 1)
+endmacro
+
+macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
+ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ var bufferByteLength = buffer.byteLength;
+ var offset;
+ if (IS_UNDEFINED(byteOffset)) {
+ offset = 0;
+ } else {
+ offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length");
-function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
- function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
- var offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length")
-
- if (offset % elementSize !== 0) {
- throw MakeRangeError("invalid_typed_array_alignment",
- "start offset", name, elementSize);
- }
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- if (offset > bufferByteLength) {
- throw MakeRangeError("invalid_typed_array_offset");
+ if (offset % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "start offset", "NAME", ELEMENT_SIZE);
+ }
+ if (offset > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
}
var newByteLength;
var newLength;
if (IS_UNDEFINED(length)) {
- if (bufferByteLength % elementSize !== 0) {
+ if (bufferByteLength % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- "byte length", name, elementSize);
+ "byte length", "NAME", ELEMENT_SIZE);
}
newByteLength = bufferByteLength - offset;
- newLength = newByteLength / elementSize;
+ newLength = newByteLength / ELEMENT_SIZE;
} else {
var newLength = ToPositiveInteger(length, "invalid_typed_array_length");
- newByteLength = newLength * elementSize;
+ newByteLength = newLength * ELEMENT_SIZE;
}
- if (offset + newByteLength > bufferByteLength) {
+ if ((offset + newByteLength > bufferByteLength)
+ || (newLength > %MaxSmi())) {
throw MakeRangeError("invalid_typed_array_length");
}
- %TypedArrayInitialize(obj, arrayId, buffer, offset, newByteLength);
+ %TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
}
- function ConstructByLength(obj, length) {
- var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ function NAMEConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ?
+ 0 : ToPositiveInteger(length, "invalid_typed_array_length");
if (l > %MaxSmi()) {
throw MakeRangeError("invalid_typed_array_length");
}
- var byteLength = l * elementSize;
- var buffer = new global.ArrayBuffer(byteLength);
- %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ var byteLength = l * ELEMENT_SIZE;
+ var buffer = new $ArrayBuffer(byteLength);
+ %TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
}
- function ConstructByArrayLike(obj, arrayLike) {
+ function NAMEConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
- var l = ToPositiveInteger(length, "invalid_typed_array_length");
- if(!%TypedArrayInitializeFromArrayLike(obj, arrayId, arrayLike, l)) {
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ if (l > %MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+    if (!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
for (var i = 0; i < l; i++) {
+      // It is crucial that we let any exceptions from arrayLike[i]
+ // propagate outside the function.
obj[i] = arrayLike[i];
}
}
}
- return function (arg1, arg2, arg3) {
+ function NAMEConstructor(arg1, arg2, arg3) {
+
if (%_IsConstructCall()) {
if (IS_ARRAYBUFFER(arg1)) {
- ConstructByArrayBuffer(this, arg1, arg2, arg3);
+ NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
} else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
- ConstructByLength(this, arg1);
+ NAMEConstructByLength(this, arg1);
} else {
- ConstructByArrayLike(this, arg1);
+ NAMEConstructByArrayLike(this, arg1);
}
} else {
- throw MakeTypeError("constructor_not_function", [name])
+ throw MakeTypeError("constructor_not_function", ["NAME"])
}
}
-}
+endmacro
+
+TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
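
The macro blocks above are the natives-preprocessor version of the classic X-macro pattern: the TYPED_ARRAYS list is written once and expanded repeatedly to stamp out per-type constructors. A hedged C++ analogue of the same technique, with the list entries and constant names invented for the sketch:

    #include <cstdio>

    // The list is defined once...
    #define TYPED_ARRAYS(F) \
      F(1, Uint8Array, 1)   \
      F(3, Uint16Array, 2)  \
      F(7, Float32Array, 4)

    // ...and expanded as many times as needed, once per code shape.
    #define DECLARE_SIZE(id, name, size) const int k##name##ElementSize = size;
    TYPED_ARRAYS(DECLARE_SIZE)
    #undef DECLARE_SIZE

    int main() {
    #define PRINT_ENTRY(id, name, size) \
      std::printf("%s: id=%d, element size=%d\n", #name, id, k##name##ElementSize);
      TYPED_ARRAYS(PRINT_ENTRY)
    #undef PRINT_ENTRY
      return 0;
    }
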
function TypedArrayGetBuffer() {
return %TypedArrayGetBuffer(this);
@@ -248,10 +275,8 @@ function TypedArraySet(obj, offset) {
// -------------------------------------------------------------------
-function SetupTypedArray(arrayId, name, constructor, elementSize) {
+function SetupTypedArray(constructor, fun, elementSize) {
%CheckIsBootstrapping();
- var fun = CreateTypedArrayConstructor(name, elementSize,
- arrayId, constructor);
%SetCode(constructor, fun);
%FunctionSetPrototype(constructor, new $Object());
@@ -273,17 +298,11 @@ function SetupTypedArray(arrayId, name, constructor, elementSize) {
));
}
-// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-SetupTypedArray(1, "Uint8Array", global.Uint8Array, 1);
-SetupTypedArray(2, "Int8Array", global.Int8Array, 1);
-SetupTypedArray(3, "Uint16Array", global.Uint16Array, 2);
-SetupTypedArray(4, "Int16Array", global.Int16Array, 2);
-SetupTypedArray(5, "Uint32Array", global.Uint32Array, 4);
-SetupTypedArray(6, "Int32Array", global.Int32Array, 4);
-SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
-SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
-SetupTypedArray(9, "Uint8ClampedArray", global.Uint8ClampedArray, 1);
+macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
+  SetupTypedArray(global.NAME, NAMEConstructor, ELEMENT_SIZE);
+endmacro
+TYPED_ARRAYS(SETUP_TYPED_ARRAY)
// --------------------------- DataView -----------------------------
@@ -294,8 +313,9 @@ function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
if (!IS_ARRAYBUFFER(buffer)) {
throw MakeTypeError('data_view_not_array_buffer', []);
}
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- var offset = ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+ var bufferByteLength = buffer.byteLength;
+ var offset = IS_UNDEFINED(byteOffset) ?
+ 0 : ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
if (offset > bufferByteLength) {
throw MakeRangeError('invalid_data_view_offset');
}
@@ -334,225 +354,52 @@ function DataViewGetByteLength() {
return %DataViewGetByteLength(this);
}
+macro DATA_VIEW_TYPES(FUNCTION)
+ FUNCTION(Int8)
+ FUNCTION(Uint8)
+ FUNCTION(Int16)
+ FUNCTION(Uint16)
+ FUNCTION(Int32)
+ FUNCTION(Uint32)
+ FUNCTION(Float32)
+ FUNCTION(Float64)
+endmacro
+
function ToPositiveDataViewOffset(offset) {
return ToPositiveInteger(offset, 'invalid_data_view_accessor_offset');
}
-function DataViewGetInt8(offset, little_endian) {
+
+macro DATA_VIEW_GETTER_SETTER(TYPENAME)
+function DataViewGetTYPENAME(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getInt8', this]);
+ ['DataView.getTYPENAME', this]);
}
if (%_ArgumentsLength() < 1) {
throw MakeTypeError('invalid_argument');
}
- return %DataViewGetInt8(this,
+ return %DataViewGetTYPENAME(this,
ToPositiveDataViewOffset(offset),
!!little_endian);
}
-function DataViewSetInt8(offset, value, little_endian) {
+function DataViewSetTYPENAME(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setInt8', this]);
+ ['DataView.setTYPENAME', this]);
}
if (%_ArgumentsLength() < 2) {
throw MakeTypeError('invalid_argument');
}
- %DataViewSetInt8(this,
+ %DataViewSetTYPENAME(this,
ToPositiveDataViewOffset(offset),
TO_NUMBER_INLINE(value),
!!little_endian);
}
+endmacro
-function DataViewGetUint8(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getUint8', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetUint8(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetUint8(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setUint8', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetUint8(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetInt16(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getInt16', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetInt16(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetInt16(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setInt16', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetInt16(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetUint16(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getUint16', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetUint16(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetUint16(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setUint16', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetUint16(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetInt32(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getInt32', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetInt32(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetInt32(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setInt32', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetInt32(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetUint32(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getUint32', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetUint32(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetUint32(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setUint32', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetUint32(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetFloat32(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getFloat32', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetFloat32(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetFloat32(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setFloat32', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetFloat32(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
-
-function DataViewGetFloat64(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.getFloat64', this]);
- }
- if (%_ArgumentsLength() < 1) {
- throw MakeTypeError('invalid_argument');
- }
- return %DataViewGetFloat64(this,
- ToPositiveDataViewOffset(offset),
- !!little_endian);
-}
-
-function DataViewSetFloat64(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['DataView.setFloat64', this]);
- }
- if (%_ArgumentsLength() < 2) {
- throw MakeTypeError('invalid_argument');
- }
- %DataViewSetFloat64(this,
- ToPositiveDataViewOffset(offset),
- TO_NUMBER_INLINE(value),
- !!little_endian);
-}
+DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
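
Each generated getter ultimately reads sizeof(T) bytes at an offset and assembles them in the requested byte order. A self-contained C++ sketch of that core, not V8's actual runtime implementation:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Assemble a 32-bit value from raw bytes in the requested byte
    // order, as a generated DataViewGetUint32 must do under the hood.
    uint32_t GetUint32(const uint8_t* buf, std::size_t offset,
                       bool little_endian) {
      uint32_t result = 0;
      for (int i = 0; i < 4; ++i) {
        int byte_index = little_endian ? i : 3 - i;
        result |= static_cast<uint32_t>(buf[offset + byte_index]) << (8 * i);
      }
      return result;
    }

    int main() {
      const uint8_t bytes[] = {0x12, 0x34, 0x56, 0x78};
      std::printf("LE: 0x%08x\n",
                  static_cast<unsigned>(GetUint32(bytes, 0, true)));   // 0x78563412
      std::printf("BE: 0x%08x\n",
                  static_cast<unsigned>(GetUint32(bytes, 0, false)));  // 0x12345678
      return 0;
    }
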
function SetupDataView() {
%CheckIsBootstrapping();
diff --git a/chromium/v8/src/types.cc b/chromium/v8/src/types.cc
index 70ddccd6a74..485ba885187 100644
--- a/chromium/v8/src/types.cc
+++ b/chromium/v8/src/types.cc
@@ -70,23 +70,23 @@ Handle<Type> Type::Iterator<T>::get_type() {
}
template<>
-Handle<Map> Type::Iterator<Map>::Current() {
+Handle<i::Map> Type::Iterator<i::Map>::Current() {
return get_type()->as_class();
}
template<>
-Handle<v8::internal::Object> Type::Iterator<v8::internal::Object>::Current() {
+Handle<i::Object> Type::Iterator<i::Object>::Current() {
return get_type()->as_constant();
}
template<>
-bool Type::Iterator<Map>::matches(Handle<Type> type) {
+bool Type::Iterator<i::Map>::matches(Handle<Type> type) {
return type->is_class();
}
template<>
-bool Type::Iterator<v8::internal::Object>::matches(Handle<Type> type) {
+bool Type::Iterator<i::Object>::matches(Handle<Type> type) {
return type->is_constant();
}
@@ -105,8 +105,8 @@ void Type::Iterator<T>::Advance() {
index_ = -1;
}
-template class Type::Iterator<Map>;
-template class Type::Iterator<v8::internal::Object>;
+template class Type::Iterator<i::Map>;
+template class Type::Iterator<i::Object>;
// Get the smallest bitset subsuming this type.
@@ -120,98 +120,112 @@ int Type::LubBitset() {
bitset |= union_get(unioned, i)->LubBitset();
}
return bitset;
+ } else if (this->is_class()) {
+ return LubBitset(*this->as_class());
} else {
- Map* map = NULL;
- if (this->is_class()) {
- map = *this->as_class();
- } else {
- Handle<v8::internal::Object> value = this->as_constant();
- if (value->IsSmi()) return kSmi;
- map = HeapObject::cast(*value)->map();
- if (map->instance_type() == ODDBALL_TYPE) {
- if (value->IsUndefined()) return kUndefined;
- if (value->IsNull()) return kNull;
- if (value->IsTrue() || value->IsFalse()) return kBoolean;
- if (value->IsTheHole()) return kAny;
- }
- }
- switch (map->instance_type()) {
- case STRING_TYPE:
- case ASCII_STRING_TYPE:
- case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE:
- case SLICED_STRING_TYPE:
- case SLICED_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case INTERNALIZED_STRING_TYPE:
- case ASCII_INTERNALIZED_STRING_TYPE:
- case CONS_INTERNALIZED_STRING_TYPE:
- case CONS_ASCII_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kString;
- case SYMBOL_TYPE:
- return kSymbol;
- case ODDBALL_TYPE:
- return kOddball;
- case HEAP_NUMBER_TYPE:
- return kDouble;
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_ARRAY_BUFFER_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- if (map->is_undetectable()) return kUndetectable;
- return kOtherObject;
- case JS_ARRAY_TYPE:
- return kArray;
- case JS_FUNCTION_TYPE:
- return kFunction;
- case JS_REGEXP_TYPE:
- return kRegExp;
- case JS_PROXY_TYPE:
- case JS_FUNCTION_PROXY_TYPE:
- return kProxy;
- case MAP_TYPE:
- // When compiling stub templates, the meta map is used as a place holder
- // for the actual map with which the template is later instantiated.
- // We treat it as a kind of type variable whose upper bound is Any.
- // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
- // we must exclude Undetectable here. This makes no sense, really,
- // because it means that the template isn't actually parametric.
- // Also, it doesn't apply elsewhere. 8-(
- // We ought to find a cleaner solution for compiling stubs parameterised
- // over type or class variables, esp ones with bounds...
- return kDetectable;
- case DECLARED_ACCESSOR_INFO_TYPE:
- case EXECUTABLE_ACCESSOR_INFO_TYPE:
- case ACCESSOR_PAIR_TYPE:
- case FIXED_ARRAY_TYPE:
- return kInternal;
- default:
- UNREACHABLE();
- return kNone;
- }
+ return LubBitset(*this->as_constant());
+ }
+}
+
+
+int Type::LubBitset(i::Object* value) {
+ if (value->IsSmi()) return kSmi;
+ i::Map* map = i::HeapObject::cast(value)->map();
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
+ int32_t i;
+ uint32_t u;
+ if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32;
+ if (value->ToUint32(&u)) return kUnsigned32;
+ return kDouble;
+ }
+ if (map->instance_type() == ODDBALL_TYPE) {
+ if (value->IsUndefined()) return kUndefined;
+ if (value->IsNull()) return kNull;
+ if (value->IsBoolean()) return kBoolean;
+ if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone?
+ UNREACHABLE();
+ }
+ return Type::LubBitset(map);
+}
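
The heap-number branch is the new precision win: a number that happens to fit a smaller integer range gets a tighter bitset than the generic kDouble. A standalone sketch of that refinement, assuming a 31-bit Smi payload (an assumption of this sketch, not something the diff states):

    #include <cstdint>
    #include <cstdio>

    enum TypeBits { kSmi = 1, kOtherSigned32 = 2, kUnsigned32 = 4, kDouble = 8 };

    // Classify a number the way LubBitset refines heap numbers above.
    int LubOfNumber(double v) {
      const double kSmiMin = -(1 << 30);       // 31-bit Smi range, assumed
      const double kSmiMax = (1 << 30) - 1;
      if (v >= -2147483648.0 && v <= 2147483647.0 &&
          static_cast<double>(static_cast<int32_t>(v)) == v) {
        return (v >= kSmiMin && v <= kSmiMax) ? kSmi : kOtherSigned32;
      }
      if (v >= 0.0 && v <= 4294967295.0 &&
          static_cast<double>(static_cast<uint32_t>(v)) == v) {
        return kUnsigned32;
      }
      return kDouble;
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  LubOfNumber(7), LubOfNumber(2e9), LubOfNumber(3e9),
                  LubOfNumber(0.5));  // 1 2 4 8
      return 0;
    }
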
+
+
+int Type::LubBitset(i::Map* map) {
+ switch (map->instance_type()) {
+ case STRING_TYPE:
+ case ASCII_STRING_TYPE:
+ case CONS_STRING_TYPE:
+ case CONS_ASCII_STRING_TYPE:
+ case SLICED_STRING_TYPE:
+ case SLICED_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE:
+ case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case INTERNALIZED_STRING_TYPE:
+ case ASCII_INTERNALIZED_STRING_TYPE:
+ case CONS_INTERNALIZED_STRING_TYPE:
+ case CONS_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ return kString;
+ case SYMBOL_TYPE:
+ return kSymbol;
+ case ODDBALL_TYPE:
+ return kOddball;
+ case HEAP_NUMBER_TYPE:
+ return kDouble;
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ if (map->is_undetectable()) return kUndetectable;
+ return kOtherObject;
+ case JS_ARRAY_TYPE:
+ return kArray;
+ case JS_FUNCTION_TYPE:
+ return kFunction;
+ case JS_REGEXP_TYPE:
+ return kRegExp;
+ case JS_PROXY_TYPE:
+ case JS_FUNCTION_PROXY_TYPE:
+ return kProxy;
+ case MAP_TYPE:
+ // When compiling stub templates, the meta map is used as a place holder
+ // for the actual map with which the template is later instantiated.
+ // We treat it as a kind of type variable whose upper bound is Any.
+ // TODO(rossberg): for caching of CompareNilIC stubs to work correctly,
+ // we must exclude Undetectable here. This makes no sense, really,
+ // because it means that the template isn't actually parametric.
+ // Also, it doesn't apply elsewhere. 8-(
+ // We ought to find a cleaner solution for compiling stubs parameterised
+ // over type or class variables, esp ones with bounds...
+ return kDetectable;
+ case DECLARED_ACCESSOR_INFO_TYPE:
+ case EXECUTABLE_ACCESSOR_INFO_TYPE:
+ case ACCESSOR_PAIR_TYPE:
+ case FIXED_ARRAY_TYPE:
+ return kInternal;
+ default:
+ UNREACHABLE();
+ return kNone;
}
}
@@ -229,9 +243,22 @@ int Type::GlbBitset() {
}
+// Most precise _current_ type of a value (usually its class).
+Type* Type::OfCurrently(Handle<i::Object> value) {
+ if (value->IsSmi()) return Smi();
+ i::Map* map = i::HeapObject::cast(*value)->map();
+ if (map->instance_type() == HEAP_NUMBER_TYPE ||
+ map->instance_type() == ODDBALL_TYPE) {
+ return Type::Of(value);
+ }
+ return Class(i::handle(map));
+}
+
+
// Check this <= that.
-bool Type::IsSlowCase(Type* that) {
+bool Type::SlowIs(Type* that) {
// Fast path for bitsets.
+ if (this->is_none()) return true;
if (that->is_bitset()) {
return (this->LubBitset() | that->as_bitset()) == that->as_bitset();
}
@@ -270,6 +297,14 @@ bool Type::IsSlowCase(Type* that) {
}
+bool Type::IsCurrently(Type* that) {
+ return this->Is(that) ||
+ (this->is_constant() && that->is_class() &&
+ this->as_constant()->IsHeapObject() &&
+ i::HeapObject::cast(*this->as_constant())->map() == *that->as_class());
+}
+
+
// Check this overlaps that.
bool Type::Maybe(Type* that) {
// Fast path for bitsets.
@@ -365,11 +400,11 @@ Type* Type::Union(Handle<Type> type1, Handle<Type> type2) {
Isolate* isolate = NULL;
int size = type1->is_bitset() || type2->is_bitset() ? 1 : 0;
if (!type1->is_bitset()) {
- isolate = HeapObject::cast(*type1)->GetIsolate();
+ isolate = i::HeapObject::cast(*type1)->GetIsolate();
size += (type1->is_union() ? type1->as_union()->length() : 1);
}
if (!type2->is_bitset()) {
- isolate = HeapObject::cast(*type2)->GetIsolate();
+ isolate = i::HeapObject::cast(*type2)->GetIsolate();
size += (type2->is_union() ? type2->as_union()->length() : 1);
}
ASSERT(isolate != NULL);
@@ -441,11 +476,11 @@ Type* Type::Intersect(Handle<Type> type1, Handle<Type> type2) {
Isolate* isolate = NULL;
int size = 0;
if (!type1->is_bitset()) {
- isolate = HeapObject::cast(*type1)->GetIsolate();
+ isolate = i::HeapObject::cast(*type1)->GetIsolate();
size = (type1->is_union() ? type1->as_union()->length() : 2);
}
if (!type2->is_bitset()) {
- isolate = HeapObject::cast(*type2)->GetIsolate();
+ isolate = i::HeapObject::cast(*type2)->GetIsolate();
int size2 = (type2->is_union() ? type2->as_union()->length() : 2);
size = (size == 0 ? size2 : Min(size, size2));
}
@@ -498,38 +533,52 @@ void Type::TypePrint() {
}
+const char* Type::bitset_name(int bitset) {
+ switch (bitset) {
+ #define PRINT_COMPOSED_TYPE(type, value) case k##type: return #type;
+ BITSET_TYPE_LIST(PRINT_COMPOSED_TYPE)
+ #undef PRINT_COMPOSED_TYPE
+ default:
+ return NULL;
+ }
+}
+
+
void Type::TypePrint(FILE* out) {
if (is_bitset()) {
- int val = as_bitset();
- const char* composed_name = GetComposedName(val);
- if (composed_name != NULL) {
- PrintF(out, "%s", composed_name);
- return;
- }
- bool first_entry = true;
- PrintF(out, "{");
- for (unsigned i = 0; i < sizeof(val)*8; ++i) {
- int mask = (1 << i);
- if ((val & mask) != 0) {
- if (!first_entry) PrintF(out, ",");
- first_entry = false;
- PrintF(out, "%s", GetPrimitiveName(mask));
+ int bitset = as_bitset();
+ const char* name = bitset_name(bitset);
+ if (name != NULL) {
+ PrintF(out, "%s", name);
+ } else {
+ bool is_first = true;
+ PrintF(out, "(");
+ for (int mask = 1; mask != 0; mask = mask << 1) {
+ if ((bitset & mask) != 0) {
+ if (!is_first) PrintF(out, " | ");
+ is_first = false;
+ PrintF(out, "%s", bitset_name(mask));
+ }
}
+ PrintF(out, ")");
}
- PrintF(out, "}");
} else if (is_constant()) {
- PrintF(out, "Constant(%p)", static_cast<void*>(*as_constant()));
+ PrintF(out, "Constant(%p : ", static_cast<void*>(*as_constant()));
+ from_bitset(LubBitset())->TypePrint(out);
+    PrintF(out, ")");
} else if (is_class()) {
- PrintF(out, "Class(%p)", static_cast<void*>(*as_class()));
+ PrintF(out, "Class(%p < ", static_cast<void*>(*as_class()));
+ from_bitset(LubBitset())->TypePrint(out);
+    PrintF(out, ")");
} else if (is_union()) {
- PrintF(out, "{");
+ PrintF(out, "(");
Handle<Unioned> unioned = as_union();
for (int i = 0; i < unioned->length(); ++i) {
Handle<Type> type_i = union_get(unioned, i);
- if (i > 0) PrintF(out, ",");
+ if (i > 0) PrintF(out, " | ");
type_i->TypePrint(out);
}
- PrintF(out, "}");
+ PrintF(out, ")");
}
}
#endif
diff --git a/chromium/v8/src/types.h b/chromium/v8/src/types.h
index 2810ffc8a17..1dc79dd6b33 100644
--- a/chromium/v8/src/types.h
+++ b/chromium/v8/src/types.h
@@ -95,7 +95,7 @@ namespace internal {
// a concurrent compilation context.
-#define PRIMITIVE_TYPE_LIST(V) \
+#define BITSET_TYPE_LIST(V) \
V(None, 0) \
V(Null, 1 << 0) \
V(Undefined, 1 << 1) \
@@ -113,9 +113,8 @@ namespace internal {
V(RegExp, 1 << 13) \
V(OtherObject, 1 << 14) \
V(Proxy, 1 << 15) \
- V(Internal, 1 << 16)
-
-#define COMPOSED_TYPE_LIST(V) \
+ V(Internal, 1 << 16) \
+ \
V(Oddball, kBoolean | kNull | kUndefined) \
V(Signed32, kSmi | kOtherSigned32) \
V(Number, kSigned32 | kUnsigned32 | kDouble) \
@@ -128,26 +127,22 @@ namespace internal {
V(Receiver, kObject | kProxy) \
V(Allocated, kDouble | kName | kReceiver) \
V(Any, kOddball | kNumber | kAllocated | kInternal) \
+ V(NonNumber, kAny - kNumber) \
V(Detectable, kAllocated - kUndetectable)
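
Folding the two lists into a single BITSET_TYPE_LIST works because composed types are nothing more than bitwise unions of the primitive bits, so subtyping is a mask test and union/intersection are | and &. A compact illustration with bit values invented for the sketch:

    #include <cstdio>

    enum {
      kSmi = 1 << 0, kOtherSigned32 = 1 << 1,
      kUnsigned32 = 1 << 2, kDouble = 1 << 3,
      // "Composed" types are plain unions of the bits above.
      kSigned32 = kSmi | kOtherSigned32,
      kNumber = kSigned32 | kUnsigned32 | kDouble
    };

    // self <= that; mirrors the bitset fast path in Type::SlowIs.
    bool Is(int self, int that) { return (self | that) == that; }

    int main() {
      std::printf("Smi <= Signed32: %d\n", Is(kSmi, kSigned32));    // 1
      std::printf("Signed32 <= Smi: %d\n", Is(kSigned32, kSmi));    // 0
      std::printf("Signed32 & Number: %d\n", kSigned32 & kNumber);  // 3 (= Signed32)
      return 0;
    }
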
-#define TYPE_LIST(V) \
- PRIMITIVE_TYPE_LIST(V) \
- COMPOSED_TYPE_LIST(V)
-
-
class Type : public Object {
public:
#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
static Type* type() { return from_bitset(k##type); }
- TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+ BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
#undef DEFINE_TYPE_CONSTRUCTOR
- static Type* Class(Handle<Map> map) { return from_handle(map); }
- static Type* Constant(Handle<HeapObject> value) {
+ static Type* Class(Handle<i::Map> map) { return from_handle(map); }
+ static Type* Constant(Handle<i::HeapObject> value) {
return Constant(value, value->GetIsolate());
}
- static Type* Constant(Handle<v8::internal::Object> value, Isolate* isolate) {
+ static Type* Constant(Handle<i::Object> value, Isolate* isolate) {
return from_handle(isolate->factory()->NewBox(value));
}
@@ -155,15 +150,25 @@ class Type : public Object {
static Type* Intersect(Handle<Type> type1, Handle<Type> type2);
static Type* Optional(Handle<Type> type); // type \/ Undefined
- bool Is(Type* that) { return (this == that) ? true : IsSlowCase(that); }
+ static Type* Of(Handle<i::Object> value) {
+ return from_bitset(LubBitset(*value));
+ }
+
+ bool Is(Type* that) { return this == that || SlowIs(that); }
bool Is(Handle<Type> that) { return this->Is(*that); }
bool Maybe(Type* that);
bool Maybe(Handle<Type> that) { return this->Maybe(*that); }
+ // State-dependent versions of Of and Is that consider subtyping between
+ // a constant and its map class.
+ static Type* OfCurrently(Handle<i::Object> value);
+ bool IsCurrently(Type* that);
+ bool IsCurrently(Handle<Type> that) { return this->IsCurrently(*that); }
+
bool IsClass() { return is_class(); }
bool IsConstant() { return is_constant(); }
- Handle<Map> AsClass() { return as_class(); }
- Handle<v8::internal::Object> AsConstant() { return as_constant(); }
+ Handle<i::Map> AsClass() { return as_class(); }
+ Handle<i::Object> AsConstant() { return as_constant(); }
int NumClasses();
int NumConstants();
@@ -190,16 +195,16 @@ class Type : public Object {
int index_;
};
- Iterator<Map> Classes() {
- if (this->is_bitset()) return Iterator<Map>();
- return Iterator<Map>(this->handle());
+ Iterator<i::Map> Classes() {
+ if (this->is_bitset()) return Iterator<i::Map>();
+ return Iterator<i::Map>(this->handle());
}
- Iterator<v8::internal::Object> Constants() {
- if (this->is_bitset()) return Iterator<v8::internal::Object>();
- return Iterator<v8::internal::Object>(this->handle());
+ Iterator<i::Object> Constants() {
+ if (this->is_bitset()) return Iterator<i::Object>();
+ return Iterator<i::Object>(this->handle());
}
- static Type* cast(v8::internal::Object* object) {
+ static Type* cast(i::Object* object) {
Type* t = static_cast<Type*>(object);
ASSERT(t->is_bitset() || t->is_class() ||
t->is_constant() || t->is_union());
@@ -220,37 +225,38 @@ class Type : public Object {
enum {
#define DECLARE_TYPE(type, value) k##type = (value),
- TYPE_LIST(DECLARE_TYPE)
+ BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
kUnusedEOL = 0
};
+ bool is_none() { return this == None(); }
bool is_bitset() { return this->IsSmi(); }
bool is_class() { return this->IsMap(); }
bool is_constant() { return this->IsBox(); }
bool is_union() { return this->IsFixedArray(); }
- bool IsSlowCase(Type* that);
+ bool SlowIs(Type* that);
int as_bitset() { return Smi::cast(this)->value(); }
- Handle<Map> as_class() { return Handle<Map>::cast(handle()); }
- Handle<v8::internal::Object> as_constant() {
- Handle<Box> box = Handle<Box>::cast(handle());
- return v8::internal::handle(box->value(), box->GetIsolate());
+ Handle<i::Map> as_class() { return Handle<i::Map>::cast(handle()); }
+ Handle<i::Object> as_constant() {
+ Handle<i::Box> box = Handle<i::Box>::cast(handle());
+ return i::handle(box->value(), box->GetIsolate());
}
Handle<Unioned> as_union() { return Handle<Unioned>::cast(handle()); }
Handle<Type> handle() { return handle_via_isolate_of(this); }
Handle<Type> handle_via_isolate_of(Type* type) {
ASSERT(type->IsHeapObject());
- return v8::internal::handle(this, HeapObject::cast(type)->GetIsolate());
+ return i::handle(this, i::HeapObject::cast(type)->GetIsolate());
}
static Type* from_bitset(int bitset) {
- return static_cast<Type*>(Object::cast(Smi::FromInt(bitset)));
+ return static_cast<Type*>(i::Object::cast(i::Smi::FromInt(bitset)));
}
- static Type* from_handle(Handle<HeapObject> handle) {
- return static_cast<Type*>(Object::cast(*handle));
+ static Type* from_handle(Handle<i::HeapObject> handle) {
+ return static_cast<Type*>(i::Object::cast(*handle));
}
static Handle<Type> union_get(Handle<Unioned> unioned, int i) {
@@ -261,34 +267,16 @@ class Type : public Object {
int LubBitset(); // least upper bound that's a bitset
int GlbBitset(); // greatest lower bound that's a bitset
+
+ static int LubBitset(i::Object* value);
+ static int LubBitset(i::Map* map);
+
bool InUnion(Handle<Unioned> unioned, int current_size);
int ExtendUnion(Handle<Unioned> unioned, int current_size);
int ExtendIntersection(
Handle<Unioned> unioned, Handle<Type> type, int current_size);
- static const char* GetComposedName(int type) {
- switch (type) {
- #define PRINT_COMPOSED_TYPE(type, value) \
- case k##type: \
- return # type;
- COMPOSED_TYPE_LIST(PRINT_COMPOSED_TYPE)
- #undef PRINT_COMPOSED_TYPE
- }
- return NULL;
- }
-
- static const char* GetPrimitiveName(int type) {
- switch (type) {
- #define PRINT_PRIMITIVE_TYPE(type, value) \
- case k##type: \
- return # type;
- PRIMITIVE_TYPE_LIST(PRINT_PRIMITIVE_TYPE)
- #undef PRINT_PRIMITIVE_TYPE
- default:
- UNREACHABLE();
- return "InvalidType";
- }
- }
+ static const char* bitset_name(int bitset);
};
@@ -298,10 +286,18 @@ struct Bounds {
Handle<Type> upper;
Bounds() {}
- Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {}
- Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {}
- explicit Bounds(Handle<Type> t) : lower(t), upper(t) {}
- Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {}
+ Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {
+ ASSERT(lower->Is(upper));
+ }
+ Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {
+ ASSERT(lower->Is(upper));
+ }
+ explicit Bounds(Handle<Type> t) : lower(t), upper(t) {
+ ASSERT(lower->Is(upper));
+ }
+ Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {
+ ASSERT(lower->Is(upper));
+ }
// Unrestricted bounds.
static Bounds Unbounded(Isolate* isl) {
@@ -310,9 +306,11 @@ struct Bounds {
// Meet: both b1 and b2 are known to hold.
static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
- return Bounds(
- handle(Type::Union(b1.lower, b2.lower), isl),
- handle(Type::Intersect(b1.upper, b2.upper), isl));
+ Handle<Type> lower(Type::Union(b1.lower, b2.lower), isl);
+ Handle<Type> upper(Type::Intersect(b1.upper, b2.upper), isl);
+ // Lower bounds are considered approximate, correct as necessary.
+ lower = handle(Type::Intersect(lower, upper), isl);
+ return Bounds(lower, upper);
}
// Join: either b1 or b2 is known to hold.
@@ -323,10 +321,14 @@ struct Bounds {
}
static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) {
+ // Lower bounds are considered approximate, correct as necessary.
+ t = handle(Type::Intersect(t, b.upper), isl);
return Bounds(handle(Type::Union(b.lower, t), isl), b.upper);
}
static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) {
- return Bounds(b.lower, handle(Type::Intersect(b.upper, t), isl));
+ return Bounds(
+ handle(Type::Intersect(b.lower, t), isl),
+ handle(Type::Intersect(b.upper, t), isl));
}
};
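
The new ASSERT(lower->Is(upper)) in every Bounds constructor documents an invariant that a meet can momentarily violate: the union of two lower bounds may escape the intersection of the uppers, which is why Both() now clips the lower bound before constructing the result. A toy model of that correction, with bitset types standing in for Type handles:

    #include <cassert>
    #include <cstdio>

    enum { kSmi = 1, kDouble = 2, kString = 4, kNumber = kSmi | kDouble };

    struct Bounds {
      int lower, upper;  // bitsets; invariant: lower is a subset of upper
      Bounds(int l, int u) : lower(l), upper(u) { assert((l | u) == u); }
    };

    Bounds Both(Bounds a, Bounds b) {
      int lower = a.lower | b.lower;  // union of the lower bounds
      int upper = a.upper & b.upper;  // intersection of the upper bounds
      lower &= upper;                 // correct the approximate lower bound
      return Bounds(lower, upper);
    }

    int main() {
      // Without the clip, lower would be Smi|String, violating the assert.
      Bounds r = Both(Bounds(kSmi, kNumber), Bounds(kString, kString | kNumber));
      std::printf("lower=%d upper=%d\n", r.lower, r.upper);  // lower=1 upper=3
      return 0;
    }
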
diff --git a/chromium/v8/src/typing.cc b/chromium/v8/src/typing.cc
index 34bb64bd7de..9458d6dc2fe 100644
--- a/chromium/v8/src/typing.cc
+++ b/chromium/v8/src/typing.cc
@@ -200,12 +200,17 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
if (!clause->is_default())
- clause->RecordTypeFeedback(oracle());
+ clause->set_compare_type(oracle()->ClauseType(clause->CompareId()));
}
}
}
+void AstTyper::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
// Collect type feedback.
if (!stmt->cond()->ToBooleanIsTrue()) {
@@ -247,8 +252,8 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
RECURSE(Visit(stmt->cond()));
}
RECURSE(Visit(stmt->body()));
- store_.Forget(); // Control may transfer here via 'continue'.
if (stmt->next() != NULL) {
+ store_.Forget(); // Control may transfer here via 'continue'.
RECURSE(Visit(stmt->next()));
}
store_.Forget(); // Control may transfer here via termination or 'break'.
@@ -257,7 +262,8 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
// Collect type feedback.
- stmt->RecordTypeFeedback(oracle());
+ stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
+ oracle()->ForInType(stmt->ForInFeedbackId())));
RECURSE(Visit(stmt->enumerable()));
store_.Forget(); // Control may transfer here via looping or 'continue'.
@@ -305,7 +311,7 @@ void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void AstTyper::VisitSharedFunctionInfoLiteral(SharedFunctionInfoLiteral* expr) {
+void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
@@ -381,31 +387,34 @@ void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
void AstTyper::VisitAssignment(Assignment* expr) {
- // TODO(rossberg): Can we clean this up?
- if (expr->is_compound()) {
- // Collect type feedback.
- Expression* target = expr->target();
- Property* prop = target->AsProperty();
- if (prop != NULL) {
- prop->RecordTypeFeedback(oracle(), zone());
- expr->RecordTypeFeedback(oracle(), zone());
- }
-
- RECURSE(Visit(expr->binary_operation()));
-
- NarrowType(expr, expr->binary_operation()->bounds());
- } else {
- // Collect type feedback.
- if (expr->target()->IsProperty()) {
- expr->RecordTypeFeedback(oracle(), zone());
+ // Collect type feedback.
+ Property* prop = expr->target()->AsProperty();
+ if (prop != NULL) {
+ TypeFeedbackId id = expr->AssignmentFeedbackId();
+ expr->set_is_uninitialized(oracle()->StoreIsUninitialized(id));
+ if (!expr->IsUninitialized()) {
+ expr->set_is_pre_monomorphic(oracle()->StoreIsPreMonomorphic(id));
+ if (prop->key()->IsPropertyName()) {
+ Literal* lit_key = prop->key()->AsLiteral();
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
+ oracle()->AssignmentReceiverTypes(id, name, expr->GetReceiverTypes());
+ } else {
+ KeyedAccessStoreMode store_mode;
+ oracle()->KeyedAssignmentReceiverTypes(
+ id, expr->GetReceiverTypes(), &store_mode);
+ expr->set_store_mode(store_mode);
+ }
+ ASSERT(!expr->IsPreMonomorphic() || !expr->IsMonomorphic());
}
-
- RECURSE(Visit(expr->target()));
- RECURSE(Visit(expr->value()));
-
- NarrowType(expr, expr->value()->bounds());
}
+ Expression* rhs =
+ expr->is_compound() ? expr->binary_operation() : expr->value();
+ RECURSE(Visit(expr->target()));
+ RECURSE(Visit(rhs));
+ NarrowType(expr, rhs->bounds());
+
VariableProxy* proxy = expr->target()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
@@ -431,7 +440,26 @@ void AstTyper::VisitThrow(Throw* expr) {
void AstTyper::VisitProperty(Property* expr) {
// Collect type feedback.
- expr->RecordTypeFeedback(oracle(), zone());
+ TypeFeedbackId id = expr->PropertyFeedbackId();
+ expr->set_is_uninitialized(oracle()->LoadIsUninitialized(id));
+ if (!expr->IsUninitialized()) {
+ expr->set_is_pre_monomorphic(oracle()->LoadIsPreMonomorphic(id));
+ if (expr->key()->IsPropertyName()) {
+ Literal* lit_key = expr->key()->AsLiteral();
+ ASSERT(lit_key != NULL && lit_key->value()->IsString());
+ Handle<String> name = Handle<String>::cast(lit_key->value());
+ bool is_prototype;
+ oracle()->PropertyReceiverTypes(
+ id, name, expr->GetReceiverTypes(), &is_prototype);
+ expr->set_is_function_prototype(is_prototype);
+ } else {
+ bool is_string;
+ oracle()->KeyedPropertyReceiverTypes(
+ id, expr->GetReceiverTypes(), &is_string);
+ expr->set_is_string_access(is_string);
+ }
+ ASSERT(!expr->IsPreMonomorphic() || !expr->IsMonomorphic());
+ }
RECURSE(Visit(expr->obj()));
RECURSE(Visit(expr->key()));
@@ -445,8 +473,7 @@ void AstTyper::VisitCall(Call* expr) {
Expression* callee = expr->expression();
Property* prop = callee->AsProperty();
if (prop != NULL) {
- if (prop->key()->IsPropertyName())
- expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
+ expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
} else {
expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
}
@@ -521,11 +548,11 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
// Collect type feedback.
- expr->RecordTypeFeedback(oracle(), zone());
- Property* prop = expr->expression()->AsProperty();
- if (prop != NULL) {
- prop->RecordTypeFeedback(oracle(), zone());
- }
+ TypeFeedbackId store_id = expr->CountStoreFeedbackId();
+ expr->set_store_mode(oracle()->GetStoreMode(store_id));
+ oracle()->CountReceiverTypes(store_id, expr->GetReceiverTypes());
+ expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
+ // TODO(rossberg): merge the count type with the generic expression type.
RECURSE(Visit(expr->expression()));
@@ -543,7 +570,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &type, &fixed_right_arg);
+ &left_type, &right_type, &type, &fixed_right_arg, expr->op());
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
@@ -577,10 +604,15 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::BIT_AND: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Type* upper = Type::Union(
- expr->left()->bounds().upper, expr->right()->bounds().upper);
- if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
- NarrowType(expr, Bounds(Type::Smi(), upper, isolate_));
+ Handle<Type> upper(
+ Type::Union(
+ expr->left()->bounds().upper, expr->right()->bounds().upper),
+ isolate_);
+ if (!upper->Is(Type::Signed32()))
+ upper = handle(Type::Signed32(), isolate_);
+ Handle<Type> lower(Type::Intersect(
+ handle(Type::Smi(), isolate_), upper), isolate_);
+ NarrowType(expr, Bounds(lower, upper));
break;
}
case Token::BIT_XOR:
@@ -593,7 +625,10 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SHR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Smi(), Type::Unsigned32(), isolate_));
+ // TODO(rossberg): The upper bound would be Unsigned32, but since there
+ // is no 'positive Smi' type for the lower bound, we use the smallest
+ // union of Smi and Unsigned32 as upper bound instead.
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
@@ -601,15 +636,17 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Bounds l = expr->left()->bounds();
Bounds r = expr->right()->bounds();
Type* lower =
- l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
- Type::Smi() :
+ l.lower->Is(Type::None()) || r.lower->Is(Type::None()) ?
+ Type::None() :
l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
- Type::String() : Type::None();
+ Type::String() :
+ l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
+ Type::Smi() : Type::None();
Type* upper =
- l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
- Type::Number() :
l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
- Type::String() : Type::NumberOrString();
+ Type::String() :
+ l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
+ Type::Number() : Type::NumberOrString();
NarrowType(expr, Bounds(lower, upper, isolate_));
break;
}
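
The reordering above makes string-ness dominate the '+' rule, since concatenation wins over numeric addition whenever either operand may be a string. A sketch of the upper-bound selection only, with bit values invented for the sketch:

    #include <cstdio>

    enum { kNone = 0, kSmi = 1, kDouble = 2, kString = 4,
           kNumber = kSmi | kDouble, kNumberOrString = kNumber | kString };

    bool Is(int t, int super) { return (t | super) == super; }

    // Upper bound of 'a + b': string-ness of either side wins, pure
    // numbers stay numeric, anything else may be either.
    int AddUpper(int l, int r) {
      if (Is(l, kString) || Is(r, kString)) return kString;
      if (Is(l, kNumber) && Is(r, kNumber)) return kNumber;
      return kNumberOrString;
    }

    int main() {
      std::printf("%d %d %d\n",
                  AddUpper(kString, kSmi),           // 4 (String)
                  AddUpper(kSmi, kDouble),           // 3 (Number)
                  AddUpper(kNumberOrString, kSmi));  // 7 (NumberOrString)
      return 0;
    }
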
diff --git a/chromium/v8/src/unicode.h b/chromium/v8/src/unicode.h
index a7061745a2a..6ba61d0e17b 100644
--- a/chromium/v8/src/unicode.h
+++ b/chromium/v8/src/unicode.h
@@ -29,7 +29,7 @@
#define V8_UNICODE_H_
#include <sys/types.h>
-#include <globals.h>
+#include "globals.h"
/**
* \file
* Definitions and convenience functions for working with unicode.
diff --git a/chromium/v8/src/unique.h b/chromium/v8/src/unique.h
index 7ae704a26ad..a93b0469935 100644
--- a/chromium/v8/src/unique.h
+++ b/chromium/v8/src/unique.h
@@ -29,6 +29,7 @@
#define V8_HYDROGEN_UNIQUE_H_
#include "handles.h"
+#include "objects.h"
#include "utils.h"
#include "zone.h"
@@ -53,19 +54,30 @@ class UniqueSet;
template <typename T>
class Unique V8_FINAL {
public:
- // TODO(titzer): make private and introduce some builder/owner class.
+ // TODO(titzer): make private and introduce a uniqueness scope.
explicit Unique(Handle<T> handle) {
if (handle.is_null()) {
raw_address_ = NULL;
} else {
+ // This is a best-effort check to prevent comparing Unique<T>'s created
+ // in different GC eras; we require heap allocation to be disallowed at
+ // creation time.
+ // NOTE: we currently consider maps to be non-movable, so no special
+ // assurance is required for creating a Unique<Map>.
+      // TODO(titzer): other immortal immovable objects are also fine.
+ ASSERT(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
raw_address_ = reinterpret_cast<Address>(*handle);
- ASSERT_NE(raw_address_, NULL);
+ ASSERT_NE(raw_address_, NULL); // Non-null should imply non-zero address.
}
handle_ = handle;
}
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ Unique(Address raw_address, Handle<T> handle)
+ : raw_address_(raw_address), handle_(handle) { }
+
// Constructor for handling automatic up casting.
- // Ex. Unique<JSFunction> can be passed when Unique<Object> is expected.
+  // E.g. Unique<JSFunction> can be passed when Unique<Object> is expected.
template <class S> Unique(Unique<S> uniq) {
#ifdef DEBUG
T* a = NULL;
@@ -74,34 +86,57 @@ class Unique V8_FINAL {
USE(a);
#endif
raw_address_ = uniq.raw_address_;
- handle_ = uniq.handle_; // Creates a new handle sharing the same location.
+ handle_ = uniq.handle_;
}
template <typename U>
- bool operator==(const Unique<U>& other) const {
+ inline bool operator==(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ == other.raw_address_;
}
template <typename U>
- bool operator!=(const Unique<U>& other) const {
+ inline bool operator!=(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ != other.raw_address_;
}
- intptr_t Hashcode() const {
+ inline intptr_t Hashcode() const {
+ ASSERT(IsInitialized());
return reinterpret_cast<intptr_t>(raw_address_);
}
- bool IsNull() {
+ inline bool IsNull() const {
+ ASSERT(IsInitialized());
return raw_address_ == NULL;
}
- // Don't do this unless you have access to the heap!
- // No, seriously! You can compare and hash and set-ify uniques that were
- // all created at the same time; please don't dereference.
- Handle<T> handle() {
+ inline bool IsKnownGlobal(void* global) const {
+ ASSERT(IsInitialized());
+ return raw_address_ == reinterpret_cast<Address>(global);
+ }
+
+ inline Handle<T> handle() const {
return handle_;
}
+ template <class S> static Unique<T> cast(Unique<S> that) {
+ return Unique<T>(that.raw_address_, Handle<T>::cast(that.handle_));
+ }
+
+ inline bool IsInitialized() const {
+ return raw_address_ != NULL || handle_.is_null();
+ }
+
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ static Unique<T> CreateUninitialized(Handle<T> handle) {
+ return Unique<T>(reinterpret_cast<Address>(NULL), handle);
+ }
+
+ static Unique<T> CreateImmovable(Handle<T> handle) {
+ return Unique<T>(reinterpret_cast<Address>(*handle), handle);
+ }
+
friend class UniqueSet<T>; // Uses internal details for speed.
template <class U>
friend class Unique; // For comparing raw_address values.
@@ -120,6 +155,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Add a new element to this unique set. Mutates this set. O(|this|).
void Add(Unique<T> uniq, Zone* zone) {
+ ASSERT(uniq.IsInitialized());
// Keep the set sorted by the {raw_address} of the unique elements.
for (int i = 0; i < size_; i++) {
if (array_[i] == uniq) return;
@@ -137,8 +173,19 @@ class UniqueSet V8_FINAL : public ZoneObject {
array_[size_++] = uniq;
}
+ // Remove an element from this set. Mutates this set. O(|this|)
+ void Remove(Unique<T> uniq) {
+ for (int i = 0; i < size_; i++) {
+ if (array_[i] == uniq) {
+ while (++i < size_) array_[i - 1] = array_[i];
+ size_--;
+ return;
+ }
+ }
+ }
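
Everything in UniqueSet leans on two ideas: element identity is a raw address, and the backing array stays sorted by that address so comparisons and merges run linearly. A minimal standalone sketch of that design (std::less gives a guaranteed total order on unrelated pointers), not V8's zone-allocated implementation:

    #include <algorithm>
    #include <cstdio>
    #include <functional>
    #include <vector>

    struct Obj { int payload; };

    // Identity is the object's address; the array is kept sorted by
    // address so Equals/IsSubset/Intersect can run as linear merges.
    struct UniqueSet {
      std::vector<const Obj*> elems;

      void Add(const Obj* o) {
        auto it = std::lower_bound(elems.begin(), elems.end(), o,
                                   std::less<const Obj*>());
        if (it == elems.end() || *it != o) elems.insert(it, o);
      }

      bool Contains(const Obj* o) const {
        return std::binary_search(elems.begin(), elems.end(), o,
                                  std::less<const Obj*>());
      }
    };

    int main() {
      Obj a{1}, b{2};
      UniqueSet s;
      s.Add(&a);
      s.Add(&b);
      s.Add(&a);  // duplicate: ignored, set semantics
      std::printf("size=%zu contains(a)=%d\n", s.elems.size(), s.Contains(&a));
      return 0;
    }
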
+
// Compare this set against another set. O(|this|).
- bool Equals(UniqueSet<T>* that) {
+ bool Equals(UniqueSet<T>* that) const {
if (that->size_ != this->size_) return false;
for (int i = 0; i < this->size_; i++) {
if (this->array_[i] != that->array_[i]) return false;
@@ -146,8 +193,18 @@ class UniqueSet V8_FINAL : public ZoneObject {
return true;
}
+ // Check whether this set contains the given element. O(|this|)
+ // TODO(titzer): use binary search for large sets to make this O(log|this|)
+ template <typename U>
+ bool Contains(Unique<U> elem) const {
+ for (int i = 0; i < size_; i++) {
+ if (this->array_[i] == elem) return true;
+ }
+ return false;
+ }
+
// Check if this set is a subset of the given set. O(|this| + |that|).
- bool IsSubset(UniqueSet<T>* that) {
+ bool IsSubset(UniqueSet<T>* that) const {
if (that->size_ < this->size_) return false;
int j = 0;
for (int i = 0; i < this->size_; i++) {
@@ -163,7 +220,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Returns a new set representing the intersection of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) {
+ UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
UniqueSet<T>* out = new(zone) UniqueSet<T>();
@@ -190,7 +247,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
// Returns a new set representing the union of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) {
+ UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0) return this->Copy(zone);
if (this->size_ == 0) return that->Copy(zone);
@@ -222,7 +279,7 @@ class UniqueSet V8_FINAL : public ZoneObject {
}
// Makes an exact copy of this set. O(|this| + |that|).
- UniqueSet<T>* Copy(Zone* zone) {
+ UniqueSet<T>* Copy(Zone* zone) const {
UniqueSet<T>* copy = new(zone) UniqueSet<T>();
copy->size_ = this->size_;
copy->capacity_ = this->size_;
@@ -231,10 +288,19 @@ class UniqueSet V8_FINAL : public ZoneObject {
return copy;
}
- inline int size() {
+ void Clear() {
+ size_ = 0;
+ }
+
+ inline int size() const {
return size_;
}
+ inline Unique<T> at(int index) const {
+ ASSERT(index >= 0 && index < size_);
+ return array_[index];
+ }
+
private:
// These sets should be small, since operations are implemented with simple
// linear algorithms. Enforce a maximum size.
diff --git a/chromium/v8/src/utils.h b/chromium/v8/src/utils.h
index 4a08319044b..3a0936eaa63 100644
--- a/chromium/v8/src/utils.h
+++ b/chromium/v8/src/utils.h
@@ -419,8 +419,8 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(to <= length_);
- ASSERT(from < to);
+ SLOW_ASSERT(to <= length_);
+ SLOW_ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
@@ -1083,7 +1083,7 @@ class EnumSet {
// The strange typing in ASSERT is necessary to avoid stupid warnings, see:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
- return 1 << element;
+ return static_cast<T>(1) << element;
}
T bits_;
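
The Mask() change above is a classic shift-width fix: the literal 1 is an int, so shifting it by 32 or more is undefined behavior even when the enum set's storage type T is 64 bits wide. Widening the literal first keeps the whole shift in T. A small demonstration of the corrected form:

    #include <cstdint>
    #include <cstdio>

    // Valid for 0 <= element < 8 * sizeof(T); with a plain '1 << element'
    // the shift would be done in int and element >= 32 would be undefined.
    template <typename T>
    T Mask(int element) {
      return static_cast<T>(1) << element;
    }

    int main() {
      uint64_t high_bit = Mask<uint64_t>(40);
      std::printf("0x%016llx\n", static_cast<unsigned long long>(high_bit));
      return 0;
    }
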
diff --git a/chromium/v8/src/utils/random-number-generator.cc b/chromium/v8/src/utils/random-number-generator.cc
index 1e03ee24499..fe273315a7a 100644
--- a/chromium/v8/src/utils/random-number-generator.cc
+++ b/chromium/v8/src/utils/random-number-generator.cc
@@ -28,6 +28,7 @@
#include "utils/random-number-generator.h"
#include <cstdio>
+#include <cstdlib>
#include "flags.h"
#include "platform/mutex.h"
@@ -67,6 +68,16 @@ RandomNumberGenerator::RandomNumberGenerator() {
}
}
+#if V8_OS_CYGWIN || V8_OS_WIN
+ // Use rand_s() to gather entropy on Windows. See:
+ // https://code.google.com/p/v8/issues/detail?id=2905
+ unsigned first_half, second_half;
+ errno_t result = rand_s(&first_half);
+ ASSERT_EQ(0, result);
+ result = rand_s(&second_half);
+ ASSERT_EQ(0, result);
+ SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#else
// Gather entropy from /dev/urandom if available.
FILE* fp = fopen("/dev/urandom", "rb");
if (fp != NULL) {
@@ -82,10 +93,16 @@ RandomNumberGenerator::RandomNumberGenerator() {
// We cannot assume that random() or rand() were seeded
// properly, so instead of relying on random() or rand(),
// we just seed our PRNG using timing data as fallback.
+ // This is weak entropy, but it's sufficient, because
+ // it is the responsibility of the embedder to install
+ // an entropy source using v8::V8::SetEntropySource(),
+ // which provides reasonable entropy, see:
+ // https://code.google.com/p/v8/issues/detail?id=2905
int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
- seed ^= TimeTicks::HighResNow().ToInternalValue() << 16;
+ seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
seed ^= TimeTicks::Now().ToInternalValue() << 8;
SetSeed(seed);
+#endif // V8_OS_CYGWIN || V8_OS_WIN
}
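
The Windows branch packs two independent 32-bit rand_s() samples into one 64-bit seed. The same packing, sketched portably with std::random_device standing in for the platform entropy source (a substitution made for this sketch only):

    #include <cstdint>
    #include <cstdio>
    #include <random>

    // Pack two independent 32-bit entropy samples into one 64-bit seed,
    // as the Windows branch above does with rand_s().
    int64_t MakeSeed(uint32_t first_half, uint32_t second_half) {
      return (static_cast<int64_t>(first_half) << 32) + second_half;
    }

    int main() {
      std::random_device entropy;  // portable stand-in for rand_s()/urandom
      int64_t seed = MakeSeed(entropy(), entropy());
      std::printf("seed=%lld\n", static_cast<long long>(seed));
      return 0;
    }
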
diff --git a/chromium/v8/src/utils/random-number-generator.h b/chromium/v8/src/utils/random-number-generator.h
index bd7dca7e651..cc7d7395e6a 100644
--- a/chromium/v8/src/utils/random-number-generator.h
+++ b/chromium/v8/src/utils/random-number-generator.h
@@ -42,6 +42,10 @@ namespace internal {
// If two instances of RandomNumberGenerator are created with the same seed, and
// the same sequence of method calls is made for each, they will generate and
// return identical sequences of numbers.
+// This class uses (probably) weak entropy by default, but it's sufficient,
+// because it is the responsibility of the embedder to install an entropy source
+// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
+// https://code.google.com/p/v8/issues/detail?id=2905
// This class is neither reentrant nor threadsafe.
class RandomNumberGenerator V8_FINAL {
diff --git a/chromium/v8/src/v8-counters.cc b/chromium/v8/src/v8-counters.cc
index 6711c80203e..c899b289a53 100644
--- a/chromium/v8/src/v8-counters.cc
+++ b/chromium/v8/src/v8-counters.cc
@@ -76,6 +76,14 @@ Counters::Counters(Isolate* isolate) {
StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+
+#define SC(name) \
+ count_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \
+ size_of_CODE_AGE_##name##_ = \
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name);
+ CODE_AGE_LIST_COMPLETE(SC)
+#undef SC
}
diff --git a/chromium/v8/src/v8-counters.h b/chromium/v8/src/v8-counters.h
index ff2247cba16..9178046d6ed 100644
--- a/chromium/v8/src/v8-counters.h
+++ b/chromium/v8/src/v8-counters.h
@@ -51,6 +51,7 @@ namespace internal {
HT(compile_lazy, V8.CompileLazy)
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
+ /* Heap fragmentation. */ \
HP(external_fragmentation_total, \
V8.MemoryExternalFragmentationTotal) \
HP(external_fragmentation_old_pointer_space, \
@@ -67,12 +68,26 @@ namespace internal {
V8.MemoryExternalFragmentationPropertyCellSpace) \
HP(external_fragmentation_lo_space, \
V8.MemoryExternalFragmentationLoSpace) \
+ /* Percentages of heap committed to each space. */ \
+ HP(heap_fraction_new_space, \
+ V8.MemoryHeapFractionNewSpace) \
+ HP(heap_fraction_old_pointer_space, \
+ V8.MemoryHeapFractionOldPointerSpace) \
+ HP(heap_fraction_old_data_space, \
+ V8.MemoryHeapFractionOldDataSpace) \
+ HP(heap_fraction_code_space, \
+ V8.MemoryHeapFractionCodeSpace) \
HP(heap_fraction_map_space, \
V8.MemoryHeapFractionMapSpace) \
HP(heap_fraction_cell_space, \
V8.MemoryHeapFractionCellSpace) \
HP(heap_fraction_property_cell_space, \
V8.MemoryHeapFractionPropertyCellSpace) \
+ HP(heap_fraction_lo_space, \
+ V8.MemoryHeapFractionLoSpace) \
+ /* Percentage of crankshafted codegen. */ \
+ HP(codegen_fraction_crankshaft, \
+ V8.CodegenFractionCrankshaft) \
#define HISTOGRAM_MEMORY_LIST(HM) \
@@ -84,6 +99,10 @@ namespace internal {
V8.MemoryHeapSampleCellSpaceCommitted) \
HM(heap_sample_property_cell_space_committed, \
V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
+ HM(heap_sample_code_space_committed, \
+ V8.MemoryHeapSampleCodeSpaceCommitted) \
+ HM(heap_sample_maximum_committed, \
+ V8.MemoryHeapSampleMaximumCommitted) \
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
@@ -204,7 +223,6 @@ namespace internal {
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
@@ -224,7 +242,6 @@ namespace internal {
SC(math_asin, V8.MathAsin) \
SC(math_atan, V8.MathAtan) \
SC(math_atan2, V8.MathAtan2) \
- SC(math_ceil, V8.MathCeil) \
SC(math_cos, V8.MathCos) \
SC(math_exp, V8.MathExp) \
SC(math_floor, V8.MathFloor) \
@@ -243,6 +260,9 @@ namespace internal {
SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
SC(soft_deopts_inserted, V8.SoftDeoptsInserted) \
SC(soft_deopts_executed, V8.SoftDeoptsExecuted) \
+ /* Number of write barriers in generated code. */ \
+ SC(write_barriers_dynamic, V8.WriteBarriersDynamic) \
+ SC(write_barriers_static, V8.WriteBarriersStatic) \
SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable) \
SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted) \
SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed) \
@@ -320,6 +340,14 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter* count_of_CODE_AGE_##name() \
+ { return &count_of_CODE_AGE_##name##_; } \
+ StatsCounter* size_of_CODE_AGE_##name() \
+ { return &size_of_CODE_AGE_##name##_; }
+ CODE_AGE_LIST_COMPLETE(SC)
+#undef SC
+
enum Id {
#define RATE_ID(name, caption) k_##name,
HISTOGRAM_TIMER_LIST(RATE_ID)
@@ -345,6 +373,10 @@ class Counters {
kSizeOfFIXED_ARRAY__##name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
#undef COUNTER_ID
+#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \
+ kSizeOfCODE_AGE__##name,
+ CODE_AGE_LIST_COMPLETE(COUNTER_ID)
+#undef COUNTER_ID
stats_counter_count
};
@@ -390,6 +422,12 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+#define SC(name) \
+ StatsCounter size_of_CODE_AGE_##name##_; \
+ StatsCounter count_of_CODE_AGE_##name##_;
+ CODE_AGE_LIST_COMPLETE(SC)
+#undef SC
+
friend class Isolate;
explicit Counters(Isolate* isolate);
diff --git a/chromium/v8/src/v8.cc b/chromium/v8/src/v8.cc
index e894164cd16..004a3394614 100644
--- a/chromium/v8/src/v8.cc
+++ b/chromium/v8/src/v8.cc
@@ -32,6 +32,9 @@
#include "elements.h"
#include "bootstrapper.h"
#include "debug.h"
+#ifdef V8_USE_DEFAULT_PLATFORM
+#include "default-platform.h"
+#endif
#include "deoptimizer.h"
#include "frames.h"
#include "heap-profiler.h"
@@ -52,6 +55,7 @@ V8_DECLARE_ONCE(init_once);
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
+v8::Platform* V8::platform_ = NULL;
bool V8::Initialize(Deserializer* des) {
@@ -100,6 +104,12 @@ void V8::TearDown() {
call_completed_callbacks_ = NULL;
Sampler::TearDown();
+
+#ifdef V8_USE_DEFAULT_PLATFORM
+ DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
+ platform_ = NULL;
+ delete platform;
+#endif
}
@@ -109,25 +119,6 @@ void V8::SetReturnAddressLocationResolver(
}
-// Used by JavaScript APIs
-uint32_t V8::Random(Context* context) {
- ASSERT(context->IsNativeContext());
- ByteArray* seed = context->random_seed();
- uint32_t* state = reinterpret_cast<uint32_t*>(seed->GetDataStartAddress());
-
- // When we get here, the RNG must have been initialized,
- // see the Genesis constructor in file bootstrapper.cc.
- ASSERT_NE(0, state[0]);
- ASSERT_NE(0, state[1]);
-
- // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
- state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
- state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
-
- return (state[0] << 14) + (state[1] & 0x3FFFF);
-}
-
-
void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
if (call_completed_callbacks_ == NULL) { // Lazy init.
call_completed_callbacks_ = new List<CallCompletedCallback>();
@@ -151,17 +142,15 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool observer_delivery_pending =
- FLAG_harmony_observation && isolate->observer_delivery_pending();
- if (!has_call_completed_callbacks && !observer_delivery_pending) return;
+ bool microtask_pending = isolate->microtask_pending();
+ if (!has_call_completed_callbacks && !microtask_pending) return;
+
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- if (observer_delivery_pending) {
- JSObject::DeliverChangeRecords(isolate);
- }
+ if (microtask_pending) Execution::RunMicrotasks(isolate);
if (has_call_completed_callbacks) {
for (int i = 0; i < call_completed_callbacks_->length(); i++) {
call_completed_callbacks_->at(i)();
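
The observer-delivery special case is generalized here into microtask pumping: pending microtasks are drained exactly once, when the outermost API call completes, and the call depth is bumped first so the drain cannot recurse. A toy model of that discipline, not the Isolate machinery itself:

    #include <cstdio>
    #include <functional>
    #include <queue>

    struct Engine {
      int call_depth = 0;
      std::queue<std::function<void()> > microtasks;

      // Mirrors the shape of FireCallCompletedCallback: run pending
      // microtasks only at the outermost call, guarding against re-entry.
      void OnCallCompleted() {
        if (microtasks.empty()) return;
        if (call_depth != 0) return;  // not the outermost call yet
        ++call_depth;                 // block recursive draining
        while (!microtasks.empty()) {
          microtasks.front()();
          microtasks.pop();
        }
        --call_depth;
      }
    };

    int main() {
      Engine e;
      e.microtasks.push([] { std::printf("microtask ran\n"); });
      e.OnCallCompleted();
      return 0;
    }
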
@@ -171,30 +160,6 @@ void V8::FireCallCompletedCallback(Isolate* isolate) {
}
-// Use a union type to avoid type-aliasing optimizations in GCC.
-typedef union {
- double double_value;
- uint64_t uint64_t_value;
-} double_int_union;
-
-
-Object* V8::FillHeapNumberWithRandom(Object* heap_number,
- Context* context) {
- double_int_union r;
- uint64_t random_bits = Random(context);
- // Convert 32 random bits to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- static const double binary_million = 1048576.0;
- r.double_value = binary_million;
- r.uint64_t_value |= random_bits;
- r.double_value -= binary_million;
-
- HeapNumber::cast(heap_number)->set_value(r.double_value);
- return heap_number;
-}
-
-
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
if (FLAG_stress_compaction) {
@@ -203,49 +168,9 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
- if (FLAG_concurrent_recompilation &&
- (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs)) {
- FLAG_concurrent_recompilation = false;
- PrintF("Concurrent recompilation has been disabled for tracing.\n");
- }
-
- if (FLAG_sweeper_threads <= 0) {
- if (FLAG_concurrent_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_SWEEPING);
- } else if (FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_SWEEPING);
- }
- if (FLAG_sweeper_threads == 0) {
- FLAG_concurrent_sweeping = false;
- FLAG_parallel_sweeping = false;
- }
- } else if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = 0;
- }
-
- if (FLAG_parallel_marking) {
- if (FLAG_marking_threads <= 0) {
- FLAG_marking_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_MARKING);
- }
- if (FLAG_marking_threads == 0) {
- FLAG_parallel_marking = false;
- }
- } else {
- FLAG_marking_threads = 0;
- }
-
- if (FLAG_concurrent_recompilation &&
- SystemThreadManager::NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
- FLAG_concurrent_recompilation = false;
- }
-
+#ifdef V8_USE_DEFAULT_PLATFORM
+ platform_ = new DefaultPlatform;
+#endif
Sampler::SetUp();
CPU::SetUp();
OS::PostSetUp();
@@ -261,4 +186,23 @@ void V8::InitializeOncePerProcess() {
CallOnce(&init_once, &InitializeOncePerProcessImpl);
}
+
+void V8::InitializePlatform(v8::Platform* platform) {
+ ASSERT(!platform_);
+ ASSERT(platform);
+ platform_ = platform;
+}
+
+
+void V8::ShutdownPlatform() {
+ ASSERT(platform_);
+ platform_ = NULL;
+}
+
+
+v8::Platform* V8::GetCurrentPlatform() {
+ ASSERT(platform_);
+ return platform_;
+}
+
} } // namespace v8::internal
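
The three accessors added above give the embedder explicit control of the Platform object's lifetime: InitializePlatform installs it, ShutdownPlatform only detaches it, and ownership stays with the caller unless V8_USE_DEFAULT_PLATFORM is defined. A minimal sketch of that contract, assuming a hypothetical MyPlatform class implementing the v8::Platform interface:

    // Sketch only: MyPlatform is a hypothetical embedder implementation of
    // the v8::Platform interface; the V8 class is the internal one patched
    // above.
    void EmbedderLifecycle() {
      v8::Platform* platform = new MyPlatform;
      v8::internal::V8::InitializePlatform(platform);  // asserts none set yet
      // ... create isolates, compile and run scripts ...
      v8::internal::V8::ShutdownPlatform();  // detaches; does not delete
      delete platform;                       // ownership stays with embedder
    }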
diff --git a/chromium/v8/src/v8.h b/chromium/v8/src/v8.h
index 5848f748185..8069e8adda2 100644
--- a/chromium/v8/src/v8.h
+++ b/chromium/v8/src/v8.h
@@ -50,6 +50,7 @@
// Basic includes
#include "../include/v8.h"
+#include "../include/v8-platform.h"
#include "v8globals.h"
#include "v8checks.h"
#include "allocation.h"
@@ -95,10 +96,6 @@ class V8 : public AllStatic {
ReturnAddressLocationResolver resolver);
// Support for entry hooking JITed code.
static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
- // Random number generation support. Not cryptographically safe.
- static uint32_t Random(Context* context);
- static Object* FillHeapNumberWithRandom(Object* heap_number,
- Context* context);
static void AddCallCompletedCallback(CallCompletedCallback callback);
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
@@ -113,6 +110,10 @@ class V8 : public AllStatic {
array_buffer_allocator_ = allocator;
}
+ static void InitializePlatform(v8::Platform* platform);
+ static void ShutdownPlatform();
+ static v8::Platform* GetCurrentPlatform();
+
private:
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
@@ -121,6 +122,8 @@ class V8 : public AllStatic {
static List<CallCompletedCallback>* call_completed_callbacks_;
// Allocator for external array buffers.
static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
+ // v8::Platform to use.
+ static v8::Platform* platform_;
};
diff --git a/chromium/v8/src/v8conversions.h b/chromium/v8/src/v8conversions.h
index 3a7b5242ab7..68107de97a2 100644
--- a/chromium/v8/src/v8conversions.h
+++ b/chromium/v8/src/v8conversions.h
@@ -55,19 +55,41 @@ double StringToDouble(UnicodeCache* unicode_cache,
// Converts a string into an integer.
double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
-// Converts a number into size_t.
-inline size_t NumberToSize(Isolate* isolate,
- Object* number) {
+inline bool TryNumberToSize(Isolate* isolate,
+ Object* number, size_t* result) {
SealHandleScope shs(isolate);
if (number->IsSmi()) {
- return Smi::cast(number)->value();
+ int value = Smi::cast(number)->value();
+ ASSERT(
+ static_cast<unsigned>(Smi::kMaxValue)
+ <= std::numeric_limits<size_t>::max());
+ if (value >= 0) {
+ *result = static_cast<size_t>(value);
+ return true;
+ }
+ return false;
} else {
ASSERT(number->IsHeapNumber());
double value = HeapNumber::cast(number)->value();
- return static_cast<size_t>(value);
+ if (value >= 0 &&
+ value <= std::numeric_limits<size_t>::max()) {
+ *result = static_cast<size_t>(value);
+ return true;
+ } else {
+ return false;
+ }
}
}
+// Converts a number into size_t.
+inline size_t NumberToSize(Isolate* isolate,
+ Object* number) {
+ size_t result = 0;
+ bool is_valid = TryNumberToSize(isolate, number, &result);
+ CHECK(is_valid);
+ return result;
+}
+
} } // namespace v8::internal
#endif // V8_V8CONVERSIONS_H_
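
The split into TryNumberToSize and a CHECKing NumberToSize replaces a cast that silently truncated negative or oversized doubles. The same checked-conversion pattern, reduced to a standalone sketch with plain doubles instead of heap numbers:

    #include <cstddef>
    #include <limits>

    // Checked conversion: reject negatives and anything at or beyond the
    // top of the size_t range. The bound is kept strict because max()
    // can round up when converted to double on 64-bit targets.
    bool TryDoubleToSize(double value, size_t* result) {
      if (value >= 0 &&
          value < static_cast<double>(std::numeric_limits<size_t>::max())) {
        *result = static_cast<size_t>(value);
        return true;
      }
      return false;
    }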
diff --git a/chromium/v8/src/v8globals.h b/chromium/v8/src/v8globals.h
index 7fa2fd62c56..4910cb7358d 100644
--- a/chromium/v8/src/v8globals.h
+++ b/chromium/v8/src/v8globals.h
@@ -199,6 +199,11 @@ const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };
+enum MinimumCapacity {
+ USE_DEFAULT_MINIMUM_CAPACITY,
+ USE_CUSTOM_MINIMUM_CAPACITY
+};
+
enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
diff --git a/chromium/v8/src/v8natives.js b/chromium/v8/src/v8natives.js
index 76eeac6a58f..96b88c5285c 100644
--- a/chromium/v8/src/v8natives.js
+++ b/chromium/v8/src/v8natives.js
@@ -32,7 +32,6 @@
// var $Number = global.Number;
// var $Function = global.Function;
// var $Array = global.Array;
-// var $NaN = 0/0;
//
// in math.js:
// var $floor = MathFloor
@@ -95,7 +94,7 @@ function SetUpLockedPrototype(constructor, fields, methods) {
}
if (fields) {
for (var i = 0; i < fields.length; i++) {
- %SetProperty(prototype, fields[i], void 0, DONT_ENUM | DONT_DELETE);
+ %SetProperty(prototype, fields[i], UNDEFINED, DONT_ENUM | DONT_DELETE);
}
}
for (var i = 0; i < methods.length; i += 2) {
@@ -148,7 +147,7 @@ function GlobalParseInt(string, radix) {
string = TO_STRING_INLINE(string);
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36))) {
- return $NaN;
+ return NAN;
}
}
@@ -171,19 +170,18 @@ function GlobalParseFloat(string) {
function GlobalEval(x) {
if (!IS_STRING(x)) return x;
- var global_receiver = %GlobalReceiver(global);
- var global_is_detached = (global === global_receiver);
-
// For consistency with JSC we require the global object passed to
// eval to be the global object from which 'eval' originated. This
// is not mandated by the spec.
// We only throw if the global has been detached, since we need the
// receiver as this-value for the call.
- if (global_is_detached) {
+ if (!%IsAttachedGlobal(global)) {
throw new $EvalError('The "this" value passed to eval must ' +
'be the global object from which eval originated');
}
+ var global_receiver = %GlobalReceiver(global);
+
var f = %CompileString(x, false);
if (!IS_FUNCTION(f)) return f;
@@ -197,15 +195,16 @@ function GlobalEval(x) {
function SetUpGlobal() {
%CheckIsBootstrapping();
+ var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
+
// ECMA 262 - 15.1.1.1.
- %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "NaN", NAN, attributes);
// ECMA-262 - 15.1.1.2.
- %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "Infinity", INFINITY, attributes);
// ECMA-262 - 15.1.1.3.
- %SetProperty(global, "undefined", void 0,
- DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty(global, "undefined", UNDEFINED, attributes);
// Set up non-enumerable function on the global object.
InstallFunctions(global, DONT_ENUM, $Array(
@@ -475,12 +474,12 @@ function ToPropertyDescriptor(obj) {
function ToCompletePropertyDescriptor(obj) {
var desc = ToPropertyDescriptor(obj);
if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
- if (!desc.hasValue()) desc.setValue(void 0);
+ if (!desc.hasValue()) desc.setValue(UNDEFINED);
if (!desc.hasWritable()) desc.setWritable(false);
} else {
// Is accessor descriptor.
- if (!desc.hasGetter()) desc.setGet(void 0);
- if (!desc.hasSetter()) desc.setSet(void 0);
+ if (!desc.hasGetter()) desc.setGet(UNDEFINED);
+ if (!desc.hasSetter()) desc.setSet(UNDEFINED);
}
if (!desc.hasEnumerable()) desc.setEnumerable(false);
if (!desc.hasConfigurable()) desc.setConfigurable(false);
@@ -491,7 +490,7 @@ function ToCompletePropertyDescriptor(obj) {
function PropertyDescriptor() {
// Initialize here so they are all in-object and have the same map.
// Default values from ES5 8.6.1.
- this.value_ = void 0;
+ this.value_ = UNDEFINED;
this.hasValue_ = false;
this.writable_ = false;
this.hasWritable_ = false;
@@ -499,9 +498,9 @@ function PropertyDescriptor() {
this.hasEnumerable_ = false;
this.configurable_ = false;
this.hasConfigurable_ = false;
- this.get_ = void 0;
+ this.get_ = UNDEFINED;
this.hasGetter_ = false;
- this.set_ = void 0;
+ this.set_ = UNDEFINED;
this.hasSetter_ = false;
}
@@ -593,7 +592,7 @@ function ConvertDescriptorArrayToDescriptor(desc_array) {
}
if (IS_UNDEFINED(desc_array)) {
- return void 0;
+ return UNDEFINED;
}
var desc = new PropertyDescriptor();
@@ -647,10 +646,11 @@ function GetOwnProperty(obj, v) {
var p = ToName(v);
if (%IsJSProxy(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(v)) return void 0;
+ if (IS_SYMBOL(v)) return UNDEFINED;
var handler = %GetHandler(obj);
- var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p);
+ var descriptor = CallTrap1(
+ handler, "getOwnPropertyDescriptor", UNDEFINED, p);
if (IS_UNDEFINED(descriptor)) return descriptor;
var desc = ToCompletePropertyDescriptor(descriptor);
if (!desc.isConfigurable()) {
@@ -666,7 +666,7 @@ function GetOwnProperty(obj, v) {
var props = %GetOwnProperty(ToObject(obj), p);
// A false value here means that access checks failed.
- if (props === false) return void 0;
+ if (props === false) return UNDEFINED;
return ConvertDescriptorArrayToDescriptor(props);
}
@@ -693,7 +693,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
if (IS_SYMBOL(p)) return false;
var handler = %GetHandler(obj);
- var result = CallTrap2(handler, "defineProperty", void 0, p, attributes);
+ var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
if (!ToBoolean(result)) {
if (should_throw) {
throw MakeTypeError("handler_returned_false",
@@ -710,7 +710,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
function DefineObjectProperty(obj, p, desc, should_throw) {
var current_or_access = %GetOwnProperty(ToObject(obj), ToName(p));
// A false value here means that access checks failed.
- if (current_or_access === false) return void 0;
+ if (current_or_access === false) return UNDEFINED;
var current = ConvertDescriptorArrayToDescriptor(current_or_access);
var extensible = %IsExtensible(ToObject(obj));
@@ -841,7 +841,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
flag |= READ_ONLY;
}
- var value = void 0; // Default value is undefined.
+ var value = UNDEFINED; // Default value is undefined.
if (desc.hasValue()) {
value = desc.getValue();
} else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
@@ -920,7 +920,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// For the time being, we need a hack to prevent Object.observe from
// generating two change records.
obj.length = new_length;
- desc.value_ = void 0;
+ desc.value_ = UNDEFINED;
desc.hasValue_ = false;
threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
if (emit_splice) {
@@ -1045,7 +1045,7 @@ function ObjectGetOwnPropertyNames(obj) {
// Special handling for proxies.
if (%IsJSProxy(obj)) {
var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", void 0);
+ var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
return ToNameArray(names, "getOwnPropertyNames", false);
}
@@ -1194,7 +1194,7 @@ function ObjectDefineProperties(obj, properties) {
// Harmony proxies.
function ProxyFix(obj) {
var handler = %GetHandler(obj);
- var props = CallTrap0(handler, "fix", void 0);
+ var props = CallTrap0(handler, "fix", UNDEFINED);
if (IS_UNDEFINED(props)) {
throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
}
@@ -1248,7 +1248,7 @@ function ObjectFreeze(obj) {
throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
}
var isProxy = %IsJSProxy(obj);
- if (isProxy || %HasNonStrictArgumentsElements(obj)) {
+ if (isProxy || %HasNonStrictArgumentsElements(obj) || %IsObserved(obj)) {
if (isProxy) {
ProxyFix(obj);
}
@@ -1560,8 +1560,8 @@ function NumberToFixed(fractionDigits) {
}
if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
return %NumberToFixed(x, f);
}
@@ -1578,11 +1578,11 @@ function NumberToExponential(fractionDigits) {
// Get the value of this number in case it's an object.
x = %_ValueOf(this);
}
- var f = IS_UNDEFINED(fractionDigits) ? void 0 : TO_INTEGER(fractionDigits);
+ var f = IS_UNDEFINED(fractionDigits) ? UNDEFINED : TO_INTEGER(fractionDigits);
if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
if (IS_UNDEFINED(f)) {
f = -1; // Signal for runtime function that f is not defined.
@@ -1608,8 +1608,8 @@ function NumberToPrecision(precision) {
var p = TO_INTEGER(precision);
if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
+ if (x == INFINITY) return "Infinity";
+ if (x == -INFINITY) return "-Infinity";
if (p < 1 || p > 21) {
throw new $RangeError("toPrecision() argument must be between 1 and 21");
@@ -1654,18 +1654,18 @@ function SetUpNumber() {
DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.3.
- %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetProperty($Number, "NaN", NAN, DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.4.
%SetProperty($Number,
"NEGATIVE_INFINITY",
- -1/0,
+ -INFINITY,
DONT_ENUM | DONT_DELETE | READ_ONLY);
// ECMA-262 section 15.7.3.5.
%SetProperty($Number,
"POSITIVE_INFINITY",
- 1/0,
+ INFINITY,
DONT_ENUM | DONT_DELETE | READ_ONLY);
%ToFastProperties($Number);
@@ -1836,3 +1836,18 @@ function SetUpFunction() {
}
SetUpFunction();
+
+
+//----------------------------------------------------------------------------
+
+// TODO(rossberg): very simple abstraction for generic microtask queue.
+// Eventually, we should move to a real event queue that allows us to
+// maintain relative ordering of different kinds of tasks.
+
+RunMicrotasks.runners = new InternalArray;
+
+function RunMicrotasks() {
+ while (%SetMicrotaskPending(false)) {
+ for (var i in RunMicrotasks.runners) RunMicrotasks.runners[i]();
+ }
+}
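
RunMicrotasks drains the registered runners until the pending flag stays clear, since a microtask may schedule further microtasks. A self-contained C++ analogue of that drain-until-quiescent control flow (not V8 code):

    #include <functional>
    #include <vector>

    class MicrotaskQueue {
     public:
      void Enqueue(std::function<void()> task) { tasks_.push_back(task); }

      // Run batches until a pass ends with nothing queued; tasks enqueued
      // by a running task execute in the next pass, preserving order.
      void RunMicrotasks() {
        while (!tasks_.empty()) {
          std::vector<std::function<void()>> batch;
          batch.swap(tasks_);
          for (auto& task : batch) task();
        }
      }

     private:
      std::vector<std::function<void()>> tasks_;
    };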
diff --git a/chromium/v8/src/v8threads.cc b/chromium/v8/src/v8threads.cc
index 33b620d8eab..1de9d4fd761 100644
--- a/chromium/v8/src/v8threads.cc
+++ b/chromium/v8/src/v8threads.cc
@@ -42,11 +42,6 @@ namespace v8 {
bool Locker::active_ = false;
-Locker::Locker() {
- Initialize(i::Isolate::GetDefaultIsolateForLocking());
-}
-
-
// Once the Locker is initialized, the current thread will be guaranteed to have
// the lock for a given isolate.
void Locker::Initialize(v8::Isolate* isolate) {
@@ -116,11 +111,6 @@ Locker::~Locker() {
}
-Unlocker::Unlocker() {
- Initialize(i::Isolate::GetDefaultIsolateForLocking());
-}
-
-
void Unlocker::Initialize(v8::Isolate* isolate) {
ASSERT(isolate != NULL);
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
@@ -143,17 +133,6 @@ Unlocker::~Unlocker() {
}
-void Locker::StartPreemption(int every_n_ms) {
- v8::internal::ContextSwitcher::StartPreemption(
- i::Isolate::Current(), every_n_ms);
-}
-
-
-void Locker::StopPreemption() {
- v8::internal::ContextSwitcher::StopPreemption(i::Isolate::Current());
-}
-
-
namespace internal {
@@ -428,64 +407,5 @@ void ThreadManager::TerminateExecution(ThreadId thread_id) {
}
-ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
- : Thread("v8:CtxtSwitcher"),
- keep_going_(true),
- sleep_ms_(every_n_ms),
- isolate_(isolate) {
-}
-
-
-// Set the scheduling interval of V8 threads. This function starts the
-// ContextSwitcher thread if needed.
-void ContextSwitcher::StartPreemption(Isolate* isolate, int every_n_ms) {
- ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
- if (isolate->context_switcher() == NULL) {
- // If the ContextSwitcher thread is not running at the moment start it now.
- isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
- isolate->context_switcher()->Start();
- } else {
- // ContextSwitcher thread is already running, so we just change the
- // scheduling interval.
- isolate->context_switcher()->sleep_ms_ = every_n_ms;
- }
-}
-
-
-// Disable preemption of V8 threads. If multiple threads want to use V8 they
-// must cooperatively schedule amongst them from this point on.
-void ContextSwitcher::StopPreemption(Isolate* isolate) {
- ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
- if (isolate->context_switcher() != NULL) {
- // The ContextSwitcher thread is running. We need to stop it and release
- // its resources.
- isolate->context_switcher()->keep_going_ = false;
- // Wait for the ContextSwitcher thread to exit.
- isolate->context_switcher()->Join();
- // Thread has exited, now we can delete it.
- delete(isolate->context_switcher());
- isolate->set_context_switcher(NULL);
- }
-}
-
-
-// Main loop of the ContextSwitcher thread: Preempt the currently running V8
-// thread at regular intervals.
-void ContextSwitcher::Run() {
- while (keep_going_) {
- OS::Sleep(sleep_ms_);
- isolate()->stack_guard()->Preempt();
- }
-}
-
-
-// Acknowledge the preemption by the receiving thread.
-void ContextSwitcher::PreemptionReceived() {
- ASSERT(Locker::IsLocked(i::Isolate::GetDefaultIsolateForLocking()));
- // There is currently no accounting being done for this. But could be in the
- // future, which is why we leave this in.
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/v8threads.h b/chromium/v8/src/v8threads.h
index 1edacfc3bb7..a20700a5c9e 100644
--- a/chromium/v8/src/v8threads.h
+++ b/chromium/v8/src/v8threads.h
@@ -139,34 +139,6 @@ class ThreadManager {
};
-// The ContextSwitcher thread is used to schedule regular preemptions to
-// multiple running V8 threads. Generally it is necessary to call
-// StartPreemption if there is more than one thread running. If not, a single
-// JavaScript can take full control of V8 and not allow other threads to run.
-class ContextSwitcher: public Thread {
- public:
- // Set the preemption interval for the ContextSwitcher thread.
- static void StartPreemption(Isolate* isolate, int every_n_ms);
-
- // Stop sending preemption requests to threads.
- static void StopPreemption(Isolate* isolate);
-
- // Preempted thread needs to call back to the ContextSwitcher to acknowledge
- // the handling of a preemption request.
- static void PreemptionReceived();
-
- private:
- ContextSwitcher(Isolate* isolate, int every_n_ms);
-
- Isolate* isolate() const { return isolate_; }
-
- void Run();
-
- bool keep_going_;
- int sleep_ms_;
- Isolate* isolate_;
-};
-
} } // namespace v8::internal
#endif // V8_V8THREADS_H_
diff --git a/chromium/v8/src/v8utils.h b/chromium/v8/src/v8utils.h
index fd3f4a50954..02e57ebe727 100644
--- a/chromium/v8/src/v8utils.h
+++ b/chromium/v8/src/v8utils.h
@@ -194,61 +194,6 @@ inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
}
-// Copies data from |src| to |dst|. No restrictions.
-template <typename T>
-inline void MoveBytes(T* dst, const T* src, size_t num_bytes) {
- STATIC_ASSERT(sizeof(T) == 1);
- switch (num_bytes) {
- case 0: return;
- case 1:
- *dst = *src;
- return;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- case 2:
- *reinterpret_cast<uint16_t*>(dst) = *reinterpret_cast<const uint16_t*>(src);
- return;
- case 3: {
- uint16_t part1 = *reinterpret_cast<const uint16_t*>(src);
- byte part2 = *(src + 2);
- *reinterpret_cast<uint16_t*>(dst) = part1;
- *(dst + 2) = part2;
- return;
- }
- case 4:
- *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src);
- return;
- case 5:
- case 6:
- case 7:
- case 8: {
- uint32_t part1 = *reinterpret_cast<const uint32_t*>(src);
- uint32_t part2 = *reinterpret_cast<const uint32_t*>(src + num_bytes - 4);
- *reinterpret_cast<uint32_t*>(dst) = part1;
- *reinterpret_cast<uint32_t*>(dst + num_bytes - 4) = part2;
- return;
- }
- case 9:
- case 10:
- case 11:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16: {
- double part1 = *reinterpret_cast<const double*>(src);
- double part2 = *reinterpret_cast<const double*>(src + num_bytes - 8);
- *reinterpret_cast<double*>(dst) = part1;
- *reinterpret_cast<double*>(dst + num_bytes - 8) = part2;
- return;
- }
-#endif
- default:
- OS::MemMove(dst, src, num_bytes);
- return;
- }
-}
-
-
template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
#ifdef DEBUG
diff --git a/chromium/v8/src/version.cc b/chromium/v8/src/version.cc
index 7f7cb0156d0..17793d535d1 100644
--- a/chromium/v8/src/version.cc
+++ b/chromium/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 21
-#define BUILD_NUMBER 18
-#define PATCH_LEVEL 13
+#define MINOR_VERSION 23
+#define BUILD_NUMBER 17
+#define PATCH_LEVEL 22
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/chromium/v8/src/win32-math.cc b/chromium/v8/src/win32-math.cc
index 88fa3a684be..8f6d0774312 100644
--- a/chromium/v8/src/win32-math.cc
+++ b/chromium/v8/src/win32-math.cc
@@ -29,7 +29,7 @@
// refer to The Open Group Base Specification for specification of the correct
// semantics for these functions.
// (http://www.opengroup.org/onlinepubs/000095399/)
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
#include "win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
diff --git a/chromium/v8/src/win32-math.h b/chromium/v8/src/win32-math.h
index 0397c7e14ea..fd9312b0f54 100644
--- a/chromium/v8/src/win32-math.h
+++ b/chromium/v8/src/win32-math.h
@@ -37,6 +37,8 @@
#error Wrong environment, expected MSVC.
#endif // _MSC_VER
+// MSVC 2013+ provides implementations of all standard math functions.
+#if (_MSC_VER < 1800)
enum {
FP_NAN,
FP_INFINITE,
@@ -58,4 +60,6 @@ int signbit(double x);
} // namespace std
+#endif // _MSC_VER < 1800
+
#endif // V8_WIN32_MATH_H_
diff --git a/chromium/v8/src/x64/assembler-x64-inl.h b/chromium/v8/src/x64/assembler-x64-inl.h
index 07d07033e95..073fcbe8e94 100644
--- a/chromium/v8/src/x64/assembler-x64-inl.h
+++ b/chromium/v8/src/x64/assembler-x64-inl.h
@@ -43,6 +43,7 @@ namespace internal {
static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 6;
void Assembler::emitl(uint32_t x) {
@@ -61,11 +62,8 @@ void Assembler::emitp(void* x, RelocInfo::Mode rmode) {
}
-void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
+void Assembler::emitq(uint64_t x) {
Memory::uint64_at(pc_) = x;
- if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode, x);
- }
pc_ += sizeof(uint64_t);
}
@@ -79,7 +77,8 @@ void Assembler::emitw(uint16_t x) {
void Assembler::emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
} else {
@@ -304,15 +303,9 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
-}
-
-
-Address* RelocInfo::target_reference_address() {
+Address RelocInfo::target_reference() {
ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ return Memory::Address_at(pc_);
}
@@ -371,6 +364,18 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
+void RelocInfo::WipeOut() {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ // Effectively write zero into the relocation.
+ Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
// The recognized call sequence is:
// movq(kScratchRegister, address); call(kScratchRegister);
@@ -392,6 +397,13 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return origin->code_target_object_handle_at(pc_ + 1);
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
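
For call-relative relocations, WipeOut cannot simply null the slot; it retargets the call so the stored 32-bit displacement becomes zero, because on x64 the displacement is measured from the end of the 4-byte field. The arithmetic in isolation, assuming the usual set_target_address_at convention:

    #include <cstdint>
    #include <cstring>

    // Writing target = pc + sizeof(int32_t) stores a zero displacement,
    // which is what "effectively write zero into the relocation" means.
    void SetTargetAddressAt(uint8_t* pc, uint8_t* target) {
      int32_t disp = static_cast<int32_t>(target - (pc + sizeof(int32_t)));
      std::memcpy(pc, &disp, sizeof(disp));
    }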
diff --git a/chromium/v8/src/x64/assembler-x64.cc b/chromium/v8/src/x64/assembler-x64.cc
index 41bf297b387..bc875d67e83 100644
--- a/chromium/v8/src/x64/assembler-x64.cc
+++ b/chromium/v8/src/x64/assembler-x64.cc
@@ -44,7 +44,7 @@ bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
-
+uint64_t CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
ASSERT(CpuFeatures::initialized_);
@@ -76,7 +76,7 @@ void CpuFeatures::Probe() {
ASSERT(cpu.has_sse2());
probed_features |= static_cast<uint64_t>(1) << SSE2;
- // CMOD must be available on every x64 CPU.
+ // CMOV must be available on every x64 CPU.
ASSERT(cpu.has_cmov());
probed_features |= static_cast<uint64_t>(1) << CMOV;
@@ -110,8 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movq(r10, target, RelocInfo::NONE64);
- patcher.masm()->call(r10);
+ patcher.masm()->movq(kScratchRegister, target, RelocInfo::NONE64);
+ patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
ASSERT_EQ(Assembler::kCallSequenceLength,
@@ -140,12 +140,12 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
const int
Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 15
+ // rax, rbx, rdx, rcx, rsi, rdi, r8, r9, r11, r14, r15
+ 0, 3, 2, 1, 6, 7, 8, 9, 11, 14, 15
};
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
+ 0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, -1, -1, 9, 10
};
@@ -1357,98 +1357,94 @@ void Assembler::movb(const Operand& dst, Register src) {
}
-void Assembler::movw(const Operand& dst, Register src) {
+void Assembler::movb(const Operand& dst, Immediate imm) {
EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
+ emit_optional_rex_32(dst);
+ emit(0xC6);
+ emit_operand(0x0, dst);
+ emit(static_cast<byte>(imm.value_));
}
-void Assembler::movl(Register dst, const Operand& src) {
+void Assembler::movw(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
+ emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x8B);
emit_operand(dst, src);
}
-void Assembler::movl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movl(const Operand& dst, Register src) {
+void Assembler::movw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
+ emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x89);
emit_operand(src, dst);
}
-void Assembler::movl(const Operand& dst, Immediate value) {
+void Assembler::movw(const Operand& dst, Immediate imm) {
EnsureSpace ensure_space(this);
+ emit(0x66);
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0x0, dst);
- emit(value);
-}
-
-
-void Assembler::movl(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xB8 + dst.low_bits());
- emit(value);
+ emit(static_cast<byte>(imm.value_ & 0xff));
+ emit(static_cast<byte>(imm.value_ >> 8));
}
-void Assembler::movq(Register dst, const Operand& src) {
+void Assembler::emit_mov(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
+ emit_rex(dst, src, size);
emit(0x8B);
emit_operand(dst, src);
}
-void Assembler::movq(Register dst, Register src) {
+void Assembler::emit_mov(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
+ emit_rex(src, dst, size);
emit(0x89);
emit_modrm(src, dst);
} else {
- emit_rex_64(dst, src);
+ emit_rex(dst, src, size);
emit(0x8B);
emit_modrm(dst, src);
}
}
-void Assembler::movq(Register dst, Immediate value) {
+void Assembler::emit_mov(const Operand& dst, Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+ emit_rex(src, dst, size);
+ emit(0x89);
+ emit_operand(src, dst);
}
-void Assembler::movq(const Operand& dst, Register src) {
+void Assembler::emit_mov(Register dst, Immediate value, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x89);
- emit_operand(src, dst);
+ emit_rex(dst, size);
+ if (size == kInt64Size) {
+ emit(0xC7);
+ emit_modrm(0x0, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ emit(0xB8 + dst.low_bits());
+ }
+ emit(value);
+}
+
+
+void Assembler::emit_mov(const Operand& dst, Immediate value, int size) {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, size);
+ emit(0xC7);
+ emit_operand(0x0, dst);
+ emit(value);
}
@@ -1456,45 +1452,27 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value, rmode);
+ if (RelocInfo::IsNone(rmode)) {
+ movq(dst, reinterpret_cast<int64_t>(value));
+ } else {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ emitp(value, rmode);
+ }
}
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- // Non-relocatable values might not need a 64-bit representation.
- if (RelocInfo::IsNone(rmode)) {
- if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- return;
- } else if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- return;
- }
- // Value cannot be represented by 32 bits, so do a full 64 bit immediate
- // value.
- }
+void Assembler::movq(Register dst, int64_t value) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
- emitq(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, ExternalReference ref) {
- int64_t value = reinterpret_cast<int64_t>(ref.address());
- movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
+ emitq(value);
}
-void Assembler::movq(const Operand& dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xC7);
- emit_operand(0, dst);
- emit(value);
+void Assembler::movq(Register dst, uint64_t value) {
+ movq(dst, static_cast<int64_t>(value));
}
@@ -1523,21 +1501,13 @@ void Assembler::movl(const Operand& dst, Label* src) {
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
AllowDeferredHandleDereference using_raw_address;
- // If there is no relocation info, emit the value of the handle efficiently
- // (possibly using less that 8 bytes for the value).
- if (RelocInfo::IsNone(mode)) {
- // There is no possible reason to store a heap pointer without relocation
- // info, so it must be a smi.
- ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
- } else {
- EnsureSpace ensure_space(this);
- ASSERT(value->IsHeapObject());
- ASSERT(!isolate()->heap()->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value.location(), mode);
- }
+ ASSERT(!RelocInfo::IsNone(mode));
+ EnsureSpace ensure_space(this);
+ ASSERT(value->IsHeapObject());
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ emitp(value.location(), mode);
}
@@ -1899,7 +1869,7 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchg(Register dst, Register src) {
+void Assembler::xchgq(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
@@ -1917,6 +1887,24 @@ void Assembler::xchg(Register dst, Register src) {
}
+void Assembler::xchgl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
+ Register other = src.is(rax) ? dst : src;
+ emit_optional_rex_32(other);
+ emit(0x90 | other.low_bits());
+ } else if (dst.low_bits() == 4) {
+ emit_optional_rex_32(dst, src);
+ emit(0x87);
+ emit_modrm(dst, src);
+ } else {
+ emit_optional_rex_32(src, dst);
+ emit(0x87);
+ emit_modrm(src, dst);
+ }
+}
+
+
void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
emit(0x48); // REX.W
@@ -2035,6 +2023,14 @@ void Assembler::testl(const Operand& op, Immediate mask) {
}
+void Assembler::testl(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(reg, op);
+ emit(0x85);
+ emit_operand(reg, op);
+}
+
+
void Assembler::testq(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
emit_rex_64(reg, op);
@@ -2058,6 +2054,10 @@ void Assembler::testq(Register dst, Register src) {
void Assembler::testq(Register dst, Immediate mask) {
+ if (is_uint8(mask.value_)) {
+ testb(dst, mask);
+ return;
+ }
EnsureSpace ensure_space(this);
if (dst.is(rax)) {
emit_rex_64();
@@ -2448,6 +2448,134 @@ void Assembler::emit_farith(int b1, int b2, int i) {
}
+// SSE operations.
+
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::andps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::xorps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
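
Every packed-single emitter added above follows one template: optional REX, the 0x0F escape byte, a one-byte opcode, then the ModR/M byte from emit_sse_operand. A condensed register-to-register sketch, restricted to xmm0-xmm7 so no REX prefix is needed:

    #include <cstdint>
    #include <vector>

    // 0F <opcode> <ModR/M with mod=11>: 0x54 andps, 0x56 orps, 0x57 xorps,
    // 0x58 addps, 0x59 mulps, 0x5C subps, 0x5E divps.
    void EmitSsePs(std::vector<uint8_t>* code, uint8_t opcode,
                   int dst, int src) {
      code->push_back(0x0F);
      code->push_back(opcode);
      code->push_back(static_cast<uint8_t>(0xC0 | (dst << 3) | src));
    }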
@@ -2550,15 +2678,15 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(IsEnabled(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
- emit_optional_rex_32(dst, src);
+ emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x3A);
emit(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
emit(imm8);
}
@@ -2610,6 +2738,17 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
}
+void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0xC6);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
@@ -2879,15 +3018,6 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3000,8 +3130,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ // Don't record external references unless the heap will be serialized.
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@@ -3010,6 +3140,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
+ } else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
+    // Don't record pseudo relocation info for code age sequence mode.
+ return;
}
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
diff --git a/chromium/v8/src/x64/assembler-x64.h b/chromium/v8/src/x64/assembler-x64.h
index f2e37fe863f..1f1316fa829 100644
--- a/chromium/v8/src/x64/assembler-x64.h
+++ b/chromium/v8/src/x64/assembler-x64.h
@@ -91,11 +91,10 @@ struct Register {
// The non-allocatable registers are:
// rsp - stack pointer
// rbp - frame pointer
- // rsi - context register
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
- static const int kMaxNumAllocatableRegisters = 10;
+ static const int kMaxNumAllocatableRegisters = 11;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
@@ -118,6 +117,7 @@ struct Register {
"rbx",
"rdx",
"rcx",
+ "rsi",
"rdi",
"r8",
"r9",
@@ -471,26 +471,45 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
+ if (Check(f, cross_compile_)) return true;
ASSERT(initialized_);
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
+
// Safe defaults include CMOV for X64. It is always available, if
// anyone checks, but they shouldn't need to check.
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
@@ -503,11 +522,18 @@ class CpuFeatures : public AllStatic {
static uint64_t supported_;
static uint64_t found_by_runtime_probing_only_;
+ static uint64_t cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+ V(mov)
+
+
class Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
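
Check and flag2set reduce every feature query to a single bit test, and cross_compile_ acts as an override set consulted before the probed bits. The helpers in isolation (the feature numbering here is illustrative, not V8's actual enum values):

    #include <cstdint>

    enum CpuFeature { SSE3, SSE4_1, CMOV, SAHF };  // hypothetical numbering

    uint64_t FlagToSet(CpuFeature f) { return static_cast<uint64_t>(1) << f; }

    bool Check(CpuFeature f, uint64_t set) { return (set & FlagToSet(f)) != 0; }

    // Cross-compiling is consistent if no target set is declared, or the
    // declared set explicitly contains the feature.
    bool VerifyCrossCompiling(uint64_t cross_compile, CpuFeature f) {
      uint64_t mask = FlagToSet(f);
      return cross_compile == 0 || (cross_compile & mask) == mask;
    }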
@@ -564,13 +590,6 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
- // This sets the branch destination (which is a load instruction on x64).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- *reinterpret_cast<Address*>(instruction_payload) = target;
- }
-
inline Handle<Object> code_target_object_handle_at(Address pc);
inline Address runtime_entry_at(Address pc);
// Number of bytes taken up by the branch target in the code.
@@ -643,6 +662,24 @@ class Assembler : public AssemblerBase {
// Some mnemonics, such as "and", are the same as C++ keywords.
// Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+#define DECLARE_INSTRUCTION(instruction) \
+ template<class P1, class P2> \
+ void instruction##p(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kPointerSize); \
+ } \
+ \
+ template<class P1, class P2> \
+ void instruction##l(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kInt32Size); \
+ } \
+ \
+ template<class P1, class P2> \
+ void instruction##q(P1 p1, P2 p2) { \
+ emit_##instruction(p1, p2, kInt64Size); \
+ }
+ ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
+#undef DECLARE_INSTRUCTION
+
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
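
DECLARE_INSTRUCTION stamps out three size-suffixed wrappers per list entry, so movp, movl and movq all funnel into the single emit_mov with kPointerSize, kInt32Size or kInt64Size. A self-contained toy version of the pattern:

    #include <cstdio>

    const int kInt32Size = 4;
    const int kInt64Size = 8;
    const int kPointerSize = 8;  // assumption: 64-bit target

    struct Emitter {
      void emit_mov(int dst, int src, int size) {
        std::printf("mov r%d, r%d  ; %d-byte operands\n", dst, src, size);
      }

    #define DECLARE_INSTRUCTION(name)                                  \
      void name##p(int a, int b) { emit_##name(a, b, kPointerSize); }  \
      void name##l(int a, int b) { emit_##name(a, b, kInt32Size); }    \
      void name##q(int a, int b) { emit_##name(a, b, kInt64Size); }
      DECLARE_INSTRUCTION(mov)
    #undef DECLARE_INSTRUCTION
    };

    int main() {
      Emitter e;
      e.movl(0, 1);  // 4-byte operands
      e.movq(0, 1);  // 8-byte operands
      return 0;
    }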
@@ -672,38 +709,23 @@ class Assembler : public AssemblerBase {
void movb(Register dst, const Operand& src);
void movb(Register dst, Immediate imm);
void movb(const Operand& dst, Register src);
+ void movb(const Operand& dst, Immediate imm);
// Move the low 16 bits of a 64-bit register value to a 16-bit
// memory location.
+ void movw(Register dst, const Operand& src);
void movw(const Operand& dst, Register src);
+ void movw(const Operand& dst, Immediate imm);
- void movl(Register dst, Register src);
- void movl(Register dst, const Operand& src);
- void movl(const Operand& dst, Register src);
- void movl(const Operand& dst, Immediate imm);
- // Load a 32-bit immediate value, zero-extended to 64 bits.
- void movl(Register dst, Immediate imm32);
-
- // Move 64 bit register value to 64-bit memory location.
- void movq(const Operand& dst, Register src);
- // Move 64 bit memory location to 64-bit register value.
- void movq(Register dst, const Operand& src);
- void movq(Register dst, Register src);
- // Sign extends immediate 32-bit value to 64 bits.
- void movq(Register dst, Immediate x);
// Move the offset of the label location relative to the current
// position (after the move) to the destination.
void movl(const Operand& dst, Label* src);
- // Move sign extended immediate to memory location.
- void movq(const Operand& dst, Immediate value);
- // Instructions to load a 64-bit immediate into a register.
- // All 64-bit immediates must have a relocation mode.
+ // Loads a pointer into a register with a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
- void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
- void movq(Register dst, const char* s, RelocInfo::Mode rmode);
- // Moves the address of the external reference into the register.
- void movq(Register dst, ExternalReference ext);
+ // Loads a 64-bit immediate into a register.
+ void movq(Register dst, int64_t value);
+ void movq(Register dst, uint64_t value);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
void movsxbq(Register dst, const Operand& src);
@@ -734,7 +756,8 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, const Operand& src);
// Exchange two registers
- void xchg(Register dst, Register src);
+ void xchgq(Register dst, Register src);
+ void xchgl(Register dst, Register src);
// Arithmetics
void addl(Register dst, Register src) {
@@ -969,6 +992,10 @@ class Assembler : public AssemblerBase {
arithmetic_op(0x09, src, dst);
}
+ void orl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x09, src, dst);
+ }
+
void or_(Register dst, Immediate src) {
immediate_arithmetic_op(0x1, dst, src);
}
@@ -994,6 +1021,10 @@ class Assembler : public AssemblerBase {
shift(dst, imm8, 0x0);
}
+ void roll(Register dst, Immediate imm8) {
+ shift_32(dst, imm8, 0x0);
+ }
+
void rcr(Register dst, Immediate imm8) {
shift(dst, imm8, 0x3);
}
@@ -1101,6 +1132,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_32(0x2B, dst, src);
}
+ void subl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x29, src, dst);
+ }
+
void subl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
@@ -1119,6 +1154,7 @@ class Assembler : public AssemblerBase {
void testb(const Operand& op, Register reg);
void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
+ void testl(const Operand& op, Register reg);
void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg);
void testq(Register dst, Register src);
@@ -1144,6 +1180,10 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_32(0x6, dst, src);
}
+ void xorl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x31, src, dst);
+ }
+
void xorl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x6, dst, src);
}
@@ -1307,13 +1347,40 @@ class Assembler : public AssemblerBase {
void sahf();
+ // SSE instructions
+ void movaps(XMMRegister dst, XMMRegister src);
+ void movss(XMMRegister dst, const Operand& src);
+ void movss(const Operand& dst, XMMRegister src);
+ void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+
+ void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src);
+ void cvtlsi2ss(XMMRegister dst, Register src);
+
+ void andps(XMMRegister dst, XMMRegister src);
+ void andps(XMMRegister dst, const Operand& src);
+ void orps(XMMRegister dst, XMMRegister src);
+ void orps(XMMRegister dst, const Operand& src);
+ void xorps(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, const Operand& src);
+
+ void addps(XMMRegister dst, XMMRegister src);
+ void addps(XMMRegister dst, const Operand& src);
+ void subps(XMMRegister dst, XMMRegister src);
+ void subps(XMMRegister dst, const Operand& src);
+ void mulps(XMMRegister dst, XMMRegister src);
+ void mulps(XMMRegister dst, const Operand& src);
+ void divps(XMMRegister dst, XMMRegister src);
+ void divps(XMMRegister dst, const Operand& src);
+
+ void movmskps(Register dst, XMMRegister src);
+
// SSE2 instructions
void movd(XMMRegister dst, Register src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
void movq(XMMRegister dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
// Don't use this unless it's important to keep the
// top half of the destination register unchanged.
@@ -1331,13 +1398,7 @@ class Assembler : public AssemblerBase {
void movdqu(XMMRegister dst, const Operand& src);
void movapd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
-
- void cvttss2si(Register dst, const Operand& src);
- void cvttss2si(Register dst, XMMRegister src);
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1347,7 +1408,6 @@ class Assembler : public AssemblerBase {
void cvtqsi2sd(XMMRegister dst, const Operand& src);
void cvtqsi2sd(XMMRegister dst, Register src);
- void cvtlsi2ss(XMMRegister dst, Register src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, const Operand& src);
@@ -1366,11 +1426,16 @@ class Assembler : public AssemblerBase {
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
+ void movmskpd(Register dst, XMMRegister src);
+
+ // SSE 4.1 instruction
+ void extractps(Register dst, XMMRegister src, byte imm8);
enum RoundingMode {
kRoundToNearest = 0x0,
@@ -1381,17 +1446,6 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
- void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
-
- void cmpltsd(XMMRegister dst, XMMRegister src);
-
- // The first argument is the reg field, the second argument is the r/m field.
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, Register src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
// Debugging
void Print();
@@ -1452,7 +1506,7 @@ class Assembler : public AssemblerBase {
void emit(byte x) { *pc_++ = x; }
inline void emitl(uint32_t x);
inline void emitp(void* x, RelocInfo::Mode rmode);
- inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+ inline void emitq(uint64_t x);
inline void emitw(uint16_t x);
inline void emit_code_target(Handle<Code> target,
RelocInfo::Mode rmode,
@@ -1543,6 +1597,25 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
+ template<class P1>
+ void emit_rex(P1 p1, int size) {
+ if (size == kInt64Size) {
+ emit_rex_64(p1);
+ } else {
+ ASSERT(size == kInt32Size);
+ emit_optional_rex_32(p1);
+ }
+ }
+
+ template<class P1, class P2>
+ void emit_rex(P1 p1, P2 p2, int size) {
+ if (size == kInt64Size) {
+ emit_rex_64(p1, p2);
+ } else {
+ ASSERT(size == kInt32Size);
+ emit_optional_rex_32(p1, p2);
+ }
+ }
// Emit the ModR/M byte, and optionally the SIB byte and
// 1- or 4-byte offset for a memory operand. Also encodes
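
The emit_rex templates centralize the width decision: kInt64Size always produces REX.W, while kInt32Size falls back to a REX prefix only when extended registers require one. The same decision in raw encoding terms, a standalone sketch with registers numbered 0-15:

    #include <cstdint>
    #include <vector>

    // size == 8 forces REX.W; otherwise emit REX only if reg or rm is one
    // of r8-r15 and needs the extension bits.
    void EmitRex(std::vector<uint8_t>* code, int reg, int rm, int size) {
      uint8_t rex = 0x40 | ((reg >> 3) << 2) | (rm >> 3);  // REX.R, REX.B
      if (size == 8) {
        code->push_back(rex | 0x08);  // REX.W: 64-bit operand size
      } else if (rex != 0x40) {
        code->push_back(rex);         // optional REX for r8-r15
      }
    }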
@@ -1572,6 +1645,12 @@ class Assembler : public AssemblerBase {
// Emit the code-object-relative offset of the label's position
inline void emit_code_relative_offset(Label* label);
+ // The first argument is the reg field, the second argument is the r/m field.
+ void emit_sse_operand(XMMRegister dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister reg, const Operand& adr);
+ void emit_sse_operand(XMMRegister dst, Register src);
+ void emit_sse_operand(Register dst, XMMRegister src);
+
// Emit machine code for one of the operations ADD, ADC, SUB, SBC,
// AND, OR, XOR, or CMP. The encodings of these operations are all
// similar, differing just in the opcode or in the reg field of the
@@ -1622,6 +1701,12 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void emit_mov(Register dst, const Operand& src, int size);
+ void emit_mov(Register dst, Register src, int size);
+ void emit_mov(const Operand& dst, Register src, int size);
+ void emit_mov(Register dst, Immediate value, int size);
+ void emit_mov(const Operand& dst, Immediate value, int size);
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
diff --git a/chromium/v8/src/x64/builtins-x64.cc b/chromium/v8/src/x64/builtins-x64.cc
index 81721c25e1a..f4864f899ef 100644
--- a/chromium/v8/src/x64/builtins-x64.cc
+++ b/chromium/v8/src/x64/builtins-x64.cc
@@ -158,7 +158,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
+ __ Move(kScratchRegister, debug_step_in_fp);
__ cmpq(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
@@ -600,6 +600,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the stub returns.
__ subq(Operand(rsp, 0), Immediate(5));
__ Pushad();
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
@@ -625,7 +626,44 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ Pushad();
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+ __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 1);
+ }
+ __ Popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ PopReturnAddressTo(kScratchRegister);
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+ SaveFPRegsMode save_doubles) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -634,7 +672,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ Popad();
// Tear down internal frame.
}
@@ -644,6 +682,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
}
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+ Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
@@ -658,17 +706,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(r10, Operand(rsp, kPCOnStackSize));
+ __ SmiToInteger32(kScratchRegister, Operand(rsp, kPCOnStackSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -692,21 +740,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ Popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
@@ -894,9 +927,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// rbp[16] : function arguments
// rbp[24] : receiver
// rbp[32] : function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
+ static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ static const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ push(Operand(rbp, kFunctionOffset));
__ push(Operand(rbp, kArgumentsOffset));
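The hunk above replaces literal multiples of kPointerSize with offsets derived from the frame-layout constants, which makes the dependence on the pushed PC and FP explicit. A self-checking sketch of the same arithmetic, with the constant values assumed from the x64 frame layout in this snapshot:

    constexpr int kPointerSize   = 8;
    constexpr int kPCOnStackSize = kPointerSize;  // pushed return address
    constexpr int kFPOnStackSize = kPointerSize;  // pushed caller frame pointer

    constexpr int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;  // rbp[16]
    constexpr int kReceiverOffset  = kArgumentsOffset + kPointerSize;  // rbp[24]
    constexpr int kFunctionOffset  = kReceiverOffset + kPointerSize;   // rbp[32]

    static_assert(kArgumentsOffset == 2 * kPointerSize, "same value as the old literal");
    static_assert(kFunctionOffset  == 4 * kPointerSize, "same value as the old literal");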
@@ -1140,13 +1173,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- &not_cached);
+ __ LookupNumberStringCache(rax, // Input.
+ rbx, // Result.
+ rcx, // Scratch 1.
+ rdx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
@@ -1401,6 +1432,23 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as an indicator that recompilation might be done.
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
+
+
#undef __
} } // namespace v8::internal
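A recurring move in this file is splitting a builtin into a shared helper plus thin entry points that select an enum flag, as Generate_NotifyStubFailureHelper does with SaveFPRegsMode. A skeletal sketch of the pattern, with MacroAssembler reduced to a stand-in type and the shared body elided:

    enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

    struct MacroAssembler;  // stand-in for the real assembler

    // Shared body; only the runtime call's save-doubles flag differs.
    static void NotifyStubFailureHelper(MacroAssembler* masm,
                                        SaveFPRegsMode save_doubles) {
      // ... push registers, CallRuntime(kNotifyStubFailure, 0, save_doubles) ...
    }

    void Generate_NotifyStubFailure(MacroAssembler* masm) {
      NotifyStubFailureHelper(masm, kDontSaveFPRegs);
    }

    void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
      NotifyStubFailureHelper(masm, kSaveFPRegs);
    }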
diff --git a/chromium/v8/src/x64/code-stubs-x64.cc b/chromium/v8/src/x64/code-stubs-x64.cc
index 51e1a5395cf..0c9a0f20cdd 100644
--- a/chromium/v8/src/x64/code-stubs-x64.cc
+++ b/chromium/v8/src/x64/code-stubs-x64.cc
@@ -60,6 +60,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -67,7 +78,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -78,7 +89,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -103,6 +114,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -123,6 +145,19 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rcx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -145,6 +180,18 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
}
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -153,14 +200,21 @@ static void InitializeArrayConstructorDescriptor(
// rax -- number of arguments
// rdi -- function
// rbx -- type info cell with elements kind
- static Register registers[] = { rdi, rbx };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
+ static Register registers_variable_args[] = { rdi, rbx, rax };
+ static Register registers_no_args[] = { rdi, rbx };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// The stack parameter count needs the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &rax;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = rax;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -174,15 +228,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// rax -- number of arguments
// rdi -- constructor function
- static Register registers[] = { rdi };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { rdi, rax };
+ static Register registers_no_args[] = { rdi };
- if (constant_stack_parameter_count != 0) {
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// The stack parameter count needs the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &rax;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = rax;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -279,6 +339,17 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void NewStringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
#define __ ACCESS_MASM(masm)
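The InitializeInterfaceDescriptor hunks above all follow one shape: a function-local static register array, a parameter count, and a deoptimization handler. A reduced sketch of that shape, with Register and Address as hypothetical stand-ins for the real types:

    using Register = int;
    using Address  = const void*;

    struct CodeStubInterfaceDescriptor {
      int register_param_count_ = 0;
      const Register* register_params_ = nullptr;
      Address deoptimization_handler_ = nullptr;
    };

    void InitializeExampleDescriptor(CodeStubInterfaceDescriptor* descriptor,
                                     Address miss_handler) {
      // Static storage: the descriptor keeps this pointer past the call.
      static const Register registers[] = { /* rdx */ 2, /* rax */ 0 };
      descriptor->register_param_count_ = 2;
      descriptor->register_params_ = registers;
      descriptor->deoptimization_handler_ = miss_handler;
    }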
@@ -437,35 +508,8 @@ class FloatingPointHelper : public AllStatic {
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
-
- // Tries to convert two values to smis losslessly.
- // This fails if either argument is not a Smi nor a HeapNumber,
- // or if it's a HeapNumber with a value that can't be converted
- // losslessly to a Smi. In that case, control transitions to the
- // on_not_smis label.
- // On success, either control goes to the on_success label (if one is
- // provided), or it falls through at the end of the code (if on_success
- // is NULL).
- // On success, both first and second holds Smi tagged values.
- // One of first or second must be non-Smi when entering.
- static void NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined);
};
@@ -553,569 +597,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
- // Left and right arguments are now on top.
- __ Push(Smi::FromInt(MinorKey()));
-
- __ PushReturnAddressFrom(rcx);
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
-
- // Arguments to BinaryOpStub are in rdx and rax.
- const Register left = rdx;
- const Register right = rax;
-
- // We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations and logical right shift by 0.
- bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
- (op == Token::ADD || op == Token::SUB ||
- op == Token::MUL || op == Token::DIV || op == Token::SHR);
-
- // Smi check of both operands. If op is BIT_OR, the check is delayed
- // until after the OR operation.
- Label not_smis;
- Label use_fp_on_smis;
- Label fail;
-
- if (op != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
-
- Label smi_values;
- __ bind(&smi_values);
- // Perform the operation.
- Comment perform_smi(masm, "-- Perform smi operation");
- switch (op) {
- case Token::ADD:
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
-
- case Token::SUB:
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- // SmiDiv will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- // SmiMod will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::BIT_OR: {
- ASSERT(right.is(rax));
- __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
- break;
- }
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in rax. Some operations have registers pushed.
- __ ret(0);
-
- if (use_fp_on_smis.is_linked()) {
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op == Token::DIV || op == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- if (op == Token::SHR) {
- __ SmiToInteger32(left, left);
- __ cvtqsi2sd(xmm0, left);
- } else {
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- }
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ jmp(&fail);
- }
- }
-
- // 7. Non-smi operands reach the end of the code generated by
- // GenerateSmiCode, and fall through to subsequent code,
- // with the operands in rdx and rax.
- // But first we check if non-smi values are HeapNumbers holding
- // values that could be smi.
- __ bind(&not_smis);
- Comment done_comment(masm, "-- Enter non-smi code");
- FloatingPointHelper::ConvertUndefined convert_undefined =
- FloatingPointHelper::BAILOUT_ON_UNDEFINED;
- // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
- }
- FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail, convert_undefined);
- __ jmp(&smi_values);
- __ bind(&fail);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure,
- Token::Value op,
- OverwriteMode mode) {
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, allocation_failure, mode);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- break;
- }
- case Token::MOD: {
- // For MOD we jump to the allocation_failure label, to call runtime.
- __ jmp(allocation_failure);
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
- heap_number_map);
- switch (op) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- __ Ret();
-
- // Logical shift right can produce an unsigned int32 that is not
- // an int32, and so is not in the smi range. Allocate a heap number
- // in that case.
- if (op == Token::SHR) {
- __ bind(&non_smi_shr_result);
- Label allocation_failed;
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
- TAG_OBJECT);
- // Set the map.
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ Ret();
-
- __ bind(&allocation_failed);
- // We need tagged values in rdx and rax for the following code,
- // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rbx);
- __ jmp(allocation_failure);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- // No fall-through from this generated code.
- if (FLAG_debug_code) {
- __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
- }
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
- MacroAssembler* masm) {
- // Push arguments, but ensure they are under the return address
- // for a tail call.
- __ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
- __ PushReturnAddressFrom(rcx);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- if (call_runtime.is_linked()) {
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
- }
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- // The int32 case is identical to the Smi case. We avoid creating this
- // IC state on x64.
- UNREACHABLE();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rdx, rdx);
- } else {
- __ LoadRoot(rdx, Heap::kNanValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rax, rax);
- } else {
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
- Register input,
- Label* fail) {
- Label ok;
- __ JumpIfSmi(input, &ok, Label::kNear);
- Register heap_number_map = r8;
- Register scratch1 = r9;
- Register scratch2 = r10;
- // HeapNumbers containing 32-bit integer values are also allowed.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, fail);
- __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
- // Convert, convert back, and compare the two doubles' bits.
- __ cvttsd2siq(scratch2, xmm0);
- __ cvtlsi2sd(xmm1, scratch2);
- __ movq(scratch1, xmm0);
- __ movq(scratch2, xmm1);
- __ cmpq(scratch1, scratch2);
- __ j(not_equal, fail);
- __ bind(&ok);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label gc_required, not_number;
-
- // It could be that only SMIs have been seen for either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
- }
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &gc_required, &not_number, op_, mode_);
-
- __ bind(&not_number);
- GenerateTypeTransition(masm);
-
- __ bind(&gc_required);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in rdx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rdx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- // Use object in rdx as a result holder
- __ movq(rax, rdx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(rdx);
- __ push(rax);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
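The deleted BinaryOpStub code above leans on the x64 smi representation (note the STATIC_ASSERT(kSmiValueSize == 32)): a smi is a 64-bit word whose upper half carries the 32-bit payload, so Integer32ToSmi and SmiToInteger32 reduce to shifts. A minimal sketch of the tag and untag steps:

    #include <cstdint>

    constexpr int kSmiShift = 32;

    inline int64_t Integer32ToSmi(int32_t value) {
      // Shift in the unsigned domain to keep the sketch free of UB.
      uint64_t payload = static_cast<uint64_t>(static_cast<int64_t>(value));
      return static_cast<int64_t>(payload << kSmiShift);  // payload in the upper half
    }

    inline int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift);  // arithmetic shift untags
    }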
@@ -1145,7 +626,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
__ subq(rsp, Immediate(kDoubleSize));
- __ cvtlsi2sd(xmm1, rax);
+ __ Cvtlsi2sd(xmm1, rax);
__ movsd(Operand(rsp, 0), xmm1);
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
@@ -1161,7 +642,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Input is a HeapNumber. Push it on the FPU stack and load its
// bits into rbx.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rdx, rbx);
__ bind(&loaded);
@@ -1198,7 +679,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// rcx = TranscendentalCache::hash(double value).
ExternalReference cache_array =
ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ movq(rax, cache_array);
+ __ Move(rax, cache_array);
int cache_array_index =
type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
__ movq(rax, Operand(rax, cache_array_index));
@@ -1422,67 +903,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(r8, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(r8, 0);
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in r8.
- __ TruncateHeapNumberToI(r8, rdx);
-
- // Here r8 has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(rcx, 0);
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- __ TruncateHeapNumberToI(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -1503,89 +923,12 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
__ bind(&load_smi_rdx);
__ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ Cvtlsi2sd(xmm0, kScratchRegister);
__ JumpIfNotSmi(rax, &load_nonsmi_rax);
__ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined) {
- Register heap_number_map = scratch3;
- Register smi_result = scratch1;
- Label done, maybe_undefined_first, maybe_undefined_second, first_done;
-
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label first_smi;
- __ JumpIfSmi(first, &first_smi, Label::kNear);
- __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_first
- : on_not_smis);
- // Convert HeapNumber to smi if possible.
- __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- // Check if conversion was successful by converting back and
- // comparing to the original double's bits.
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(first, smi_result);
-
- __ bind(&first_done);
- __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
- __ bind(&first_smi);
- __ AssertNotSmi(second);
- __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_second
- : on_not_smis);
- // Convert second to smi, if possible.
- __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(second, smi_result);
- if (on_success != NULL) {
- __ jmp(on_success);
- } else {
- __ jmp(&done);
- }
-
- __ bind(&maybe_undefined_first);
- __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(first, first);
- __ jmp(&first_done);
-
- __ bind(&maybe_undefined_second);
- __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(second, second);
- if (on_success != NULL) {
- __ jmp(on_success);
- }
- // Else: fall through.
-
+ __ Cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
}
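The removed NumbersToSmis above (like the surviving BinaryOpStub_CheckSmiInput) tests whether a HeapNumber holds a losslessly convertible integer by truncating, converting back, and comparing raw bit patterns. The same check in plain C++, as an illustrative sketch rather than the exact stub semantics:

    #include <cstdint>
    #include <cstring>

    // Returns true and writes *out if d is exactly an int32; the bit compare
    // rejects -0.0, fractions, and anything that rounds on the way back.
    inline bool DoubleToSmiCandidate(double d, int32_t* out) {
      if (!(d >= INT32_MIN && d <= INT32_MAX)) return false;  // also rejects NaN
      int64_t truncated = static_cast<int64_t>(d);            // like cvttsd2siq
      double round_trip = static_cast<double>(truncated);     // like cvtlsi2sd
      uint64_t a, b;
      std::memcpy(&a, &d, sizeof(a));
      std::memcpy(&b, &round_trip, sizeof(b));
      if (a != b) return false;
      *out = static_cast<int32_t>(truncated);
      return true;
    }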
@@ -1603,7 +946,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ movq(scratch, Immediate(1));
- __ cvtlsi2sd(double_result, scratch);
+ __ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -1623,7 +966,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&base_is_smi);
__ SmiToInteger32(base, base);
- __ cvtlsi2sd(double_base, base);
+ __ Cvtlsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1664,7 +1007,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label continue_sqrt, continue_rsqrt, not_plus_half;
// Test for 0.5.
// Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
__ movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
@@ -1674,7 +1017,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -1706,7 +1049,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -1812,7 +1155,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtlsi2sd(double_exponent, exponent);
+ __ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -1841,8 +1184,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
// Return value is in xmm0.
__ movsd(double_result, xmm0);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1);
@@ -1902,8 +1243,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = rax;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -1977,11 +1317,6 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
// Check that the key is a smi.
Label slow;
__ JumpIfNotSmi(rdx, &slow);
@@ -2003,10 +1338,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ SmiSub(rax, rax, rdx);
+ __ SmiToInteger32(rax, rax);
+ StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
@@ -2018,10 +1353,11 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ SmiSub(rcx, rcx, rdx);
+ __ SmiToInteger32(rcx, rcx);
+ StackArgumentsAccessor adaptor_args(rbx, rcx,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, adaptor_args.GetArgumentOperand(0));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
@@ -2395,11 +1731,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rsp[24] : subject string
// rsp[32] : JSRegExp object
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
+ enum RegExpExecStubArgumentIndices {
+ JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
+ SUBJECT_STRING_ARGUMENT_INDEX,
+ PREVIOUS_INDEX_ARGUMENT_INDEX,
+ LAST_MATCH_INFO_ARGUMENT_INDEX,
+ REG_EXP_EXEC_ARGUMENT_COUNT
+ };
+ StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
Label runtime;
// Ensure that a RegExp stack is allocated.
Isolate* isolate = masm->isolate();
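The hunk above trades four named byte offsets for an enum of argument indices plus a StackArgumentsAccessor. A simplified sketch of how such an accessor could map an index back to the same offsets; the real helper also handles a receiver slot and register-held counts:

    #include <cstdint>

    enum RegExpExecStubArgumentIndices {
      JS_REG_EXP_OBJECT_ARGUMENT_INDEX,   // was rsp[32]
      SUBJECT_STRING_ARGUMENT_INDEX,      // was rsp[24]
      PREVIOUS_INDEX_ARGUMENT_INDEX,      // was rsp[16]
      LAST_MATCH_INFO_ARGUMENT_INDEX,     // was rsp[8]
      REG_EXP_EXEC_ARGUMENT_COUNT
    };

    struct StackArgumentsAccessor {
      static constexpr int kPointerSize = 8;
      const uint8_t* base;  // rsp at stub entry; base[0] is the return address
      int argument_count;

      // Argument 0 is the one pushed first (deepest on the stack).
      const uint8_t* GetArgumentAddress(int index) const {
        return base + (argument_count - index) * kPointerSize;
      }
    };

    static_assert((REG_EXP_EXEC_ARGUMENT_COUNT - JS_REG_EXP_OBJECT_ARGUMENT_INDEX) * 8 == 32,
                  "index 0 maps to the old kJSRegExpOffset");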
@@ -2412,7 +1753,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
@@ -2445,7 +1786,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- __ movq(rdi, Operand(rsp, kSubjectOffset));
+ __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ JumpIfSmi(rdi, &runtime);
__ movq(r15, rdi); // Make a copy of the original subject string.
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
@@ -2547,7 +1888,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// We have to use r15 instead of rdi to load the length because rdi might
// have been only made to look like a sequential string when it actually
// is an external string.
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
__ JumpIfNotSmi(rbx, &runtime);
__ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
__ j(above_equal, &runtime);
@@ -2578,9 +1919,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Immediate(1));
// Argument 7: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
__ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
__ addq(r9, Operand(kScratchRegister, 0));
__ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
@@ -2649,7 +1990,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -2667,11 +2008,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For failure return null.
__ LoadRoot(rax, Heap::kNullValueRootIndex);
- __ ret(4 * kPointerSize);
+ __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
// Load RegExp data.
__ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
__ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
__ SmiToInteger32(rax,
FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
@@ -2680,7 +2021,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(r15, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
__ JumpIfSmi(r15, &runtime);
__ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
@@ -2704,7 +2045,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
kScratchRegister);
// Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
__ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
__ movq(rcx, rax);
__ RecordWriteField(rbx,
@@ -2747,7 +2088,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Return last match info.
__ movq(rax, r15);
- __ ret(4 * kPointerSize);
+ __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
// Result must now be exception. If there is no pending exception already a
@@ -2910,112 +2251,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- Factory* factory = masm->isolate()->factory();
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- factory->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in the string cache consists of two pointer-sized fields,
- // but the times_twice_pointer_size (multiply-by-16) scale factor
- // is not supported by the addressing modes on the x64 platform,
- // so we have to premultiply the entry index before the lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rbx, args.GetArgumentOperand(0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
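The deleted lookup code above (now centralized as MacroAssembler::LookupNumberStringCache) hashes smis by value and doubles by xoring the two 32-bit halves, masks by the entry count, and premultiplies the index because x64 addressing has no times-16 scale. The indexing in plain C++, assuming the cache length is counted in pointer-sized fields as in FixedArray::kLengthOffset:

    #include <cstdint>
    #include <cstring>

    inline uint32_t DoubleHash(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }

    inline uint32_t CacheEntryByteOffset(uint32_t hash, uint32_t cache_field_count) {
      uint32_t mask = cache_field_count / 2 - 1;  // two fields (number, string) per entry
      uint32_t index = hash & mask;
      // Premultiply: 2 fields * 8 bytes per entry, i.e. index << (kPointerSizeLog2 + 1).
      return index << (3 + 1);
    }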
@@ -3033,7 +2268,7 @@ static void CheckInputType(MacroAssembler* masm,
__ JumpIfNotSmi(input, fail);
} else if (expected == CompareIC::NUMBER) {
__ JumpIfSmi(input, &ok);
- __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, fail);
}
// We could be strict about internalized/non-internalized here, but as long as
@@ -3322,6 +2557,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // rax : number of arguments to the construct function
// rbx : cache cell for call target
// rdi : the function to call
Isolate* isolate = masm->isolate();
@@ -3341,9 +2577,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rcx, 0), allocation_site_map);
__ j(not_equal, &miss);
@@ -3379,6 +2614,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi);
@@ -3545,23 +2781,14 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
-#ifdef _WIN64
- return result_size_ == 1;
-#else
- return true;
-#endif
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
}
@@ -3571,9 +2798,9 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode(isolate)->set_is_pregenerated(true);
+ save_doubles.GetCode(isolate);
}
@@ -3619,8 +2846,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack is known to be aligned. This function takes one argument which is
// passed in register.
+ __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movq(arg_reg_1, rax);
- __ movq(kScratchRegister,
+ __ Move(kScratchRegister,
ExternalReference::perform_gc_function(masm->isolate()));
__ call(kScratchRegister);
}
@@ -3642,7 +2870,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Return result in single register (rax).
__ movq(rcx, r14); // argc.
__ movq(rdx, r15); // argv.
- __ movq(r8, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(r8, ExternalReference::isolate_address(masm->isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
@@ -3650,14 +2878,14 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Pass a pointer to the Arguments object as the second argument.
__ movq(rdx, r14); // argc.
__ movq(r8, r15); // argv.
- __ movq(r9, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(r9, ExternalReference::isolate_address(masm->isolate()));
}
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
- __ movq(rdx, ExternalReference::isolate_address(masm->isolate()));
+ __ Move(rdx, ExternalReference::isolate_address(masm->isolate()));
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
@@ -3838,9 +3066,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Scratch register is neither callee-save, nor an argument register on any
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE64);
+ __ movq(kScratchRegister, Smi::FromInt(marker), RelocInfo::NONE64);
__ push(kScratchRegister); // context slot
__ push(kScratchRegister); // function slot
// Save callee-saved registers (X64/Win64 calling conventions).
@@ -3947,7 +3173,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
- __ movq(kScratchRegister, js_entry_sp);
+ __ Move(kScratchRegister, js_entry_sp);
__ movq(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
@@ -4016,7 +3242,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// __ j(not_equal, &cache_miss);
// __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
// before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4909;
+ static const unsigned int kWordBeforeResultValue = 0x458B4906;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
@@ -4583,34 +3809,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
-
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, rcx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ Ret();
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, rcx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ Ret();
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -4646,12 +3849,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
__ bind(&done);
@@ -4935,13 +4133,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rsp[16] : from
// rsp[24] : string
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+ enum SubStringStubArgumentIndices {
+ STRING_ARGUMENT_INDEX,
+ FROM_ARGUMENT_INDEX,
+ TO_ARGUMENT_INDEX,
+ SUB_STRING_ARGUMENT_COUNT
+ };
+
+ StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
// Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
+ __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -4951,8 +4154,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rax: string
// rbx: instance type
// Calculate length of sub string using the smi values.
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
+ __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+ __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
@@ -4965,7 +4168,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Return original string.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&not_original_string);
Label single_char;
@@ -5048,7 +4251,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
__ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(3 * kPointerSize);
__ bind(&copy_routine);
}
@@ -5102,7 +4305,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
__ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
__ bind(&two_byte_sequential);
// Allocate the result.
@@ -5127,7 +4330,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
__ movq(rsi, r14);  // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
// Just jump to runtime to create the sub string.
__ bind(&runtime);
@@ -5141,7 +4344,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringCharAtGenerator generator(
rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm);
- __ ret(kArgumentsSize);
+ __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
generator.SkipSlow(masm, &runtime);
}
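SubStringStub now names its stack arguments with an enum and resolves them through StackArgumentsAccessor instead of hand-maintained kToOffset/kFromOffset byte constants. A minimal sketch of the idea, assuming 8-byte stack slots and a return address sitting between rsp and the arguments; ArgsAccessor is an illustrative stand-in, not the real class.

#include <cassert>
#include <cstddef>

constexpr size_t kPointerSize = 8;  // x64 stack slot

enum SubStringArg { kString, kFrom, kTo, kArgCount };  // push order: string first

struct ArgsAccessor {
  size_t arg_count;
  // Byte displacement from rsp: slot 0 holds the return address, the last
  // pushed argument sits right above it, the first pushed one deepest.
  size_t OffsetOf(int index) const {
    return (arg_count - static_cast<size_t>(index)) * kPointerSize;
  }
};

int main() {
  ArgsAccessor args{kArgCount};
  assert(args.OffsetOf(kTo) == 8);       // rsp[8]  : to
  assert(args.OffsetOf(kFrom) == 16);    // rsp[16] : from
  assert(args.OffsetOf(kString) == 24);  // rsp[24] : string
}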
@@ -5370,23 +4573,23 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
// Load left and right operand.
Label done, left, left_smi, right_smi;
__ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ cvtlsi2sd(xmm1, rcx);
+ __ Cvtlsi2sd(xmm1, rcx);
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
- __ cvtlsi2sd(xmm0, rcx);
+ __ Cvtlsi2sd(xmm0, rcx);
__ bind(&done);
// Compare operands
@@ -5872,91 +5075,12 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore.
- { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub::Generate and
- // StringAddStub::Generate
- { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
- // StringAddStub::Generate
- { REG(rcx), REG(rax), REG(rbx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
+ stub2.GetCode(isolate);
}
@@ -6263,12 +5387,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ movq(rdi, rax);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ movq(rax, MemOperand(rbp, parameter_count_offset));
+  // The parameter count above includes the receiver for the arguments passed
+  // to the deoptimization handler. Subtract one for the receiver to get the
+  // parameter count for the call.
+ __ subl(rax, Immediate(1));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ParameterCount argument_count(rax);
+ __ InvokeFunction(
+ rdi, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- // It's always safe to call the entry hook stub, as the hook itself
- // is not allowed to call back to V8.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
ProfileEntryHookStub stub;
masm->CallStub(&stub);
}
@@ -6392,17 +5530,18 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ incl(rdx);
__ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rcx, 0), allocation_site_map);
__ Assert(equal, kExpectedAllocationSiteInCell);
}
- // Save the resulting elements kind in type info
- __ Integer32ToSmi(rdx, rdx);
- __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
- __ SmiToInteger32(rdx, rdx);
+  // Save the resulting elements kind in type info. We can't just store rdx
+  // in the AllocationSite::transition_info field because the elements kind
+  // is restricted to a portion of the field; upper bits must be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ SmiAddConstant(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset),
+ Smi::FromInt(kFastElementsKindPackedToHoley));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -6435,12 +5574,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
(!FLAG_track_allocation_sites &&
(kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
}
}
}
@@ -6462,11 +5601,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -6541,11 +5680,14 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
__ Cmp(FieldOperand(rdx, 0),
- Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
+ masm->isolate()->factory()->allocation_site_map());
__ j(not_equal, &no_info);
+ // Only look at the lower 16 bits of the transition info.
__ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
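Both the write above (a SmiAddConstant of kFastElementsKindPackedToHoley) and the read (masking with ElementsKindBits::kMask) exist because the elements kind occupies only the low bits of AllocationSite::transition_info. A small self-contained sketch of that bitfield discipline; the shift, mask width, and delta below are assumptions for illustration, not V8's exact values.

#include <cassert>
#include <cstdint>

constexpr uint32_t kElementsKindShift = 0;    // kind lives in the low bits
constexpr uint32_t kElementsKindMask = 0xFF;  // assumed field width
constexpr uint32_t kPackedToHoleyDelta = 1;   // assumed encoding distance

uint32_t ReadElementsKind(uint32_t transition_info) {
  return (transition_info >> kElementsKindShift) & kElementsKindMask;
}

uint32_t TransitionPackedToHoley(uint32_t transition_info) {
  // Upper bits survive because the delta cannot overflow the kind field.
  return transition_info + kPackedToHoleyDelta;
}

int main() {
  uint32_t info = 0xAB00 | 2;  // upper-bit state plus a packed kind
  assert(ReadElementsKind(info) == 2);
  uint32_t holey = TransitionPackedToHoley(info);
  assert(ReadElementsKind(holey) == 3);            // kind advanced
  assert((holey & ~kElementsKindMask) == 0xAB00);  // upper bits preserved
}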
diff --git a/chromium/v8/src/x64/code-stubs-x64.h b/chromium/v8/src/x64/code-stubs-x64.h
index 41678ecd20e..7a3f6a68691 100644
--- a/chromium/v8/src/x64/code-stubs-x64.h
+++ b/chromium/v8/src/x64/code-stubs-x64.h
@@ -69,7 +69,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -208,34 +207,6 @@ class StringCompareStub: public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -321,8 +292,6 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
diff --git a/chromium/v8/src/x64/codegen-x64.cc b/chromium/v8/src/x64/codegen-x64.cc
index 24773c2595d..afe0e3b7f52 100644
--- a/chromium/v8/src/x64/codegen-x64.cc
+++ b/chromium/v8/src/x64/codegen-x64.cc
@@ -213,7 +213,7 @@ ModuloFunction CreateModuloFunction() {
__ j(zero, &valid_result);
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue, RelocInfo::NONE64);
+ __ movq(rcx, kNaNValue);
__ movq(Operand(rsp, kPointerSize), rcx);
__ movsd(xmm0, Operand(rsp, kPointerSize));
__ jmp(&return_result);
@@ -263,8 +263,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(rdx, rdi);
- __ j(equal, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
}
// Set transitioned map.
@@ -292,8 +291,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label allocated, new_backing_store, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(rdx, rdi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -340,7 +338,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
// r15: the-hole NaN
__ jmp(&entry);
@@ -386,7 +384,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// rbx: current element (smi-tagged)
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
- __ cvtlsi2sd(xmm0, rbx);
+ __ Cvtlsi2sd(xmm0, rbx);
__ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
xmm0);
__ jmp(&entry);
@@ -418,8 +416,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(rdx, rdi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -443,7 +440,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole
@@ -469,7 +466,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
__ movq(FieldOperand(r11,
r9,
times_pointer_size,
@@ -638,7 +635,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
@@ -657,10 +654,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ and_(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
- __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
+ __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shl(temp1, Immediate(52));
__ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
__ subsd(result, double_scratch);
@@ -678,8 +675,6 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static const int kNoCodeAgeSequenceLength = 6;
-
static byte* GetNoCodeAgeSequence(uint32_t* length) {
static bool initialized = false;
static byte sequence[kNoCodeAgeSequenceLength];
@@ -711,7 +706,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
sequence++; // Skip the kCallOpcode byte
@@ -723,30 +718,27 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start());
- for (int i = 0;
- i < kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength;
- i++) {
- patcher.masm()->nop();
- }
+ patcher.masm()->Nop(
+ kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
}
}
Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
ASSERT(index >= 0);
- ASSERT(base_reg_.is(rsp) || base_reg_.is(rbp));
int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
int displacement_to_last_argument = base_reg_.is(rsp) ?
kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
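GetArgumentOperand computes the distance from the base register to the last argument differently for rsp- and rbp-based access, because an rbp-based accessor must also skip the saved frame pointer. A minimal sketch of that displacement logic, with assumed 8-byte slot constants.

#include <cassert>

constexpr int kPointerSize = 8;
constexpr int kPCOnStackSize = kPointerSize;  // return address slot
constexpr int kFPOnStackSize = kPointerSize;  // saved rbp slot

int DisplacementToLastArgument(bool base_is_rsp) {
  return base_is_rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
}

int main() {
  assert(DisplacementToLastArgument(true) == 8);    // rsp-relative access
  assert(DisplacementToLastArgument(false) == 16);  // rbp-relative access
}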
diff --git a/chromium/v8/src/x64/codegen-x64.h b/chromium/v8/src/x64/codegen-x64.h
index 7d1f59ad5ff..811ac507d53 100644
--- a/chromium/v8/src/x64/codegen-x64.h
+++ b/chromium/v8/src/x64/codegen-x64.h
@@ -156,7 +156,7 @@ class StackArgumentsAccessor BASE_EMBEDDED {
Operand GetArgumentOperand(int index);
Operand GetReceiverOperand() {
ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
- return GetArgumentOperand(0);;
+ return GetArgumentOperand(0);
}
private:
diff --git a/chromium/v8/src/x64/debug-x64.cc b/chromium/v8/src/x64/debug-x64.cc
index 6612242a037..5ddf69a414e 100644
--- a/chromium/v8/src/x64/debug-x64.cc
+++ b/chromium/v8/src/x64/debug-x64.cc
@@ -132,7 +132,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+ __ Move(rbx, ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(1);
__ CallStub(&ceb);
@@ -172,7 +172,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ movq(kScratchRegister, after_break_target);
+ __ Move(kScratchRegister, after_break_target);
__ jmp(Operand(kScratchRegister, 0));
}
@@ -319,7 +319,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
masm->isolate());
- __ movq(rax, restarter_frame_function_slot);
+ __ Move(rax, restarter_frame_function_slot);
__ movq(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
diff --git a/chromium/v8/src/x64/deoptimizer-x64.cc b/chromium/v8/src/x64/deoptimizer-x64.cc
index 303b756cacd..ae180ec59b4 100644
--- a/chromium/v8/src/x64/deoptimizer-x64.cc
+++ b/chromium/v8/src/x64/deoptimizer-x64.cc
@@ -82,87 +82,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// add <profiling_counter>, <-delta>
-// jns ok
-// call <stack guard>
-// ok:
-//
-// We will patch away the branch so the code is:
-//
-// add <profiling_counter>, <-delta> ;; Not changed
-// nop
-// nop
-// call <on-stack replacment>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -187,10 +106,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(rax.code(), params);
output_frame->SetRegister(rbx.code(), handler);
}
@@ -210,6 +126,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+ return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
diff --git a/chromium/v8/src/x64/disasm-x64.cc b/chromium/v8/src/x64/disasm-x64.cc
index 9984a46307d..76b541c0100 100644
--- a/chromium/v8/src/x64/disasm-x64.cc
+++ b/chromium/v8/src/x64/disasm-x64.cc
@@ -93,7 +93,7 @@ static const ByteMnemonic two_operands_instr[] = {
{ 0x39, OPER_REG_OP_ORDER, "cmp" },
{ 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
{ 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
+ { 0x63, REG_OPER_OP_ORDER, "movsxl" },
{ 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
{ 0x85, REG_OPER_OP_ORDER, "test" },
{ 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
@@ -1036,14 +1036,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("extractps "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
- AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
} else if (third_byte == 0x0b) {
get_modrm(*current, &mod, &regop, &rm);
// roundsd xmm, xmm/m64, imm8
- AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(", %d", (*current) & 3);
+ AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", (*current) & 3);
current += 1;
} else {
UnimplementedInstruction();
@@ -1062,12 +1062,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} // else no immediate displacement.
AppendToBuffer("nop");
} else if (opcode == 0x28) {
- AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movapd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
AppendToBuffer("movapd ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x6E) {
AppendToBuffer("mov%c %s,",
rex_w() ? 'q' : 'd',
@@ -1081,15 +1081,15 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x7F) {
AppendToBuffer("movdqa ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0xD6) {
AppendToBuffer("movq ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
@@ -1214,7 +1214,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x7E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
@@ -1238,7 +1238,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// movaps xmm, xmm/m128
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("movaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
@@ -1247,7 +1247,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("movaps ");
current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0xA2) {
// CPUID
@@ -1260,18 +1260,43 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
- } else if (opcode == 0x57) {
- // xorps xmm, xmm/m128
+ } else if (opcode >= 0x53 && opcode <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rcpps",
+ "andps",
+ "andnps",
+ "orps",
+ "xorps",
+ "addps",
+ "mulps",
+ "cvtps2pd",
+ "cvtdq2ps",
+ "subps",
+ "minps",
+ "divps",
+ "maxps",
+ };
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,",
+ pseudo_op[opcode - 0x53],
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+ AppendToBuffer("shufps %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
} else if (opcode == 0x50) {
// movmskps reg, xmm
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop));
+ AppendToBuffer("movmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF0) == 0x80) {
@@ -1450,7 +1475,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr =
reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
@@ -1551,9 +1576,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer("mov%c ", operand_size_code());
data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
+ if (operand_size() == OPERAND_WORD_SIZE) {
+ int16_t imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
+ } else {
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
}
}
break;
@@ -1599,7 +1630,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (reg == 0) {
AppendToBuffer("nop"); // Common name for xchg rax,rax.
} else {
- AppendToBuffer("xchg%c rax, %s",
+ AppendToBuffer("xchg%c rax,%s",
operand_size_code(),
NameOfCPURegister(reg));
}
@@ -1628,12 +1659,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
bool is_32bit = (opcode >= 0xB8);
int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
if (is_32bit) {
- AppendToBuffer("mov%c %s, ",
+ AppendToBuffer("mov%c %s,",
operand_size_code(),
NameOfCPURegister(reg));
data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE);
} else {
- AppendToBuffer("movb %s, ",
+ AppendToBuffer("movb %s,",
NameOfByteCPURegister(reg));
data += PrintImmediate(data, OPERAND_BYTE_SIZE);
}
@@ -1755,7 +1786,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x3C:
- AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("cmp al,0x%x", *reinterpret_cast<int8_t*>(data + 1));
data +=2;
break;
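The 0x53..0x5F branch above replaces per-opcode cases with a table lookup: those SSE opcodes map one-to-one onto mnemonics, so the disassembler can index a string array. A minimal sketch of the same lookup; the table contents mirror the diff, while the helper around them is ours.

#include <cstdint>

const char* SsePseudoOp(uint8_t opcode) {
  static const char* const kNames[] = {
      "rcpps",  "andps", "andnps",   "orps",     "xorps",
      "addps",  "mulps", "cvtps2pd", "cvtdq2ps", "subps",
      "minps",  "divps", "maxps",
  };
  if (opcode < 0x53 || opcode > 0x5F) return nullptr;  // outside the table
  return kNames[opcode - 0x53];
}

int main() { return SsePseudoOp(0x57) != nullptr ? 0 : 1; }  // 0x57 -> xorps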
diff --git a/chromium/v8/src/x64/frames-x64.h b/chromium/v8/src/x64/frames-x64.h
index 2af5a81bb5f..fb17964adae 100644
--- a/chromium/v8/src/x64/frames-x64.h
+++ b/chromium/v8/src/x64/frames-x64.h
@@ -70,11 +70,11 @@ class ExitFrameConstants : public AllStatic {
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerPCOffset = kFPOnStackSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
+ static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
};
@@ -82,7 +82,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
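The header now derives these offsets from kFPOnStackSize and kPCOnStackSize rather than raw multiples of kPointerSize, which documents where each slot comes from without changing any value. A small sketch showing the two forms agree, assuming 8-byte slots.

#include <cstddef>

constexpr int kPointerSize = 8;
constexpr int kFPOnStackSize = kPointerSize;
constexpr int kPCOnStackSize = kPointerSize;

constexpr int kCallerPCOffset = kFPOnStackSize;
constexpr int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
constexpr int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;

static_assert(kCallerPCOffset == 1 * kPointerSize, "same value, clearer origin");
static_assert(kCallerSPDisplacement == 2 * kPointerSize, "same value");
static_assert(kLastParameterOffset == 2 * kPointerSize, "same value");

int main() {}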
diff --git a/chromium/v8/src/x64/full-codegen-x64.cc b/chromium/v8/src/x64/full-codegen-x64.cc
index c24512ecae3..e4793683ee7 100644
--- a/chromium/v8/src/x64/full-codegen-x64.cc
+++ b/chromium/v8/src/x64/full-codegen-x64.cc
@@ -140,10 +140,9 @@ void FullCodeGenerator::Generate() {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+ StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ movq(args.GetReceiverOperand(), kScratchRegister);
__ bind(&ok);
}
@@ -153,10 +152,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -319,9 +315,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
reset_value = Smi::kMaxValue;
}
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
- RelocInfo::NONE64);
+ __ Move(kScratchRegister, Smi::FromInt(reset_value));
__ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
@@ -678,7 +672,8 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) {
int offset = -var->index() * kPointerSize;
// Adjust by a (parameter or local) base offset.
if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+ offset += kFPOnStackSize + kPCOnStackSize +
+ (info_->scope()->num_parameters() - 1) * kPointerSize;
} else {
offset += JavaScriptFrameConstants::kLocal0Offset;
}
@@ -1129,7 +1124,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(rbx, cell);
+ __ Move(rbx, cell);
__ Move(FieldOperand(rbx, Cell::kValueOffset),
Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
@@ -1600,6 +1595,8 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+
+ expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
@@ -1609,21 +1606,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -1734,6 +1725,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
+ expr->BuildConstantElements(isolate());
+ int flags = expr->depth() == 1
+ ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
+
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
@@ -1745,6 +1741,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+    // If transitioning is the only client of allocation sites, we can turn
+    // tracking off when there is nowhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
@@ -1757,35 +1761,27 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1) {
+ } else if (expr->depth() > 1 || Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (Serializer::enabled() ||
- length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_elements);
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ __ Push(Smi::FromInt(flags));
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2272,7 +2268,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2321,7 +2317,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ pop(rdx);
- BinaryOpStub stub(op, mode);
+ BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
@@ -2638,7 +2634,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
// Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
+ StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
+ __ push(args.GetReceiverOperand());
// Push the language mode.
__ Push(Smi::FromInt(language_mode()));
@@ -3037,6 +3034,33 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(rax, map, if_false, DO_SMI_CHECK);
+ __ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ __ j(not_equal, if_false);
+ __ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
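The new EmitIsMinusZero above exploits the IEEE-754 encoding: -0.0 is the only double whose upper word is 0x80000000 and whose lower word is zero, so two 32-bit compares suffice. A plain-C++ rendering of the same test, not V8 code.

#include <cassert>
#include <cstdint>
#include <cstring>

bool IsMinusZero(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  return bits == (static_cast<uint64_t>(0x80000000u) << 32);  // sign bit only
}

int main() {
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(0.0));
  assert(!IsMinusZero(-1.0));
}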
@@ -3257,47 +3281,6 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(1);
- __ movq(arg_reg_1,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(arg_reg_1,
- FieldOperand(arg_reg_1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
@@ -3377,8 +3360,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
- __ movq(arg_reg_1, object);
- __ movq(arg_reg_2, index, RelocInfo::NONE64);
+ __ movq(arg_reg_1, object);
+ __ movq(arg_reg_2, index, RelocInfo::NONE64);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
@@ -3391,30 +3374,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask) {
- __ Check(masm()->CheckSmi(index), kNonSmiIndex);
- __ Check(masm()->CheckSmi(value), kNonSmiValue);
-
- __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, kIndexIsTooLarge);
-
- __ SmiCompare(index, Smi::FromInt(0));
- __ Check(greater_equal, kIndexIsNegative);
-
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- __ cmpq(value, Immediate(encoding_mask));
- __ Check(equal, kUnexpectedStringType);
- __ pop(value);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
@@ -3425,17 +3384,23 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
__ pop(value);
__ pop(index);
- VisitForAccumulatorValue(args->at(0)); // string
if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ ThrowIf(NegateCondition(__ CheckSmi(value)), kNonSmiValue);
+ __ ThrowIf(NegateCondition(__ CheckSmi(index)), kNonSmiValue);
}
__ SmiToInteger32(value, value);
__ SmiToInteger32(index, index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ }
+
__ movb(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
value);
context()->Plug(string);
@@ -3452,17 +3417,23 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
+ VisitForAccumulatorValue(args->at(0)); // string
__ pop(value);
__ pop(index);
- VisitForAccumulatorValue(args->at(0)); // string
if (FLAG_debug_code) {
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ ThrowIf(NegateCondition(__ CheckSmi(value)), kNonSmiValue);
+ __ ThrowIf(NegateCondition(__ CheckSmi(index)), kNonSmiValue);
}
__ SmiToInteger32(value, value);
__ SmiToInteger32(index, index);
+
+ if (FLAG_debug_code) {
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ }
+
__ movw(FieldOperand(string, index, times_2, SeqTwoByteString::kHeaderSize),
value);
context()->Plug(rax);
@@ -3513,8 +3484,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into rax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3639,11 +3610,20 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
+ if (FLAG_new_string_add) {
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
- StringAddStub stub(STRING_ADD_CHECK_BOTH);
- __ CallStub(&stub);
+ __ pop(rdx);
+ NewStringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+ __ CallStub(&stub);
+ } else {
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
+ __ CallStub(&stub);
+ }
context()->Plug(rax);
}
@@ -3661,42 +3641,6 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -4392,14 +4336,47 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
+ // Inline smi case if we are in a loop.
+ Label done, stub_call;
+ JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(rax, &no_conversion, Label::kNear);
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(rax, &slow, Label::kNear);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(rax);
+ break;
+ case NAMED_PROPERTY:
+ __ movq(Operand(rsp, kPointerSize), rax);
+ break;
+ case KEYED_PROPERTY:
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ break;
+ }
+ }
+ }
+
+ SmiOperationExecutionMode mode;
+ mode.Add(PRESERVE_SOURCE_REGISTER);
+ mode.Add(BAILOUT_ON_NO_OVERFLOW);
+ if (expr->op() == Token::INC) {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+ } else {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+ }
+ __ jmp(&stub_call, Label::kNear);
+ __ bind(&slow);
}
+
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
- __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4421,37 +4398,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- }
- }
-
// Record position before stub call.
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ bind(&stub_call);
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
CallIC(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
expr->CountBinOpFeedbackId());
@@ -4883,6 +4837,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x1d;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+      //   call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
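The BackEdgeTable added above absorbs the interrupt-patching code deleted from deoptimizer-x64.cc earlier in this diff: flipping two bytes turns the `jns ok` guard into a two-byte nop, so the following call reaches the OSR builtin unconditionally, and writing the jns bytes back restores the interrupt check. A hedged sketch of that patch on a plain buffer; the addressing and state set are simplified relative to the real table.

#include <cassert>
#include <cstdint>

constexpr uint8_t kJnsInstruction = 0x79;
constexpr uint8_t kJnsOffset = 0x1d;
constexpr uint8_t kNopByteOne = 0x66;
constexpr uint8_t kNopByteTwo = 0x90;

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT };

void PatchAt(uint8_t* jns_instr, BackEdgeState target) {
  if (target == INTERRUPT) {
    jns_instr[0] = kJnsInstruction;  // restore the conditional branch
    jns_instr[1] = kJnsOffset;
  } else {
    jns_instr[0] = kNopByteOne;      // 66 90 is a two-byte nop
    jns_instr[1] = kNopByteTwo;
  }
}

BackEdgeState GetState(const uint8_t* jns_instr) {
  return jns_instr[0] == kJnsInstruction ? INTERRUPT : ON_STACK_REPLACEMENT;
}

int main() {
  uint8_t code[2] = {kJnsInstruction, kJnsOffset};
  PatchAt(code, ON_STACK_REPLACEMENT);
  assert(GetState(code) == ON_STACK_REPLACEMENT);
  PatchAt(code, INTERRUPT);
  assert(GetState(code) == INTERRUPT);
}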
diff --git a/chromium/v8/src/x64/ic-x64.cc b/chromium/v8/src/x64/ic-x64.cc
index 4a7c68a53ca..9448d3771a7 100644
--- a/chromium/v8/src/x64/ic-x64.cc
+++ b/chromium/v8/src/x64/ic-x64.cc
@@ -540,7 +540,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -583,7 +583,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -609,6 +609,21 @@ static void KeyedStoreGenerateGenericHelper(
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, fast_double);
}
+
+ // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole, because
+  // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ movq(kScratchRegister, FieldOperand(rbx,
+ rcx,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);
+
+ __ bind(&holecheck_passed1);
+
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
@@ -648,6 +663,15 @@ static void KeyedStoreGenerateGenericHelper(
__ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ j(not_equal, slow);
}
+
+ // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmpl(FieldOperand(rbx, rcx, times_8, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);
+
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
&transition_double_elements);
@@ -725,10 +749,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
__ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
__ testb(FieldOperand(r9, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
+ Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &slow_with_tagged_index);
@@ -809,7 +833,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
int argc,
Code::Kind kind,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rdx : receiver
@@ -922,7 +946,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
void CallICBase::GenerateMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id,
- Code::ExtraICState extra_state) {
+ ExtraICState extra_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -994,7 +1018,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
void CallIC::GenerateMegamorphic(MacroAssembler* masm,
int argc,
- Code::ExtraICState extra_ic_state) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -1103,7 +1127,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMonomorphicCacheProbe(masm,
argc,
Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
+ kNoExtraICState);
// Fall through on miss.
__ bind(&slow_call);
@@ -1247,7 +1271,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ movq(rax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -1286,7 +1310,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
INLINE_SMI_CHECK);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, MISS);
+ GenerateMiss(masm);
}
@@ -1330,7 +1354,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1399,7 +1423,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -1415,10 +1439,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 2, 1);
}
@@ -1441,7 +1463,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1451,7 +1473,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, extra_ic_state,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
@@ -1588,7 +1610,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1603,10 +1625,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
__ TailCallExternalReference(ref, 3, 1);
}
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.cc b/chromium/v8/src/x64/lithium-codegen-x64.cc
index 483d537568b..ff6f1e6ef3d 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.cc
+++ b/chromium/v8/src/x64/lithium-codegen-x64.cc
@@ -89,9 +89,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ RegisterDependentCodeForEmbeddedMaps(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -103,24 +101,6 @@ void LChunkBuilder::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- int length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
@@ -131,6 +111,38 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
#endif
+void LCodeGen::SaveCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(rsp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+ ASSERT(info()->saves_caller_doubles());
+ ASSERT(NeedsEagerFrame());
+ Comment(";;; Restore clobbered callee double registers");
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(rsp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+}
+
+
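SaveCallerDoubles and RestoreCallerDoubles rely on both loops walking the allocated-register bit vector in the same order, so stack slot k always pairs with the k-th set bit. A standalone C++ sketch of that pairing (plain arrays stand in for the XMM registers and the stack):

    #include <bitset>
    #include <iostream>

    constexpr int kNumDoubleRegs = 16;

    // Walk the set bits in ascending order; slot k <-> k-th allocated reg.
    void SaveDoubles(const std::bitset<kNumDoubleRegs>& allocated,
                     const double regs[], double stack[]) {
      int count = 0;
      for (int i = 0; i < kNumDoubleRegs; ++i)
        if (allocated[i]) stack[count++] = regs[i];
    }

    void RestoreDoubles(const std::bitset<kNumDoubleRegs>& allocated,
                        double regs[], const double stack[]) {
      int count = 0;
      for (int i = 0; i < kNumDoubleRegs; ++i)
        if (allocated[i]) regs[i] = stack[count++];
    }

    int main() {
      std::bitset<kNumDoubleRegs> allocated;
      allocated[1] = allocated[7] = true;
      double regs[kNumDoubleRegs] = {}, stack[kNumDoubleRegs] = {};
      regs[1] = 1.5; regs[7] = 2.5;
      SaveDoubles(allocated, regs, stack);
      regs[1] = regs[7] = 0;
      RestoreDoubles(allocated, regs, stack);
      std::cout << regs[1] << ' ' << regs[7] << '\n';  // 1.5 2.5
    }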
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -152,10 +164,9 @@ bool LCodeGen::GeneratePrologue() {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ StackArgumentsAccessor args(rsp, scope()->num_parameters());
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ movq(args.GetReceiverOperand(), kScratchRegister);
__ bind(&ok);
}
}
@@ -164,14 +175,7 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- if (info()->IsStub()) {
- __ Push(Smi::FromInt(StackFrame::STUB));
- } else {
- __ push(rdi); // Callee's JS function.
- }
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
info()->AddNoFrameRange(0, masm_->pc_offset());
}
@@ -185,7 +189,7 @@ bool LCodeGen::GeneratePrologue() {
#endif
__ push(rax);
__ Set(rax, slots);
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
+ __ movq(kScratchRegister, kSlotsZapValue);
Label loop;
__ bind(&loop);
__ movq(MemOperand(rsp, rax, times_pointer_size, 0),
@@ -201,16 +205,7 @@ bool LCodeGen::GeneratePrologue() {
}
if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movsd(MemOperand(rsp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
+ SaveCallerDoubles();
}
}
@@ -273,36 +268,6 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- RecordAndUpdatePosition(instr->position());
-
- instr->CompileToNative(this);
- }
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- return !is_aborted();
-}
-
-
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
@@ -319,11 +284,13 @@ bool LCodeGen::GenerateJumpTable() {
Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
}
if (jump_table_[i].needs_frame) {
- __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+ ASSERT(!info()->saves_caller_doubles());
+ __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
if (needs_frame.is_bound()) {
__ jmp(&needs_frame);
} else {
__ bind(&needs_frame);
+ __ movq(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
__ push(rbp);
__ movq(rbp, rsp);
__ push(rsi);
@@ -337,6 +304,10 @@ bool LCodeGen::GenerateJumpTable() {
__ call(kScratchRegister);
}
} else {
+ if (info()->saves_caller_doubles()) {
+ ASSERT(info()->IsStub());
+ RestoreCallerDoubles();
+ }
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
@@ -350,8 +321,9 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -471,11 +443,23 @@ Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
}
+static int ArgumentsOffsetWithoutFrame(int index) {
+ ASSERT(index < 0);
+ return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(rbp, StackSlotOffset(op->index()));
+ if (NeedsEagerFrame()) {
+ return Operand(rbp, StackSlotOffset(op->index()));
+ } else {
+    // Retrieve the parameter relative to the stack pointer, since no
+    // eager frame has been built.
+ return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
+ }
}
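ArgumentsOffsetWithoutFrame encodes the frameless addressing rule: negative spill indices name incoming parameters, which sit just above the return address when no rbp frame was pushed. A worked standalone sketch of the arithmetic (assuming plain x64, where the saved PC occupies one pointer-sized slot):

    #include <cassert>
    #include <iostream>

    constexpr int kPointerSize = 8;
    constexpr int kPCOnStackSize = kPointerSize;  // the return-address slot

    int ArgumentsOffsetWithoutFrame(int index) {
      assert(index < 0);  // negative indices denote incoming parameters
      return -(index + 1) * kPointerSize + kPCOnStackSize;
    }

    int main() {
      // Index -1 is the slot just above the return address, and so on,
      // so Operand(rsp, offset) reaches each parameter without a frame.
      std::cout << ArgumentsOffsetWithoutFrame(-1) << ' '   // 8
                << ArgumentsOffsetWithoutFrame(-2) << '\n'; // 16
    }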
@@ -614,8 +598,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
int argc) {
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
@@ -637,21 +619,41 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(function, num_arguments);
+ __ CallRuntime(function, num_arguments, save_doubles);
+
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+ if (context->IsRegister()) {
+ if (!ToRegister(context).is(rsi)) {
+ __ movq(rsi, ToRegister(context));
+ }
+ } else if (context->IsStackSlot()) {
+ __ movq(rsi, ToOperand(context));
+ } else if (context->IsConstantOperand()) {
+ HConstant* constant =
+ chunk_->LookupConstant(LConstantOperand::cast(context));
+ __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
+
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
@@ -708,7 +710,27 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
+ if (DeoptEveryNTimes()) {
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+ Label no_deopt;
+ __ pushfq();
+ __ push(rax);
+ Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
+ __ movl(rax, count_operand);
+ __ subl(rax, Immediate(1));
+ __ j(not_zero, &no_deopt, Label::kNear);
+ if (FLAG_trap_on_deopt) __ int3();
+ __ movl(rax, Immediate(FLAG_deopt_every_n_times));
+ __ movl(count_operand, rax);
+ __ pop(rax);
+ __ popfq();
+ ASSERT(frame_is_built_);
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&no_deopt);
+ __ movl(count_operand, rax);
+ __ pop(rax);
+ __ popfq();
+ }
if (info()->ShouldTrapOnDeopt()) {
Label done;
@@ -720,7 +742,10 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (cc == no_condition && frame_is_built_) {
+  // Go through the jump table if we need to handle a condition, build the
+  // frame, or restore caller doubles.
+ if (cc == no_condition && frame_is_built_ &&
+ !info()->saves_caller_doubles()) {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
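The new stress path implements --deopt-every-n-times on x64: a per-isolate counter is decremented at every deopt check, and when it reaches zero the deoptimization is forced and the counter is reset. A standalone C++ sketch of that counter discipline (flag and counter are plain globals here):

    #include <iostream>

    static int deopt_every_n_times = 5;  // stand-in for the flag
    static int stress_deopt_count = 5;   // stand-in for the external counter

    bool ShouldForceDeopt() {
      if (--stress_deopt_count != 0) return false;  // keep going
      stress_deopt_count = deopt_every_n_times;     // reset the window
      return true;                                  // force this deopt
    }

    int main() {
      for (int site = 1; site <= 12; ++site)
        if (ShouldForceDeopt())
          std::cout << "forced deopt at site " << site << '\n';  // 5, 10
    }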
@@ -754,26 +779,31 @@ void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -870,10 +900,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
- if (kind & Safepoint::kWithRegisters) {
- // Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi, zone());
- }
}
@@ -884,7 +910,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@@ -896,17 +922,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -956,6 +975,7 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->result()).is(rax));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
@@ -973,11 +993,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1029,36 +1044,6 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&left_is_not_negative);
__ andl(left_reg, Immediate(divisor - 1));
__ bind(&done);
-
- } else if (hmod->fixed_right_arg().has_value) {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
- Register right_reg = ToRegister(instr->right());
-
- int32_t divisor = hmod->fixed_right_arg().value;
- ASSERT(IsPowerOf2(divisor));
-
- // Check if our assumption of a fixed right operand still holds.
- __ cmpl(right_reg, Immediate(divisor));
- DeoptimizeIf(not_equal, instr->environment());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
-
} else {
Register left_reg = ToRegister(instr->left());
ASSERT(left_reg.is(rax));
@@ -1189,7 +1174,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
__ neg(reg1);
DeoptimizeIf(zero, instr->environment());
}
- __ movq(reg2, multiplier, RelocInfo::NONE64);
+ __ Set(reg2, multiplier);
// Result just fits in r64, because it's int32 * uint32.
__ imul(reg2, reg1);
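The imul here is the tail of the classic divide-by-constant strategy: the compiler precomputes a fixed-point reciprocal so the expensive idiv becomes a multiply plus shift, and the product cannot overflow because it is only int32 × uint32 wide. A standalone C++ sketch for one concrete divisor (d = 3, with the well-known multiplier ceil(2^33/3)):

    #include <cstdint>
    #include <iostream>

    // For every uint32_t x: (x * 0xAAAAAAAB) >> 33 == x / 3, because
    // 0xAAAAAAAB = ceil(2^33 / 3); the 64-bit product never overflows.
    uint32_t DivBy3(uint32_t x) {
      return uint32_t((uint64_t{x} * 0xAAAAAAABu) >> 33);
    }

    int main() {
      for (uint32_t x : {0u, 1u, 2u, 3u, 100u, 0xFFFFFFFFu})
        std::cout << x << " / 3 = " << DivBy3(x) << '\n';
    }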
@@ -1615,8 +1600,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value(isolate());
- AllowDeferredHandleDereference smi_check;
- __ LoadObject(ToRegister(instr->result()), value);
+ __ Move(ToRegister(instr->result()), value);
}
@@ -1686,51 +1670,104 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ j(not_equal, &runtime, Label::kNear);
__ movq(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
__ movq(arg_reg_1, object);
__ movq(arg_reg_2, index, RelocInfo::NONE64);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&done);
}
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding) {
+ if (index->IsConstantOperand()) {
+ int offset = ToInteger32(LConstantOperand::cast(index));
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset *= kUC16Size;
+ }
+ STATIC_ASSERT(kCharSize == 1);
+ return FieldOperand(string, SeqString::kHeaderSize + offset);
+ }
+ return FieldOperand(
+ string, ToRegister(index),
+ encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+ SeqString::kHeaderSize);
+}
+
+
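BuildSeqStringOperand is pure address arithmetic: a sequential-string element lives at header + index * width, where the width is 1 byte for one-byte strings and kUC16Size (2) for two-byte strings, and a constant index folds the whole expression into a single displacement. A standalone sketch of the computation (the header size is a placeholder, not SeqString::kHeaderSize):

    #include <cstddef>
    #include <iostream>

    enum class Encoding { kOneByte, kTwoByte };
    constexpr size_t kHeaderSize = 16;  // placeholder header size

    size_t SeqStringElementOffset(size_t index, Encoding enc) {
      size_t width = (enc == Encoding::kOneByte) ? 1 : 2;  // kUC16Size == 2
      return kHeaderSize + index * width;
    }

    int main() {
      std::cout << SeqStringElementOffset(4, Encoding::kOneByte) << ' '   // 20
                << SeqStringElementOffset(4, Encoding::kTwoByte) << '\n'; // 24
    }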
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register result = ToRegister(instr->result());
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register value = ToRegister(instr->value());
- String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ __ push(string);
+ __ movq(string, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(value);
+ __ pop(string);
}
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
+ __ movzxbl(result, operand);
} else {
- __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
+ __ movzxwl(result, operand);
+ }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ String::Encoding encoding = instr->hydrogen()->encoding();
+ Register string = ToRegister(instr->string());
+
+ if (FLAG_debug_code) {
+ Register value = ToRegister(instr->value());
+ Register index = ToRegister(instr->index());
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ int encoding_mask =
+ instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type;
+ __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+ }
+
+ Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+ if (instr->value()->IsConstantOperand()) {
+ int value = ToInteger32(LConstantOperand::cast(instr->value()));
+ ASSERT_LE(0, value);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ ASSERT_LE(value, String::kMaxOneByteCharCode);
+ __ movb(operand, Immediate(value));
+ } else {
+ ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+ __ movw(operand, Immediate(value));
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movb(operand, value);
+ } else {
+ __ movw(operand, value);
+ }
}
}
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToRegister(instr->value()));
+ ASSERT(ToRegister(instr->context()).is(rsi));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1744,14 +1781,22 @@ void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
+ Representation target_rep = instr->hydrogen()->representation();
+ bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
- __ leal(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
+ if (is_q) {
+ __ lea(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
+ } else {
+ __ leal(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
+ }
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (instr->hydrogen()->representation().IsSmi()) {
+ if (is_q) {
__ lea(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
@@ -1759,16 +1804,21 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
} else {
if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
+ if (is_q) {
+ __ addq(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else {
+ __ addl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ }
} else if (right->IsRegister()) {
- if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_q) {
__ addq(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
- if (instr->hydrogen_value()->representation().IsSmi()) {
+ if (is_q) {
__ addq(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
@@ -1832,13 +1882,13 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
- __ orpd(left_reg, right_reg);
+ __ orps(left_reg, right_reg);
} else {
// Since we operate on +0 and/or -0, addsd and andsd have the same effect.
__ addsd(left_reg, right_reg);
@@ -1849,7 +1899,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear);
__ bind(&return_right);
- __ movsd(left_reg, right_reg);
+ __ movaps(left_reg, right_reg);
__ bind(&return_left);
}
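The orps/addsd choice above is a signed-zero trick: once both operands compare equal to zero, min must yield -0 and max must yield +0. OR-ing the bit patterns keeps the sign bit if either input is -0, while (+0) + (-0) is +0 under round-to-nearest. A standalone C++ demonstration:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Bitwise OR of two doubles, the scalar equivalent of orps here.
    double BitOr(double a, double b) {
      uint64_t x, y;
      std::memcpy(&x, &a, sizeof x);
      std::memcpy(&y, &b, sizeof y);
      uint64_t r = x | y;
      double d;
      std::memcpy(&d, &r, sizeof d);
      return d;
    }

    int main() {
      double pz = 0.0, nz = -0.0;
      std::cout << BitOr(pz, nz) << ' '  // -0: the min of the two zeros
                << (pz + nz) << '\n';    //  0: the max of the two zeros
    }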
@@ -1878,15 +1928,16 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
// when there is a mulsd depending on the result
__ movaps(left, left);
break;
- case Token::MOD:
+ case Token::MOD: {
+ XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
- __ movaps(xmm0, left);
+ __ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movaps(result, xmm0);
+ __ movaps(result, xmm_scratch);
break;
+ }
default:
UNREACHABLE();
break;
@@ -1895,24 +1946,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->left()).is(rdx));
ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+ BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int left_block = instr->TrueDestination(chunk_);
@@ -1947,25 +1991,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
@@ -1981,8 +2006,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2001,8 +2027,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, no_condition);
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2083,8 +2110,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
@@ -2119,6 +2147,10 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -2206,7 +2238,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
if (instr->right()->IsConstantOperand()) {
Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
- __ CmpObject(left, right);
+ __ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
__ cmpq(left, right);
@@ -2237,6 +2269,33 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+ Representation rep = instr->hydrogen()->value()->representation();
+ ASSERT(!rep.IsInteger32());
+
+ if (rep.IsDouble()) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, value);
+ EmitFalseBranch(instr, not_equal);
+ __ movmskpd(kScratchRegister, value);
+ __ testl(kScratchRegister, Immediate(1));
+ EmitBranch(instr, not_zero);
+ } else {
+ Register value = ToRegister(instr->value());
+ Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+ __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
+ __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
+ Immediate(0x80000000));
+ EmitFalseBranch(instr, not_equal);
+ __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
+ Immediate(0x00000000));
+ EmitBranch(instr, equal);
+ }
+}
+
+
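DoCompareMinusZeroAndBranch checks for -0 in two ways: for an untagged double, value == 0 plus the sign bit that movmskpd extracts; for a heap number, the exponent word 0x80000000 with a zero mantissa word, which together are exactly the bit pattern 0x8000000000000000. A standalone sketch of the untagged test:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    bool IsMinusZero(double d) {
      if (d != 0.0) return false;   // the ucomisd equal-to-zero gate
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return (bits >> 63) != 0;     // the sign bit movmskpd reports
    }

    int main() {
      std::cout << IsMinusZero(-0.0) << ' ' << IsMinusZero(0.0) << ' '
                << IsMinusZero(-1.0) << '\n';  // 1 0 0
    }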
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
@@ -2329,6 +2388,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2485,6 +2545,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
InstanceofStub stub(InstanceofStub::kNoFlags);
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
@@ -2516,7 +2577,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label map_check_;
};
-
+ ASSERT(ToRegister(instr->context()).is(rsi));
DeferredInstanceOfKnownGlobal* deferred;
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
@@ -2524,7 +2585,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register object = ToRegister(instr->value());
// A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
+ __ JumpIfSmi(object, &false_result, Label::kNear);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -2546,7 +2607,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(&end_of_patched_code);
ASSERT(true);
#endif
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
@@ -2574,7 +2635,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
__ push(ToRegister(instr->value()));
- __ PushHeapObject(instr->function());
+ __ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
@@ -2601,24 +2662,17 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ testq(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
- __ j(not_zero, &load_false);
+ __ j(not_zero, &load_false, Label::kNear);
__ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_false);
__ LoadRoot(rax, Heap::kFalseValueRootIndex);
__ bind(&done);
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2638,22 +2692,16 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
- // Preserve the return value on the stack and rely on the runtime
- // call to return the value in the same register.
+ // Preserve the return value on the stack and rely on the runtime call
+ // to return the value in the same register. We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it's
+    // safe to write to the context register.
__ push(rax);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(rsp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
+ RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
@@ -2682,7 +2730,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ LoadGlobalCell(result, instr->hydrogen()->cell());
+ __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -2691,6 +2739,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->global_object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
@@ -2704,7 +2753,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<Cell> cell_handle = instr->hydrogen()->cell();
+ Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2729,6 +2778,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->global_object()).is(rdx));
ASSERT(ToRegister(instr->value()).is(rax));
@@ -2805,7 +2855,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
__ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
} else {
Register object = ToRegister(instr->object());
- __ movq(result, MemOperand(object, offset));
+ __ Load(result, MemOperand(object, offset), access.representation());
}
return;
}
@@ -2819,16 +2869,16 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ movq(result, FieldOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset));
+ object = result;
}
+ __ Load(result, FieldOperand(object, offset), access.representation());
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
@@ -2879,6 +2929,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -2896,8 +2952,9 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- int index = (const_length - const_index) + 1;
- __ movq(result, Operand(arguments, index * kPointerSize));
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(result, args.GetArgumentOperand(const_index));
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2907,8 +2964,9 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
} else {
__ subl(length, ToOperand(instr->index()));
}
- __ movq(result,
- Operand(arguments, length, times_pointer_size, kPointerSize));
+ StackArgumentsAccessor args(arguments, length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(result, args.GetArgumentOperand(0));
}
}
@@ -3100,6 +3158,7 @@ Operand LCodeGen::BuildFastArrayOperand(
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
@@ -3112,7 +3171,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -2 * kPointerSize));
+ __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
@@ -3169,6 +3228,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
// Do not transform the receiver to object for strict mode
// functions.
@@ -3177,13 +3237,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ testb(FieldOperand(kScratchRegister,
SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
+ __ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ testb(FieldOperand(kScratchRegister,
SharedFunctionInfo::kNativeByteOffset),
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
+ __ j(not_equal, &receiver_ok, dist);
// Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
@@ -3202,7 +3262,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// TODO(kmillikin): We have a hydrogen value for the global object. See
// if it's better to use it than to explicitly fetch it from the context
// here.
- __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
+ __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
__ movq(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
@@ -3234,7 +3295,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ testl(length, length);
__ j(zero, &invoke, Label::kNear);
__ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(elements, length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -3242,13 +3305,11 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -3271,7 +3332,12 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, rsi);
+ if (info()->IsOptimizing()) {
+ __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in rsi.
+ ASSERT(result.is(rsi));
+ }
}
@@ -3284,16 +3350,19 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
__ push(rsi); // The context is the first argument.
- __ PushHeapObject(instr->hydrogen()->pairs());
+ __ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ movq(result, GlobalObjectOperand());
+ __ movq(result,
+ Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
}
@@ -3316,11 +3385,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
dont_adapt_arguments || formal_parameter_count == arity;
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
if (can_invoke_directly) {
if (rdi_state == RDI_UNINITIALIZED) {
- __ LoadHeapObject(rdi, function);
+ __ Move(rdi, function);
}
// Change context.
@@ -3351,9 +3419,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ InvokeFunction(
function, expected, count, CALL_FUNCTION, generator, call_kind);
}
-
- // Restore context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -3394,17 +3459,18 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(rax)) __ movq(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ shl(tmp2, Immediate(1));
__ shr(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
@@ -3451,11 +3517,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ andpd(input_reg, scratch);
+ __ andps(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else if (r.IsSmi()) {
@@ -3473,7 +3539,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3505,7 +3571,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ testq(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr->environment());
__ Set(output_reg, 0);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&positive_sign);
}
@@ -3520,7 +3586,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, input_reg);
- __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
@@ -3532,17 +3598,18 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
void LCodeGen::DoMathRound(LMathRound* instr) {
- const XMMRegister xmm_scratch = xmm0;
+ const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
Label done, round_to_zero, below_one_half, do_not_compensate, restore;
- __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ __ movq(kScratchRegister, one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
+ __ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
@@ -3551,13 +3618,13 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ cmpl(output_reg, Immediate(0x80000000));
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&below_one_half);
- __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+ __ movq(kScratchRegister, minus_one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
+ __ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
@@ -3569,14 +3636,14 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &restore, Label::kNear);
__ subl(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
__ bind(&restore);
__ movq(input_reg, kScratchRegister); // Restore input_reg.
- __ jmp(&done);
+ __ jmp(&done, dist);
__ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
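The two magic constants are simply the IEEE-754 bit patterns of 0.5 and -0.5, and the three branches implement Math.round as floor(x + 0.5): for x >= 0.5, truncating x + 0.5 already gives the floor; for x <= -0.5, truncation gives the ceiling and one compensation step brings it down; the middle range rounds to zero. A standalone C++ sketch (the deopt cases such as int32 overflow and -0 are elided):

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    int32_t RoundHalfUp(double x) {  // Math.round semantics: floor(x + 0.5)
      if (x >= 0.5) return int32_t(x + 0.5);  // trunc == floor, x + 0.5 >= 1
      if (x > -0.5) return 0;                 // the round_to_zero range
      double y = x + 0.5;                     // y <= 0
      int32_t t = int32_t(y);                 // cvttsd2si truncates: ceil here
      if (double(t) != y) --t;                // compensate down to floor(y)
      return t;
    }

    int main() {
      uint64_t bits = 0x3FE0000000000000ULL;  // the one_half constant
      double half;
      std::memcpy(&half, &bits, sizeof half);  // decodes to exactly 0.5
      std::cout << half << ' ' << RoundHalfUp(2.5) << ' ' << RoundHalfUp(-2.5)
                << ' ' << RoundHalfUp(-2.6) << '\n';  // 0.5 3 -2 -3
    }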
@@ -3600,7 +3667,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -3610,7 +3677,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3649,7 +3716,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
- __ JumpIfSmi(exponent, &no_deopt);
+ __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
__ bind(&no_deopt);
@@ -3666,88 +3733,55 @@ void LCodeGen::DoPower(LPower* instr) {
}
-void LCodeGen::DoRandom(LRandom* instr) {
- // Assert that register size is twice the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
-
- // Load native context
- Register global_object = ToRegister(instr->global_object());
- Register native_context = global_object;
- __ movq(native_context, FieldOperand(
- global_object, GlobalObject::kNativeContextOffset));
-
- // Load state (FixedArray of the native context's random seeds)
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- Register state = native_context;
- __ movq(state, FieldOperand(native_context, kRandomSeedOffset));
-
- // Load state[0].
- Register state0 = ToRegister(instr->scratch());
- __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
- // Load state[1].
- Register state1 = ToRegister(instr->scratch2());
- __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- Register scratch3 = ToRegister(instr->scratch3());
- __ movzxwl(scratch3, state0);
- __ imull(scratch3, scratch3, Immediate(18273));
- __ shrl(state0, Immediate(16));
- __ addl(state0, scratch3);
- // Save state[0].
- __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzxwl(scratch3, state1);
- __ imull(scratch3, scratch3, Immediate(36969));
- __ shrl(state1, Immediate(16));
- __ addl(state1, scratch3);
- // Save state[1].
- __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- Register random = state0;
- __ shll(random, Immediate(14));
- __ andl(state1, Immediate(0x3FFFF));
- __ addl(random, state1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
- __ movq(scratch3, V8_INT64_C(0x4130000000000000),
- RelocInfo::NONE64); // 1.0 x 2^20 as double
- __ movq(scratch4, scratch3);
- __ movd(result, random);
- __ xorps(result, scratch4);
- __ subsd(result, scratch4);
-}
-
-
void LCodeGen::DoMathExp(LMathExp* instr) {
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(instr->value()->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ Label positive, done, zero;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(above, &positive, Label::kNear);
+ __ j(equal, &zero, Label::kNear);
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ Operand nan_operand = masm()->ExternalOperand(nan);
+ __ movsd(input_reg, nan_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&zero);
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ Operand ninf_operand = masm()->ExternalOperand(ninf);
+ __ movsd(input_reg, ninf_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive);
+ __ fldln2();
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), input_reg);
+ __ fld_d(Operand(rsp, 0));
+ __ fyl2x();
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(input_reg, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ bind(&done);
}
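The new DoMathLog lowering drops the transcendental-cache stub: fldln2 pushes ln 2, fyl2x computes st(1) * log2(st(0)), so the positive path evaluates ln(x) = ln(2) * log2(x), while x < 0 yields the canonical NaN and x == 0 yields -infinity. A standalone sketch of the same case analysis:

    #include <cmath>
    #include <iostream>
    #include <limits>

    double MathLog(double x) {
      if (x < 0.0) return std::numeric_limits<double>::quiet_NaN();
      if (x == 0.0) return -std::numeric_limits<double>::infinity();
      return std::log(2.0) * std::log2(x);  // what fldln2 + fyl2x compute
    }

    int main() {
      std::cout << MathLog(std::exp(1.0)) << ' '                 // ~1
                << MathLog(0.0) << ' ' << MathLog(-1.0) << '\n'; // -inf nan
    }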
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(rsi, 0);
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3756,6 +3790,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(rsi, 0);
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3764,6 +3801,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(rsi, 0);
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3771,17 +3811,16 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3794,6 +3833,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->result()).is(rax));
@@ -3801,11 +3841,11 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
@@ -3814,22 +3854,27 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ Move(rcx, instr->name());
CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ leave();
+ __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ } else {
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
@@ -3837,7 +3882,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ Move(rcx, instr->name());
CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -3853,6 +3897,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
@@ -3866,6 +3911,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
@@ -3889,13 +3935,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// Look at the first argument.
__ movq(rcx, Operand(rsp, 0));
__ testq(rcx, rcx);
- __ j(zero, &packed_case);
+ __ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
override_mode);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
@@ -3910,7 +3956,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ ASSERT(ToRegister(instr->context()).is(rsi));
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -3925,7 +3972,13 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
- __ lea(result, Operand(base, instr->offset()));
+ if (instr->offset()->IsConstantOperand()) {
+ LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+ __ lea(result, Operand(base, ToInteger32(offset)));
+ } else {
+ Register offset = ToRegister(instr->offset());
+ __ lea(result, Operand(base, offset, times_1, 0));
+ }
}
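
// Both branches above fold the add into a single lea; the rough
// correspondence (a sketch, names invented for illustration):
//   constant offset:  lea result, [base + imm]
//   register offset:  lea result, [base + offset*1 + 0]
#include <cstdint>
inline uint8_t* InnerAllocatedAddress(uint8_t* base, intptr_t offset) {
  return base + offset;  // the pointer arithmetic lea encodes
}
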
@@ -3940,11 +3993,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
+ ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
Register object = ToRegister(instr->object());
- __ movq(MemOperand(object, offset), value);
+ __ Store(MemOperand(object, offset), value, representation);
}
return;
}
@@ -4013,15 +4067,20 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
- __ movq(FieldOperand(write_register, offset),
- ToRegister(operand_value));
+ Register value = ToRegister(operand_value);
+ __ Store(FieldOperand(write_register, offset), value, representation);
+ } else if (representation.IsInteger32()) {
+ int32_t value = ToInteger32(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ __ movl(FieldOperand(write_register, offset), Immediate(value));
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ Move(FieldOperand(write_register, offset), handle_value);
}
} else {
- __ movq(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ Register value = ToRegister(instr->value());
+ __ Store(FieldOperand(write_register, offset), value, representation);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
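
// The representation-aware Store above picks the store width from the field's
// representation; a minimal sketch of that dispatch (invented helper assuming
// movl/movq semantics, not the actual MacroAssembler code):
#include <cstdint>
#include <cstring>
inline void StoreField(void* dst, uint64_t value, bool is_int32_field) {
  if (is_int32_field) {
    uint32_t low = static_cast<uint32_t>(value);
    std::memcpy(dst, &low, sizeof(low));      // movl: 32-bit field
  } else {
    std::memcpy(dst, &value, sizeof(value));  // movq: full tagged word
  }
}
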
@@ -4040,6 +4099,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->value()).is(rax));
@@ -4189,7 +4249,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
Label have_value;
__ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
+ __ j(parity_odd, &have_value, Label::kNear); // NaN.
__ Set(kScratchRegister, BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
@@ -4277,6 +4337,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
@@ -4308,6 +4369,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
} else {
+ ASSERT(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
if (!object_reg.is(rax)) {
__ movq(rax, object_reg);
@@ -4325,16 +4387,27 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(instr->hydrogen()->flags());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(ToRegister(instr->context()).is(rsi));
+ if (FLAG_new_string_add) {
+ ASSERT(ToRegister(instr->left()).is(rdx));
+ ASSERT(ToRegister(instr->right()).is(rax));
+ NewStringAddStub stub(instr->hydrogen()->flags(),
+ isolate()->heap()->GetPretenureMode());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ } else {
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
+ StringAddStub stub(instr->hydrogen()->flags());
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ }
}
@@ -4385,7 +4458,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Integer32ToSmi(index, index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(
+ Runtime::kStringCharCodeAt, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
@@ -4438,7 +4512,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4449,9 +4523,9 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
if (input->IsRegister()) {
- __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
} else {
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
}
@@ -4533,15 +4607,17 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
Label slow;
Register reg = ToRegister(instr->value());
Register tmp = reg.is(rax) ? rcx : rax;
+ XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
Label done;
- // Load value into xmm1 which will be preserved across potential call to
+ // Load value into temp_xmm, which will be preserved across a potential call to

// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
- __ LoadUint32(xmm1, reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ LoadUint32(temp_xmm, reg, xmm_scratch);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
@@ -4556,13 +4632,22 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
// integer value.
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagU uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // It only calls Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+
if (!reg.is(rax)) __ movq(reg, rax);
- // Done. Put the value in xmm1 into the value of the allocated heap
+ // Done. Store the value held in temp_xmm into the allocated heap
// number.
__ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
__ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -4604,8 +4689,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
{
PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Ensure that value in rax survives popping registers.
+ // NumberTagD uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // It only calls Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ movq(kScratchRegister, rax);
}
__ movq(reg, kScratchRegister);
@@ -4639,7 +4731,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -4648,27 +4740,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
- // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
+ // On x64 it is safe to load from the heap-number value offset before the
+ // map check resolves, since all heap objects are at least two words long.
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ bind(&heap_number);
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert, Label::kNear);
+ } else {
+ DeoptimizeIf(not_equal, env);
}
- // Heap number to XMM conversion.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
@@ -4677,6 +4761,18 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
+
+ __ xorps(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
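
// The convert path above materializes NaN as 0/0 instead of loading a
// constant: under IEEE 754, 0.0/0.0 is an invalid operation that yields a
// quiet NaN, and xorps + divsd need no memory operand. A minimal check
// (sketch, assuming standard IEEE semantics):
#include <cmath>
inline double NaNFromZeroDivZero() {
  volatile double zero = 0.0;  // volatile keeps the compiler from folding it
  return zero / zero;          // std::isnan(...) holds for the result
}
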
@@ -4684,30 +4780,44 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, input_reg);
- __ cvtlsi2sd(result_reg, kScratchRegister);
+ __ Cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
- Label heap_number;
Register input_reg = ToRegister(instr->value());
-
if (instr->truncating()) {
+ Label no_heap_number, check_bools, check_false;
+
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
+
+ __ bind(&no_heap_number);
+ // Check for oddballs. Undefined and False are converted to zero and True
+ // to one in truncating conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
+ __ j(not_equal, &check_bools, Label::kNear);
__ Set(input_reg, 0);
__ jmp(done);
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ bind(&check_bools);
+ __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, 1);
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
+ DeoptimizeIf(not_equal, instr->environment());
+ __ Set(input_reg, 0);
+ __ jmp(done);
} else {
Label bailout;
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
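
// The truncating branch above mirrors JavaScript ToInt32 on oddballs:
// undefined and false become 0, true becomes 1, and any other non-number
// (null, strings, ...) falls through to DeoptimizeIf. The ladder as portable
// C++ (a sketch with invented types):
#include <cstdint>
enum class Oddball { kUndefined, kTrue, kFalse, kOther };
inline bool TruncateOddball(Oddball v, int32_t* out) {
  switch (v) {
    case Oddball::kUndefined: *out = 0; return true;
    case Oddball::kFalse:     *out = 0; return true;
    case Oddball::kTrue:      *out = 1; return true;
    default:                  return false;  // deoptimize: cannot truncate
  }
}
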
@@ -4737,12 +4847,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
-
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiToInteger32(input_reg, input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -4780,7 +4894,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TruncateDoubleToI(result_reg, input_reg);
} else {
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
@@ -4801,7 +4916,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(result);
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
@@ -4878,8 +4994,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
- __ CmpHeapObject(reg, object);
+ __ Cmp(reg, instr->hydrogen()->object().handle());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4888,7 +5003,11 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
__ push(object);
- CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ Set(rsi, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
__ testq(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
@@ -4919,23 +5038,22 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success);
- __ j(equal, &success);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
+ __ CompareMap(reg, map);
+ __ j(equal, &success, Label::kNear);
}
- Handle<Map> map = map_set->last();
- __ CompareMap(reg, map, &success);
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+ __ CompareMap(reg, map);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
} else {
@@ -4948,8 +5066,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -4964,9 +5083,10 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
-
- __ JumpIfSmi(input_reg, &is_smi);
+ Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ __ JumpIfSmi(input_reg, &is_smi, dist);
// Check for heap number
__ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -4982,8 +5102,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -5030,7 +5150,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ if (size <= Page::kMaxRegularHeapObjectSize) {
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ __ jmp(deferred->entry());
+ }
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
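
// Inline bump-pointer allocation only works within a regular page; sizes
// above Page::kMaxRegularHeapObjectSize must come from the large-object
// space, so codegen jumps straight to the deferred runtime call. A sketch of
// the decision (kIllustrativeLimit is invented; the real bound is the Page
// constant):
constexpr int kIllustrativeLimit = 512 * 1024;
inline bool CanAllocateInline(int size_in_bytes) {
  return size_in_bytes <= kIllustrativeLimit;  // else: deferred entry
}
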
@@ -5076,16 +5200,21 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(size));
}
+ int flags = 0;
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
- CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
} else {
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
+ __ Push(Smi::FromInt(flags));
+
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
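
// AllocateTargetSpace packs the target space into an integer that is then
// smi-tagged, pushed, and decoded again inside Runtime::kAllocateInTargetSpace.
// A minimal sketch of the BitField pattern it uses (simplified, not the exact
// V8 template):
template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr int kMask = ((1 << kSize) - 1) << kShift;
  static int update(int previous, T value) {
    return (previous & ~kMask) | (static_cast<int>(value) << kShift);
  }
  static T decode(int value) {
    return static_cast<T>((value & kMask) >> kShift);
  }
};
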
@@ -5098,6 +5227,7 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
Label materialized;
// Registers will be used as follows:
// rcx = literals array.
@@ -5105,7 +5235,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// rax = regexp literal clone.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(rcx, instr->hydrogen()->literals());
+ __ Move(rcx, instr->hydrogen()->literals());
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -5123,7 +5253,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
__ push(rbx);
@@ -5148,6 +5278,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
@@ -5167,6 +5298,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
@@ -5176,13 +5308,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- AllowDeferredHandleDereference smi_check;
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
+ __ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
@@ -5193,44 +5319,49 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
-
- Condition final_branch_condition =
- EmitTypeofIs(instr->TrueLabel(chunk_),
- instr->FalseLabel(chunk_), input, instr->type_literal());
+ Condition final_branch_condition = EmitTypeofIs(instr, input);
if (final_branch_condition != no_condition) {
EmitBranch(instr, final_branch_condition);
}
}
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+ Label* true_label = instr->TrueLabel(chunk_);
+ Label* false_label = instr->FalseLabel(chunk_);
+ Handle<String> type_name = instr->type_literal();
+ int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
+ int next_block = GetNextEmittedBlock();
+
+ Label::Distance true_distance = left_block == next_block ? Label::kNear
+ : Label::kFar;
+ Label::Distance false_distance = right_block == next_block ? Label::kNear
+ : Label::kFar;
Condition final_branch_condition = no_condition;
if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
+ __ JumpIfSmi(input, true_label, true_distance);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
+ __ j(above_equal, false_label, false_distance);
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
} else if (type_name->Equals(heap()->symbol_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
@@ -5240,8 +5371,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
+ __ j(equal, true_label, true_distance);
+ __ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ movq(input, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(input, Map::kBitFieldOffset),
@@ -5250,29 +5381,29 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
+ __ JumpIfSmi(input, false_label, false_distance);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, true_label);
+ __ j(equal, true_label, true_distance);
}
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- __ j(below, false_label);
+ __ j(below, false_label, false_distance);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
+ __ j(above, false_label, false_distance);
// Check for undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
} else {
- __ jmp(false_label);
+ __ jmp(false_label, false_distance);
}
return final_branch_condition;
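
// The true/false distances computed at the top of EmitTypeofIs are a code
// size optimization: a near jump uses a signed 8-bit displacement, a far jump
// a 32-bit one (jmp: EB cb vs. E9 cd; jcc: 7x cb vs. 0F 8x cd), and a branch
// to the block emitted next always fits the short form. Sketch of the range
// test:
#include <cstdint>
inline bool FitsInRel8(intptr_t displacement) {
  return displacement >= -128 && displacement <= 127;  // Label::kNear range
}
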
@@ -5296,7 +5427,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+ __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
@@ -5342,6 +5473,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummy(LDummy* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDummyUse(LDummyUse* instr) {
// Nothing to see here, move on!
}
@@ -5380,6 +5516,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
+
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(rsi));
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
@@ -5423,6 +5562,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ ASSERT(ToRegister(instr->context()).is(rsi));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -5462,9 +5602,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Label load_cache, done;
__ EnumLength(result, map);
__ Cmp(result, Smi::FromInt(0));
- __ j(not_equal, &load_cache);
+ __ j(not_equal, &load_cache, Label::kNear);
__ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ movq(result,
@@ -5492,7 +5632,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Label out_of_object, done;
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
- __ j(less, &out_of_object);
+ __ j(less, &out_of_object, Label::kNear);
__ movq(object, FieldOperand(object,
index,
times_pointer_size,
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.h b/chromium/v8/src/x64/lithium-codegen-x64.h
index f994645019d..53d26460b32 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.h
+++ b/chromium/v8/src/x64/lithium-codegen-x64.h
@@ -32,6 +32,7 @@
#include "checks.h"
#include "deoptimizer.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -44,42 +45,25 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -146,18 +130,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
@@ -166,7 +138,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk()->graph(); }
- int GetNextEmittedBlock() const;
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -178,14 +150,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+ void SaveCallerDoubles();
+ void RestoreCallerDoubles();
+
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
@@ -211,7 +185,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* function,
int num_arguments,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
@@ -222,7 +197,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
+
+ void LoadContextFromDeferred(LOperand* context);
enum RDIState {
RDI_UNINITIALIZED,
@@ -249,6 +227,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+ bool DeoptEveryNTimes() {
+ return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+ }
+
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
@@ -271,6 +253,10 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
uint32_t offset,
uint32_t additional_index = 0);
+ Operand BuildSeqStringOperand(Register string,
+ LOperand* index,
+ String::Encoding encoding);
+
void EmitIntegerMathAbs(LMathAbs* instr);
void EmitSmiMathAbs(LMathAbs* instr);
@@ -284,11 +270,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
+
+ // EmitBranch expects the branch it emits to be the last instruction of its block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template<class InstrType>
@@ -304,10 +291,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
+ Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
@@ -340,7 +324,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt(int space_needed);
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -355,24 +339,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void MakeSureStackPagesMapped(int offset);
#endif
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -384,8 +358,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc b/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
index 71db17c9315..6059c50b726 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -200,7 +200,7 @@ void LGapResolver::EmitMove(int index) {
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ __ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
double v = cgen_->ToDouble(constant_source);
@@ -209,7 +209,7 @@ void LGapResolver::EmitMove(int index) {
if (int_val == 0) {
__ xorps(dst, dst);
} else {
- __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+ __ Set(kScratchRegister, int_val);
__ movq(dst, kScratchRegister);
}
} else {
@@ -222,7 +222,7 @@ void LGapResolver::EmitMove(int index) {
// value.
__ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
+ __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
__ movq(dst, kScratchRegister);
}
}
@@ -262,7 +262,7 @@ void LGapResolver::EmitSwap(int index) {
// Swap two general-purpose registers.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
+ __ xchgq(dst, src);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
@@ -305,7 +305,7 @@ void LGapResolver::EmitSwap(int index) {
Operand other_operand = cgen_->ToOperand(other);
__ movsd(xmm0, other_operand);
__ movsd(other_operand, reg);
- __ movsd(reg, xmm0);
+ __ movaps(reg, xmm0);
} else {
// No other combinations are possible.
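
// Two deliberate choices above: xchgq makes the 64-bit operand size explicit,
// and movaps replaces movsd for register-to-register copies because movsd
// between XMM registers writes only the low 64 bits and merges in the old
// high lane (an extra dependency on the destination), while movaps copies all
// 128 bits with no merge. The same pair as intrinsics (illustration only):
#include <emmintrin.h>
inline __m128d FullCopy(__m128d src) {
  return src;  // a full-register copy; compilers emit movaps for this
}
inline __m128d MergeLowLane(__m128d dst, __m128d src) {
  return _mm_move_sd(dst, src);  // movsd xmm, xmm: low from src, high kept
}
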
diff --git a/chromium/v8/src/x64/lithium-x64.cc b/chromium/v8/src/x64/lithium-x64.cc
index a0e853d4fc3..473e93dde6f 100644
--- a/chromium/v8/src/x64/lithium-x64.cc
+++ b/chromium/v8/src/x64/lithium-x64.cc
@@ -275,7 +275,8 @@ void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
- stream->Add(" + %d", offset());
+ stream->Add(" + ");
+ offset()->PrintTo(stream);
}
@@ -353,19 +354,20 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
return spill_slot_count_++;
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -445,7 +447,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -664,7 +666,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -719,46 +721,39 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseFixed(right_value, rcx);
+ }
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ bool does_deopt = false;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
} else {
- right = UseFixed(right_value, rcx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
- }
+ return DoArithmeticT(op, instr);
}
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
}
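
// The only deopt case above is a logical shift right by zero: in JS, x >>> 0
// produces an unsigned 32-bit value, and results of 2^31 or more have no
// int32 representation, so the instruction deoptimizes unless its uses
// truncate back to int32 (or the kUint32 flag marks it safe). Sketch:
#include <cstdint>
inline bool ShrByZeroNeedsDeopt(uint32_t shifted) {
  return shifted > 0x7FFFFFFFu;  // not representable as an int32 result
}
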
@@ -767,29 +762,31 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
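
// SSE2 has no double-precision modulo instruction, so Token::MOD becomes a C
// call; that is why the right operand is pinned to xmm1 and the node is
// MarkAsCall. Semantically it computes fmod (sketch):
#include <cmath>
inline double DoubleMod(double a, double b) { return std::fmod(a, b); }
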
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left_operand = UseFixed(left, rdx);
LOperand* right_operand = UseFixed(right, rax);
LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
+ new(zone()) LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -864,10 +861,33 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ if (current->OperandCount() == 0) {
+ instr = DefineAsRegister(new(zone()) LDummy());
+ } else {
+ instr = DefineAsRegister(new(zone())
+ LDummyUse(UseAny(current->OperandAt(0))));
+ }
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -897,14 +917,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
@@ -996,7 +1014,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1006,16 +1024,10 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ HValue* value = instr->value();
LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
@@ -1053,7 +1065,8 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* left = UseFixed(instr->left(), rax);
LOperand* right = UseFixed(instr->right(), rdx);
- LInstanceOf* result = new(zone()) LInstanceOf(left, right);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1061,18 +1074,13 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
+ new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->context(), rsi),
+ UseFixed(instr->left(), rax),
FixedTemp(rdi));
return MarkAsCall(DefineFixed(result, rax), instr);
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1095,7 +1103,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseOrConstant(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1110,11 +1117,11 @@ LInstruction* LChunkBuilder::DoStoreCodeEntry(
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
- HInnerAllocatedObject* inner_object) {
- LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
- LInnerAllocatedObject* result =
- new(zone()) LInnerAllocatedObject(base_object);
- return DefineAsRegister(result);
+ HInnerAllocatedObject* instr) {
+ LOperand* base_object = UseRegisterAtStart(instr->base_object());
+ LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+ return DefineAsRegister(
+ new(zone()) LInnerAllocatedObject(base_object, offset));
}
@@ -1126,14 +1133,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, rsi);
}
- return NULL;
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -1144,12 +1150,14 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new(zone()) LGlobalObject);
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LGlobalObject(context));
}
@@ -1161,15 +1169,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1208,16 +1215,19 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(input);
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ return DefineSameAsFirst(result);
}
@@ -1269,58 +1279,62 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* key = UseFixed(instr->key(), rcx);
- argument_count_ -= instr->argument_count();
- LCallKeyed* result = new(zone()) LCallKeyed(key);
+ LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LCallNamed* result = new(zone()) LCallNamed(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LCallGlobal* result = new(zone()) LCallGlobal(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(function);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ LCallFunction* call = new(zone()) LCallFunction(context, function);
+ LInstruction* result = DefineFixed(call, rax);
+ if (instr->IsTailCall()) return result;
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LCallRuntime* result = new(zone()) LCallRuntime(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1348,27 +1362,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
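The deleted else branch is exactly what the shared DoArithmeticT helper provides; widening its parameter from HArithmeticBinaryOperation* to HBinaryOperation* (see the header hunk near the end of this diff) is what lets HBitwise reuse it. A hedged reconstruction of the helper after this patch, assembled from the removed lines rather than shown in any hunk:

    LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
                                               HBinaryOperation* instr) {
      ASSERT(instr->left()->representation().IsTagged());
      ASSERT(instr->right()->representation().IsTagged());
      LOperand* context = UseFixed(instr->context(), rsi);
      LOperand* left = UseFixed(instr->left(), rdx);
      LOperand* right = UseFixed(instr->right(), rax);
      LArithmeticT* result =
          new(zone()) LArithmeticT(op, context, left, right);
      return MarkAsCall(DefineFixed(result, rax), instr);
    }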
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1385,8 +1391,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, rax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1463,11 +1470,6 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
- } else if (instr->fixed_right_arg().has_value) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegisterAtStart(right),
- NULL);
- return AssignEnvironment(DefineSameAsFirst(mod));
} else {
// The temporary operand is necessary to ensure that right is not
// allocated into rdx.
@@ -1485,17 +1487,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need to
- // use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
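With the branches reordered, double MOD now flows through DoArithmeticD like every other double operation, and the fixed-xmm fmod call described by the removed comment moves there. A sketch of the helper after absorbing it (reconstructed under that assumption, not shown in this patch):

    LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                               HArithmeticBinaryOperation* instr) {
      ASSERT(instr->representation().IsDouble());
      if (op == Token::MOD) {
        // Double modulo calls a C function. It cannot trigger a GC, but it
        // still needs fixed xmm registers for the calling convention.
        LOperand* left = UseFixedDouble(instr->left(), xmm2);
        LOperand* right = UseFixedDouble(instr->right(), xmm1);
        LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
        return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
      } else {
        LOperand* left = UseRegisterAtStart(instr->left());
        LOperand* right = UseRegisterAtStart(instr->right());
        LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
        return DefineSameAsFirst(result);
      }
    }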
@@ -1515,7 +1510,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1536,7 +1530,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1565,10 +1558,24 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
result = AssignEnvironment(result);
}
return result;
+ } else if (instr->representation().IsExternal()) {
+ ASSERT(instr->left()->representation().IsExternal());
+ ASSERT(instr->right()->representation().IsInteger32());
+ ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+ bool use_lea = LAddI::UseLea(instr);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ HValue* right_candidate = instr->right();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
+ LAddI* add = new(zone()) LAddI(left, right);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
return NULL;
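The new external-pointer branch mirrors the integer path above it: when the add cannot overflow, a three-operand lea lets the result land in a fresh register (DefineAsRegister), while a plain two-operand add destroys its left input and forces DefineSameAsFirst. A standalone C++ illustration of the two shapes (not V8 code; the instruction comments show what an optimizing compiler typically emits on x64):

    #include <cstdint>

    uint64_t add_lea_style(uint64_t base, uint64_t offset) {
      return base + offset;  // typically: lea rax, [rdi + rsi]
    }

    uint64_t add_two_operand(uint64_t base, uint64_t offset) {
      base += offset;        // typically: add rdi, rsi; mov rax, rdi
      return base;
    }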
@@ -1578,15 +1585,16 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ if (instr->representation().IsSmi()) {
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseAtStart(instr->BetterRightOperand());
+ } else if (instr->representation().IsInteger32()) {
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
@@ -1610,25 +1618,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseTempRegister(instr->global_object());
- LOperand* scratch = TempRegister();
- LOperand* scratch2 = TempRegister();
- LOperand* scratch3 = TempRegister();
- LRandom* result = new(zone()) LRandom(
- global_object, scratch, scratch2, scratch3);
- return DefineFixedDouble(result, xmm1);
-}
-
-
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
- LCmpT* result = new(zone()) LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(context, left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1662,6 +1658,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1670,8 +1668,17 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+ HCompareMinusZeroAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LCompareMinusZeroAndBranch(value);
}
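The goto_instr guard used here (and again in DoTypeofIsAndBranch below) is the new CheckElideControlInstruction hook declared near the bottom of this diff. Reconstructed from V8 sources of this era, it folds a branch whose outcome is statically known into a plain goto:

    LInstruction* LChunkBuilder::CheckElideControlInstruction(
        HControlInstruction* instr) {
      HBasicBlock* successor;
      if (!instr->KnownSuccessorBlock(&successor)) return NULL;
      return new(zone()) LGoto(successor);
    }

This is presumably also why LGoto now stores the HBasicBlock* itself rather than a block id (see the header hunk): the id is read only after any late block renumbering.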
@@ -1709,10 +1716,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
LOperand* right = UseFixed(instr->right(), rax);
LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
+ new(zone()) LStringCompareAndBranch(context, left, right);
return MarkAsCall(result, instr);
}
@@ -1778,14 +1786,28 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+ return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(rcx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), rcx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
+ LOperand* string = UseRegisterAtStart(instr->string());
+ LOperand* index = FLAG_debug_code
+ ? UseRegisterAtStart(instr->index())
+ : UseRegisterOrConstantAtStart(instr->index());
+ LOperand* value = FLAG_debug_code
+ ? UseRegisterAtStart(instr->value())
+ : UseRegisterOrConstantAtStart(instr->value());
+ LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), rsi) : NULL;
+ LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+ index, value);
+ if (FLAG_debug_code) {
+ result = MarkAsCall(result, instr);
+ }
+ return result;
}
@@ -1803,9 +1825,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // This is the control instruction marking the end of a block that
+ // completed abruptly (e.g., threw an exception). There is nothing
+ // specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new(zone()) LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(context, value), instr);
}
@@ -1837,7 +1867,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1942,12 +1971,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone()) LIsNumberAndBranch(
- UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
@@ -1996,9 +2019,10 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
- return new(zone()) LReturn(UseFixed(instr->value(), rax),
- parameter_count);
+ return new(zone()) LReturn(
+ UseFixed(instr->value(), rax), context, parameter_count);
}
@@ -2031,8 +2055,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* global_object = UseFixed(instr->global_object(), rax);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result =
+ new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2048,10 +2074,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* global_object = UseFixed(instr->global_object(), rdx);
LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result = new(zone()) LStoreGlobalGeneric(global_object,
- value);
+ LStoreGlobalGeneric* result =
+ new(zone()) LStoreGlobalGeneric(context, global_object, value);
return MarkAsCall(result, instr);
}
@@ -2068,12 +2095,11 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LOperand* context;
LOperand* value;
LOperand* temp;
+ context = UseRegister(instr->context());
if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
temp = TempRegister();
} else {
- context = UseRegister(instr->context());
value = UseRegister(instr->value());
temp = NULL;
}
@@ -2083,7 +2109,14 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- if (instr->access().IsExternalMemory() && instr->access().offset() == 0) {
+ // Use the special mov rax, moffs64 encoding for external
+ // memory accesses with 64-bit word-sized values.
+ if (instr->access().IsExternalMemory() &&
+ instr->access().offset() == 0 &&
+ (instr->access().representation().IsSmi() ||
+ instr->access().representation().IsTagged() ||
+ instr->access().representation().IsHeapObject() ||
+ instr->access().representation().IsExternal())) {
LOperand* obj = UseRegisterOrConstantAtStart(instr->object());
return DefineFixed(new(zone()) LLoadNamedField(obj), rax);
}
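The widened condition spells out when the single-instruction shortcut applies: offset zero and a word-sized representation (Smi, Tagged, HeapObject, and External are all 64-bit on x64). The encoding the comment refers to exists only with rax as destination, which is also why the result is DefineFixed to rax. As x86-64 background (my summary, not from the diff):

    // 48 a1 <imm64>   movabs rax, qword ptr [imm64]  ; one instruction, rax only
    //
    // Any other destination needs the address materialized first:
    // 48 b9 <imm64>   movabs rcx, imm64
    // 48 8b 09        mov    rcx, qword ptr [rcx]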
@@ -2093,8 +2126,9 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rax);
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
+ LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2106,6 +2140,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2144,10 +2183,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* key = UseFixed(instr->key(), rax);
- LLoadKeyedGeneric* result = new(zone()) LLoadKeyedGeneric(object, key);
+ LLoadKeyedGeneric* result =
+ new(zone()) LLoadKeyedGeneric(context, object, key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2201,6 +2242,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* key = UseFixed(instr->key(), rcx);
LOperand* value = UseFixed(instr->value(), rax);
@@ -2210,7 +2252,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->value()->representation().IsTagged());
LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(object, key, value);
+ new(zone()) LStoreKeyedGeneric(context, object, key, value);
return MarkAsCall(result, instr);
}
@@ -2222,12 +2264,13 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
+ LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(
+ object, NULL, new_map_reg, temp_reg);
return result;
} else {
+ LOperand* context = UseFixed(instr->context(), rsi);
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, NULL);
+ new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
return AssignPointerMap(result);
}
}
@@ -2304,55 +2347,71 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* value = UseFixed(instr->value(), rax);
- LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(object, value);
+ LStoreNamedGeneric* result =
+ new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), rax),
- instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* left = FLAG_new_string_add
+ ? UseFixed(instr->left(), rdx)
+ : UseOrConstantAtStart(instr->left());
+ LOperand* right = FLAG_new_string_add
+ ? UseFixed(instr->right(), rax)
+ : UseOrConstantAtStart(instr->right());
+ return MarkAsCall(
+ DefineFixed(new(zone()) LStringAdd(context, left, right), rax), instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result =
+ new(zone()) LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result =
+ new(zone()) LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
LOperand* size = instr->size()->IsConstant()
? UseConstant(instr->size())
: UseTempRegister(instr->size());
LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp);
+ LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LRegExpLiteral* result = new(zone()) LRegExpLiteral(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LFunctionLiteral* result = new(zone()) LFunctionLiteral(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2374,7 +2433,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
int index = static_cast<int>(instr->index());
- Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index);
+ Register reg = descriptor->GetParameterRegister(index);
return DefineFixed(result, reg);
}
}
@@ -2399,8 +2458,9 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LCallStub* result = new(zone()) LCallStub(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2445,12 +2505,17 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseAtStart(instr->value()));
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* value = UseAtStart(instr->value());
+ LTypeof* result = new(zone()) LTypeof(context, value);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
+
return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
@@ -2485,10 +2550,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
+ LOperand* context = UseFixed(instr->context(), rsi);
+ return MarkAsCall(new(zone()) LStackCheck(context), instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(
+ AssignPointerMap(new(zone()) LStackCheck(context)));
}
}
@@ -2521,7 +2589,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
@@ -2533,8 +2601,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object = UseFixed(instr->enumerable(), rax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
diff --git a/chromium/v8/src/x64/lithium-x64.h b/chromium/v8/src/x64/lithium-x64.h
index 4942c10526e..44bd992f7dd 100644
--- a/chromium/v8/src/x64/lithium-x64.h
+++ b/chromium/v8/src/x64/lithium-x64.h
@@ -72,6 +72,7 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
+ V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -92,6 +93,7 @@ class LCodeGen;
V(DoubleToSmi) \
V(Drop) \
V(DummyUse) \
+ V(Dummy) \
V(ElementsKind) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
@@ -105,7 +107,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -114,12 +115,12 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -152,9 +153,9 @@ class LCodeGen;
V(Parameter) \
V(Power) \
V(PushArgument) \
- V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringGetChar) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
@@ -214,7 +215,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -255,15 +255,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -273,7 +264,7 @@ class LInstruction : public ZoneObject {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
@@ -303,7 +294,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -404,17 +394,17 @@ class LInstructionGap V8_FINAL : public LGap {
class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
private:
- int block_id_;
+ HBasicBlock* block_;
};
@@ -434,6 +424,13 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
+class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LDummy() { }
+ DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
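LDummy's base type is a reminder of how to read the many template-argument bumps in this header: LTemplateInstruction<R, I, T> fixes the per-instruction operand counts. A trimmed reconstruction of its shape from V8 headers of this era (not part of the patch):

    template<int R, int I, int T>
    class LTemplateInstruction : public LInstruction {
     public:
      // Allow 0 or 1 output operands.
      STATIC_ASSERT(R == 0 || R == 1);

     protected:
      EmbeddedContainer<LOperand*, R> results_;
      EmbeddedContainer<LOperand*, I> inputs_;
      EmbeddedContainer<LOperand*, T> temps_;
    };

So every <1, 2, 0> to <1, 3, 0> change below is an instruction gaining exactly one input slot, the context operand.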
+
+
class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
@@ -485,8 +482,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallStub(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -740,12 +743,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LMathAbs(LOperand* value) {
+ explicit LMathAbs(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
@@ -869,31 +874,33 @@ class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
- explicit LIsObjectAndBranch(LOperand* value) {
+ explicit LCompareMinusZeroAndBranch(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+ "cmp-minus-zero-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
- explicit LIsNumberAndBranch(LOperand* value) {
+ explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
@@ -947,15 +954,19 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
- explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ explicit LStringCompareAndBranch(LOperand* context,
+ LOperand* left,
+ LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
@@ -1033,15 +1044,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
};
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -1050,28 +1063,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
temps_[0] = temp;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1092,19 +1109,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1260,7 +1264,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1311,45 +1315,59 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* date() { return inputs_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
};
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
+ LSeqStringGetChar(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
- inputs_[2] = value;
}
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* string() const { return inputs_[0]; }
+ LOperand* index() const { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LSeqStringSetChar(LOperand* context,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
+ inputs_[3] = value;
+ }
+
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
};
-class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ explicit LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1405,28 +1423,6 @@ class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
- public:
- LRandom(LOperand* global_object,
- LOperand* scratch,
- LOperand* scratch2,
- LOperand* scratch3) {
- inputs_[0] = global_object;
- temps_[0] = scratch;
- temps_[1] = scratch2;
- temps_[2] = scratch3;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* scratch() const { return temps_[0]; }
- LOperand* scratch2() const { return temps_[1]; }
- LOperand* scratch3() const { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1450,17 +1446,22 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
virtual Opcode opcode() const V8_OVERRIDE {
return LInstruction::kArithmeticT;
@@ -1473,14 +1474,18 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LReturn(LOperand* value, LOperand* parameter_count) {
+ explicit LReturn(LOperand* value,
+ LOperand* context,
+ LOperand* parameter_count) {
inputs_[0] = value;
- inputs_[1] = parameter_count;
+ inputs_[1] = context;
+ inputs_[2] = parameter_count;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
bool has_constant_parameter_count() {
return parameter_count()->IsConstantOperand();
@@ -1489,7 +1494,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
ASSERT(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
- LOperand* parameter_count() { return inputs_[1]; }
+ LOperand* parameter_count() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
DECLARE_HYDROGEN_ACCESSOR(Return)
@@ -1509,16 +1514,18 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
+ explicit LLoadNamedGeneric(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@@ -1536,6 +1543,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1573,17 +1589,19 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
+ LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
};
@@ -1594,16 +1612,18 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
+ explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
@@ -1624,16 +1644,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
+ explicit LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ inputs_[2] = value;
}
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
@@ -1723,19 +1746,19 @@ class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
};
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInnerAllocatedObject(LOperand* base_object) {
+ LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
inputs_[0] = base_object;
+ inputs_[1] = offset;
}
- LOperand* base_object() { return inputs_[0]; }
- int offset() { return hydrogen()->offset(); }
+ LOperand* base_object() const { return inputs_[0]; }
+ LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
- DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
+ DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@@ -1765,15 +1788,27 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LGlobalObject(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
@@ -1802,13 +1837,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
+ LInvokeFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
@@ -1819,16 +1856,18 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
+ LCallKeyed(LOperand* context, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- LOperand* key() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
@@ -1836,8 +1875,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallNamed(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
@@ -1848,22 +1893,30 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
+ LCallFunction(LOperand* context, LOperand* function) {
+ inputs_[0] = context;
+ inputs_[1] = function;
}
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- LOperand* function() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallGlobal(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1885,13 +1938,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNew(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1902,13 +1957,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
}
- LOperand* constructor() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
@@ -1919,13 +1976,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
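The ClobbersDoubleRegisters override encodes a register-allocation fact: xmm state survives a runtime call only when the CEntry stub was asked to save FP registers. A self-contained restatement (illustrative only, not V8 source):

    #include <cassert>

    enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

    // Mirrors LCallRuntime::ClobbersDoubleRegisters() above: doubles are
    // clobbered exactly when the call does not save FP registers.
    bool ClobbersDoubleRegisters(SaveFPRegsMode save_doubles) {
      return save_doubles == kDontSaveFPRegs;
    }

    int main() {
      assert(ClobbersDoubleRegisters(kDontSaveFPRegs));
      assert(!ClobbersDoubleRegisters(kSaveFPRegs));
      return 0;
    }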
@@ -2063,7 +2131,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2135,15 +2203,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
+ LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2178,17 +2248,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
+ LStoreKeyedGeneric(LOperand* context,
+ LOperand* object,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ inputs_[2] = key;
+ inputs_[3] = value;
}
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
+ LOperand* value() { return inputs_[3]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2199,17 +2274,20 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
};
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp,
LOperand* temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
temps_[1] = temp;
}
LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[1]; }
LOperand* new_map_temp() { return temps_[0]; }
LOperand* temp() { return temps_[1]; }
@@ -2219,8 +2297,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2242,43 +2322,49 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
@@ -2388,14 +2474,16 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
- LAllocate(LOperand* size, LOperand* temp) {
- inputs_[0] = size;
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = size;
temps_[0] = temp;
}
- LOperand* size() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
@@ -2403,15 +2491,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
@@ -2430,13 +2530,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
- LOperand* value() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2484,8 +2586,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2496,13 +2604,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
@@ -2558,8 +2668,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
};
@@ -2576,13 +2686,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2715,7 +2826,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
@@ -2727,7 +2838,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
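The lithium changes above all follow one pattern: instructions that may call out to the runtime now carry the context as an explicit first input, shifting any existing inputs up by one index. A minimal sketch of the shape, with a hypothetical instruction name (not part of the patch):

    class LExampleWithContext V8_FINAL : public LTemplateInstruction<1, 2, 0> {
     public:
      LExampleWithContext(LOperand* context, LOperand* value) {
        inputs_[0] = context;  // context becomes input 0
        inputs_[1] = value;    // the old sole input moves to index 1
      }
      LOperand* context() { return inputs_[0]; }
      LOperand* value() { return inputs_[1]; }
    };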
diff --git a/chromium/v8/src/x64/macro-assembler-x64.cc b/chromium/v8/src/x64/macro-assembler-x64.cc
index 69abc5454f0..6c3f50163ef 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/macro-assembler-x64.cc
@@ -37,6 +37,7 @@
#include "serialize.h"
#include "debug.h"
#include "heap.h"
+#include "isolate-inl.h"
namespace v8 {
namespace internal {
@@ -44,7 +45,6 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
@@ -79,7 +79,7 @@ Operand MacroAssembler::ExternalOperand(ExternalReference target,
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
- movq(scratch, target);
+ Move(scratch, target);
return Operand(scratch, 0);
}
@@ -97,7 +97,7 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
if (destination.is(rax)) {
load_rax(source);
} else {
- movq(kScratchRegister, source);
+ Move(kScratchRegister, source);
movq(destination, Operand(kScratchRegister, 0));
}
}
@@ -116,7 +116,7 @@ void MacroAssembler::Store(ExternalReference destination, Register source) {
if (source.is(rax)) {
store_rax(destination);
} else {
- movq(kScratchRegister, destination);
+ Move(kScratchRegister, destination);
movq(Operand(kScratchRegister, 0), source);
}
}
@@ -133,7 +133,7 @@ void MacroAssembler::LoadAddress(Register destination,
}
}
// Safe code.
- movq(destination, source);
+ Move(destination, source);
}
@@ -163,7 +163,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !Serializer::enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
}
push(Immediate(static_cast<int32_t>(address)));
return;
@@ -275,20 +275,21 @@ void MacroAssembler::InNewSpace(Register object,
// case the size of the new space is different between the snapshot maker
// and the running system.
if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+ Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
and_(scratch, kScratchRegister);
} else {
- movq(scratch, ExternalReference::new_space_mask(isolate()));
+ Move(scratch, ExternalReference::new_space_mask(isolate()));
and_(scratch, object);
}
- movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+ Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+ movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+ RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
@@ -309,11 +310,6 @@ void MacroAssembler::RecordWriteField(
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !dst.is(rsi));
-
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -344,8 +340,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
+ movq(dst, kZapValue, RelocInfo::NONE64);
}
}
@@ -378,8 +374,8 @@ void MacroAssembler::RecordWriteArray(Register object,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
+ movq(index, kZapValue, RelocInfo::NONE64);
}
}
@@ -390,11 +386,6 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !address.is(rsi));
-
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
@@ -413,6 +404,10 @@ void MacroAssembler::RecordWrite(Register object,
bind(&ok);
}
+  // Count the number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
@@ -444,8 +439,8 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(address, kZapValue, RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
}
}
@@ -533,10 +528,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
#endif
push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE64);
+ movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
+ movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
RelocInfo::NONE64);
push(kScratchRegister);
@@ -560,8 +554,6 @@ void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ ||
- stub->CompilingCallsToThisStubIsGCSafe(isolate()));
Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -573,8 +565,7 @@ void MacroAssembler::StubReturn(int argc) {
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -605,22 +596,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -635,7 +613,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
+ CEntryStub ces(f->result_size, save_doubles);
CallStub(&ces);
}
@@ -691,13 +669,16 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
Label prologue;
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label write_back;
@@ -719,7 +700,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
Register base_reg = r15;
- movq(base_reg, next_address);
+ Move(base_reg, next_address);
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
@@ -750,7 +731,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&profiler_disabled);
// Call the api function!
- movq(rax, reinterpret_cast<int64_t>(function_address),
+ movq(rax, reinterpret_cast<Address>(function_address),
RelocInfo::EXTERNAL_REFERENCE);
bind(&end_profiler_check);
@@ -768,7 +749,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
}
// Load the value from ReturnValue
- movq(rax, Operand(rbp, return_value_offset * kPointerSize));
+ movq(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
@@ -780,9 +761,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&leave_exit_frame);
// Check if the function scheduled an exception.
- movq(rsi, scheduled_exception_address);
+ Move(rsi, scheduled_exception_address);
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -819,11 +801,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ movq(rsi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -936,6 +926,50 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
}
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsxbq(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzxbl(dst, src);
+ } else if (r.IsInteger16()) {
+ movsxwq(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzxwl(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
+ ASSERT(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ movb(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ movw(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
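A hedged usage sketch for the representation-aware moves above; the registers, field offset, and Representation factory calls are illustrative, not taken from this patch:

    // Sign-extends an 8-bit field on load (movsxbq), stores a single byte.
    masm->Load(rax, FieldOperand(rbx, offset), Representation::Integer8());
    masm->Store(FieldOperand(rbx, offset), rax, Representation::Integer8());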
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -944,7 +978,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(dst, x, RelocInfo::NONE64);
+ movq(dst, x);
}
}
@@ -1009,18 +1043,9 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
+ movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
cmpq(dst, kSmiConstantRegister);
- if (allow_stub_calls()) {
- Assert(equal, kUninitializedKSmiConstantRegister);
- } else {
- Label ok;
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
+ Assert(equal, kUninitializedKSmiConstantRegister);
}
int value = source->value();
if (value == 0) {
@@ -1058,7 +1083,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
UNREACHABLE();
return;
default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
+ movq(dst, source, RelocInfo::NONE64);
return;
}
if (negative) {
@@ -1081,11 +1106,7 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
testb(dst, Immediate(0x01));
Label ok;
j(zero, &ok, Label::kNear);
- if (allow_stub_calls()) {
- Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
- } else {
- int3();
- }
+ Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
ASSERT(kSmiShift % kBitsPerByte == 0);
@@ -1423,28 +1444,6 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
}
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result, near_jump);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result, near_jump);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1504,7 +1503,8 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1512,15 +1512,32 @@ void MacroAssembler::SmiAddConstant(Register dst,
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ addq(dst, kScratchRegister);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ subq(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ subq(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+      // Bail out on overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
+ } else {
+ CHECK(mode.IsEmpty());
+ }
} else {
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ j(overflow, bailout_label, near_jump);
}
}
@@ -1552,7 +1569,8 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1560,35 +1578,40 @@ void MacroAssembler::SmiSubConstant(Register dst,
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ addq(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ addq(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+      // Bail out on overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
} else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ CHECK(mode.IsEmpty());
}
} else {
+ ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ ASSERT(!dst.is(kScratchRegister));
+ movq(dst, src);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ j(overflow, bailout_label, near_jump);
}
}
}
@@ -1616,6 +1639,29 @@ void MacroAssembler::SmiNeg(Register dst,
}
+template<class T>
+static void SmiAddHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src1)) {
+ Label done;
+ masm->addq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->subq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
+ } else {
+ masm->movq(dst, src1);
+ masm->addq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2,
@@ -1623,16 +1669,7 @@ void MacroAssembler::SmiAdd(Register dst,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
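SmiAddHelper replaces the old scratch-register dance with an add-then-undo scheme: add in place, and on overflow subtract the addend back so that src1 survives the bailout. The same contract in portable C++, as a sketch (the builtin is a GCC/Clang extension):

    static inline bool CheckedAddInPlace(int64_t* dst, int64_t src) {
      int64_t sum;
      if (__builtin_add_overflow(*dst, src, &sum)) {
        return false;  // *dst is left unchanged, mirroring the subq undo
      }
      *dst = sum;
      return true;
    }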
@@ -1642,17 +1679,8 @@ void MacroAssembler::SmiAdd(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1675,34 +1703,37 @@ void MacroAssembler::SmiAdd(Register dst,
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+template<class T>
+static void SmiSubHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- subq(dst, src2);
+ Label done;
+ masm->subq(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->addq(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
} else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
+ masm->movq(dst, src1);
+ masm->subq(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
}
}
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ ASSERT_NOT_NULL(on_not_smi_result);
ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, kSmiSubtractionOverflow);
+ SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1712,29 +1743,36 @@ void MacroAssembler::SmiSub(Register dst,
Label* on_not_smi_result,
Label::Distance near_jump) {
ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result, near_jump);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ ASSERT(!src2.AddressUsesRegister(dst));
+ SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
+template<class T>
+static void SmiSubNoOverflowHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2) {
// No overflow checking. Use only when it's known that
// overflowing is impossible (e.g., subtracting two positive smis).
if (!dst.is(src1)) {
- movq(dst, src1);
+ masm->movq(dst, src1);
}
- subq(dst, src2);
- Assert(no_overflow, kSmiSubtractionOverflow);
+ masm->subq(dst, src2);
+ masm->Assert(no_overflow, kSmiSubtractionOverflow);
+}
+
+
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+ ASSERT(!dst.is(src2));
+ SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2) {
+ SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}
@@ -2129,10 +2167,8 @@ void MacroAssembler::SelectNonSmi(Register dst,
ASSERT(!dst.is(src2));
// Both operands must not be smis.
#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
- }
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
@@ -2240,6 +2276,90 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
// ----------------------------------------------------------------------------
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shrl(mask, Immediate(1));
+ subq(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ isolate()->factory()->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ and_(scratch, mask);
+  // Each entry in the string cache consists of two pointer-sized fields,
+  // but the times_twice_pointer_size (multiply by 16) scale factor is not
+  // supported by x64 addressing modes, so the entry index has to be
+  // premultiplied before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ Register index = scratch;
+ Register probe = mask;
+ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache);
+
+ bind(&is_smi);
+ SmiToInteger32(scratch, object);
+ and_(scratch, mask);
+  // Each entry in the string cache consists of two pointer-sized fields,
+  // but the times_twice_pointer_size (multiply by 16) scale factor is not
+  // supported by x64 addressing modes, so the entry index has to be
+  // premultiplied before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
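The cache indexing above, restated as a C++ sketch (function name assumed): smis hash to their untagged value, doubles to the xor of their 32-bit halves, and the masked index is premultiplied because x64 addressing modes stop at a *8 scale:

    static inline uint32_t NumberStringCacheIndex(uint64_t double_bits,
                                                  uint32_t mask) {
      uint32_t hash = static_cast<uint32_t>(double_bits >> 32) ^
                      static_cast<uint32_t>(double_bits);
      return (hash & mask) << (3 + 1);  // kPointerSizeLog2 + 1 on x64
    }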
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2376,8 +2496,7 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(dst, source);
}
}
@@ -2387,8 +2506,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
movq(dst, kScratchRegister);
}
}
@@ -2399,8 +2517,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
@@ -2411,8 +2528,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
}
}
@@ -2423,47 +2539,22 @@ void MacroAssembler::Push(Handle<Object> source) {
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(kScratchRegister, source);
push(kScratchRegister);
}
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
+void MacroAssembler::MoveHeapObject(Register result,
+ Handle<Object> object) {
AllowDeferredHandleDereference using_raw_address;
+ ASSERT(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
movq(result, cell, RelocInfo::CELL);
movq(result, Operand(result, 0));
} else {
- Move(result, object);
- }
-}
-
-
-void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(kScratchRegister, cell, RelocInfo::CELL);
- cmpq(reg, Operand(kScratchRegister, 0));
- } else {
- Cmp(reg, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- movq(kScratchRegister, cell, RelocInfo::CELL);
- movq(kScratchRegister, Operand(kScratchRegister, 0));
- push(kScratchRegister);
- } else {
- Push(object);
+ movq(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
@@ -2548,7 +2639,8 @@ void MacroAssembler::Call(Handle<Code> code_object,
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode, ast_id);
#ifdef DEBUG
CHECK_EQ(end_position, pc_offset());
@@ -2651,7 +2743,8 @@ Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2710,7 +2803,8 @@ void MacroAssembler::JumpToHandlerEntry() {
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2750,7 +2844,8 @@ void MacroAssembler::Throw(Register value) {
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2917,7 +3012,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Value is a smi. convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
+ Cvtlsi2sd(xmm_scratch, kScratchRegister);
movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
@@ -2925,9 +3020,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -2940,10 +3033,8 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -3028,9 +3119,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
cmpq(result_reg, kScratchRegister);
j(not_equal, &done, Label::kNear);
@@ -3050,7 +3139,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label* conversion_failed,
Label::Distance dst) {
cvttsd2si(result_reg, input_reg);
- cvtlsi2sd(xmm0, result_reg);
+ Cvtlsi2sd(xmm0, result_reg);
ucomisd(xmm0, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -3087,7 +3176,7 @@ void MacroAssembler::TaggedToI(Register result_reg,
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, xmm0);
- cvtlsi2sd(temp, result_reg);
+ Cvtlsi2sd(temp, result_reg);
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, dst);
@@ -3104,6 +3193,39 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
+void MacroAssembler::Throw(BailoutReason reason) {
+#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
+ if (msg != NULL) {
+ RecordComment("Throw message: ");
+ RecordComment(msg);
+ }
+#endif
+
+ push(rax);
+ Push(Smi::FromInt(reason));
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kThrowMessage, 1);
+ } else {
+ CallRuntime(Runtime::kThrowMessage, 1);
+ }
+ // Control will not return here.
+ int3();
+}
+
+
+void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+ Label L;
+ j(NegateCondition(cc), &L);
+ Throw(reason);
+  // Control will not return here.
+ bind(&L);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3180,7 +3302,7 @@ void MacroAssembler::AssertSmi(const Operand& object) {
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
}
@@ -3462,7 +3584,7 @@ void MacroAssembler::InvokeFunction(Register function,
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -3471,18 +3593,27 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
- // Get the function and setup the context.
- LoadHeapObject(rdi, function);
- movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
+ ASSERT(function.is(rdi));
+ movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ // Advances rdx to the end of the Code object header, to the start of
+ // the executable code.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+
InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
}
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ Move(rdi, function);
+ InvokeFunction(rdi, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
@@ -3559,6 +3690,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ Push(Smi::FromInt(StackFrame::STUB));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
+ } else {
+ push(rbp); // Caller's frame pointer.
+ movq(rbp, rsp);
+ push(rsi); // Callee's context.
+ push(rdi); // Callee's JS function.
+ }
+ }
+}
+
+
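Both branches of Prologue are sized to kNoCodeAgeSequenceLength bytes (the PredictableCodeSizeScope above enforces this), which is what lets the code ager patch one form over the other in place. Roughly, as a sketch:

    // young prologue:              pre-aged prologue:
    //   push rbp                     call MarkCodeAsExecutedOnce  ; short call
    //   movq rbp, rsp                nop * (sequence len - call len)
    //   push rsi
    //   push rdi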
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
movq(rbp, rsp);
@@ -3590,9 +3745,10 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
+ kFPOnStackSize + kPCOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+ ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(rbp);
movq(rbp, rsp);
@@ -3620,7 +3776,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
@@ -3683,23 +3839,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
PushReturnAddressFrom(rcx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movq(rsp, rbp);
pop(rbp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
+ if (restore_context) {
+ movq(rsi, context_operand);
+ }
#ifdef DEBUG
movq(context_operand, Immediate(0));
#endif
@@ -3777,6 +3935,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// code-stub-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
@@ -3851,8 +4012,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
movq(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -3870,7 +4030,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
@@ -4280,18 +4440,27 @@ void MacroAssembler::CopyBytes(Register destination,
cmpl(length, Immediate(min_length));
Assert(greater_equal, kInvalidMinLength);
}
- Label loop, done, short_string, short_loop;
+ Label short_loop, len8, len16, len24, done, short_string;
- const int kLongStringLimit = 20;
+ const int kLongStringLimit = 4 * kPointerSize;
if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kLongStringLimit));
- j(less_equal, &short_string);
+ cmpl(length, Immediate(kPointerSize));
+ j(below, &short_string, Label::kNear);
}
ASSERT(source.is(rsi));
ASSERT(destination.is(rdi));
ASSERT(length.is(rcx));
+ if (min_length <= kLongStringLimit) {
+ cmpl(length, Immediate(2 * kPointerSize));
+ j(below_equal, &len8, Label::kNear);
+ cmpl(length, Immediate(3 * kPointerSize));
+ j(below_equal, &len16, Label::kNear);
+ cmpl(length, Immediate(4 * kPointerSize));
+ j(below_equal, &len24, Label::kNear);
+ }
+
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
@@ -4305,25 +4474,38 @@ void MacroAssembler::CopyBytes(Register destination,
addq(destination, scratch);
if (min_length <= kLongStringLimit) {
- jmp(&done);
+ jmp(&done, Label::kNear);
+ bind(&len24);
+ movq(scratch, Operand(source, 2 * kPointerSize));
+ movq(Operand(destination, 2 * kPointerSize), scratch);
+ bind(&len16);
+ movq(scratch, Operand(source, kPointerSize));
+ movq(Operand(destination, kPointerSize), scratch);
+ bind(&len8);
+ movq(scratch, Operand(source, 0));
+ movq(Operand(destination, 0), scratch);
+  // Copy the remaining bytes with one overlapping word-sized move.
+ movq(scratch, Operand(source, length, times_1, -kPointerSize));
+ movq(Operand(destination, length, times_1, -kPointerSize), scratch);
+ addq(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
if (min_length == 0) {
testl(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
}
- lea(scratch, Operand(destination, length, times_1, 0));
bind(&short_loop);
- movb(length, Operand(source, 0));
- movb(Operand(destination, 0), length);
+ movb(scratch, Operand(source, 0));
+ movb(Operand(destination, 0), scratch);
incq(source);
incq(destination);
- cmpq(destination, scratch);
- j(not_equal, &short_loop);
-
- bind(&done);
+ decl(length);
+ j(not_zero, &short_loop);
}
+
+ bind(&done);
}
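The rewritten CopyBytes covers lengths from kPointerSize up to 4 * kPointerSize with whole-word moves plus one overlapping move for the tail, instead of a byte loop. A hedged C++ rendering of that tail trick (assumes <cstring>):

    static inline void CopyWordsWithOverlap(uint8_t* dst, const uint8_t* src,
                                            size_t len) {
      // Assumes 8 <= len <= 32, as in the len8/len16/len24 paths above.
      for (size_t i = 0; i + 8 < len; i += 8) {
        memcpy(dst + i, src + i, 8);
      }
      memcpy(dst + len - 8, src + len - 8, 8);  // overlapping final word
    }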
@@ -4478,6 +4660,39 @@ int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object);
+ Throw(kNonObject);
+ bind(&is_object);
+
+ push(value);
+ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmpq(value, Immediate(encoding_mask));
+ pop(value);
+ ThrowIf(not_equal, kUnexpectedStringType);
+
+  // The index is assumed to come in untagged. Tag it to compare with the
+  // string length without using a temp register; it is restored at the end
+  // of this function.
+ Integer32ToSmi(index, index);
+ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ ThrowIf(greater_equal, kIndexIsTooLarge);
+
+ SmiCompare(index, Smi::FromInt(0));
+ ThrowIf(less, kIndexIsNegative);
+
+ // Restore the index
+ SmiToInteger32(index, index);
+}
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
@@ -4762,7 +4977,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
+ Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
j(equal, call_runtime);
jmp(&start);
@@ -4791,8 +5006,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -4800,14 +5015,39 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
- movq(kScratchRegister, new_space_start);
+ Move(kScratchRegister, new_space_start);
cmpq(scratch_reg, kScratchRegister);
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
- bind(&no_memento_available);
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
+ ASSERT(!scratch1.is(scratch0));
+ Register current = scratch0;
+ Label loop_again;
+
+ movq(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ movq(current, FieldOperand(current, HeapObject::kMapOffset));
+ movq(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ and_(scratch1, Immediate(Map::kElementsKindMask));
+ shr(scratch1, Immediate(Map::kElementsKindShift));
+ cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ movq(current, FieldOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ j(not_equal, &loop_again);
}
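JumpIfDictionaryInPrototypeChain walks map->prototype links until it reaches null, checking each map's elements kind on the way. A pseudocode equivalent, with the object model simplified:

    // current = object;
    // do {
    //   Map* map = current->map();
    //   if (map->elements_kind() == DICTIONARY_ELEMENTS) goto found;
    //   current = map->prototype();
    // } while (current != null_value);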
diff --git a/chromium/v8/src/x64/macro-assembler-x64.h b/chromium/v8/src/x64/macro-assembler-x64.h
index 09c8a800cca..98808a86722 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/x64/macro-assembler-x64.h
@@ -53,6 +53,22 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum SmiOperationConstraint {
+ PRESERVE_SOURCE_REGISTER,
+ BAILOUT_ON_NO_OVERFLOW,
+ BAILOUT_ON_OVERFLOW,
+ NUMBER_OF_CONSTRAINTS
+};
+
+STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);
+
+class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
+ public:
+ SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
+ explicit SmiOperationExecutionMode(byte bits)
+ : EnumSet<SmiOperationConstraint, byte>(bits) { }
+};
+
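A hypothetical call site for the new mode type; EnumSet is assumed to expose Add() alongside the Contains() used in the .cc changes above:

    Label deopt;
    SmiOperationExecutionMode mode;
    mode.Add(PRESERVE_SOURCE_REGISTER);
    mode.Add(BAILOUT_ON_OVERFLOW);
    // Adds one to the smi in rcx; on overflow jumps to deopt with rcx intact.
    masm->SmiAddConstant(rcx, rcx, Smi::FromInt(1), mode, &deopt);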
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
// Forward declaration.
@@ -282,6 +298,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
@@ -302,7 +321,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
@@ -316,7 +335,7 @@ class MacroAssembler: public Assembler {
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
- movq(kRootRegister, roots_array_start);
+ Move(kRootRegister, roots_array_start);
addq(kRootRegister, Immediate(kRootRegisterBias));
}
@@ -352,6 +371,13 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
+ void InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
+
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -381,8 +407,7 @@ class MacroAssembler: public Assembler {
void SafePush(Smi* src);
void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ movq(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
RelocInfo::NONE64);
}
@@ -532,15 +557,6 @@ class MacroAssembler: public Assembler {
// Smis represent a subset of integers. The subset is always equivalent to
// a two's complement interpretation of a fixed number of bits.
- // Optimistically adds an integer constant to a supposed smi.
- // If the src is not a smi, or the result is not a smi, jump to
- // the label.
- void SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
void SmiAddConstant(Register dst, Register src, Smi* constant);
@@ -554,7 +570,8 @@ class MacroAssembler: public Assembler {
void SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
@@ -567,7 +584,8 @@ class MacroAssembler: public Assembler {
void SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Negating a smi can give a negative zero or too large positive value.
@@ -578,8 +596,8 @@ class MacroAssembler: public Assembler {
Label::Distance near_jump = Label::kFar);
  // Adds smi values and returns the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
+  // If dst is src1, then src1 will be destroyed if the operation is
+  // successful; otherwise it is kept intact.
void SmiAdd(Register dst,
Register src1,
Register src2,
@@ -596,18 +614,13 @@ class MacroAssembler: public Assembler {
Register src2);
  // Subtracts smi values and returns the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
+  // If dst is src1, then src1 will be destroyed if the operation is
+  // successful; otherwise it is kept intact.
void SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- Register src2);
-
void SmiSub(Register dst,
Register src1,
const Operand& src2,
@@ -616,6 +629,10 @@ class MacroAssembler: public Assembler {
void SmiSub(Register dst,
Register src1,
+ Register src2);
+
+ void SmiSub(Register dst,
+ Register src1,
const Operand& src2);
// Multiplies smi values and return the result as a smi,
@@ -739,6 +756,17 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
+  // Generate code to do a lookup in the number string cache. If the number
+  // in the register object is found in the cache, the generated code falls
+  // through with the result in the result register. The object and result
+  // registers can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found; only the content of the object
+  // register is guaranteed to stay unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
// If object is a string, its map is loaded into object_map.
void JumpIfNotString(Register object,
Register object_map,
@@ -771,6 +799,11 @@ class MacroAssembler: public Assembler {
Label* on_fail,
Label::Distance near_jump = Label::kFar);
+ void EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask);
+
// Checks if the given register or operand is a unique name
void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
Label::Distance distance = Label::kFar);
@@ -780,10 +813,20 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Macro instructions.
+ // Load/store with specific representation.
+ void Load(Register dst, const Operand& src, Representation r);
+ void Store(const Operand& dst, Register src, Representation r);
+
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
+  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+  // register, which hinders register renaming and lengthens dependence
+  // chains. So we use xorps to clear the dst register before cvtsi2sd.
+ void Cvtlsi2sd(XMMRegister dst, Register src);
+ void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -801,27 +844,7 @@ class MacroAssembler: public Assembler {
// Load a heap object and handle the case of new-space objects by
// indirecting via a global cell.
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void CmpHeapObject(Register reg, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
- void CmpObject(Register reg, Handle<Object> object) {
- AllowDeferredHandleDereference heap_object_check;
- if (object->IsHeapObject()) {
- CmpHeapObject(reg, Handle<HeapObject>::cast(object));
- } else {
- Cmp(reg, object);
- }
- }
+ void MoveHeapObject(Register result, Handle<Object> object);
// Load a global cell into a register.
void LoadGlobalCell(Register dst, Handle<Cell> cell);
@@ -835,6 +858,12 @@ class MacroAssembler: public Assembler {
void Pop(Register dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
+ void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
+ void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
+ void Move(Register dst, ExternalReference ext) {
+ movq(dst, reinterpret_cast<Address>(ext.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+ }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -920,13 +949,8 @@ class MacroAssembler: public Assembler {
Label* fail,
int elements_offset = 0);
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success);
+ // Compare an object's map with the specified map.
+ void CompareMap(Register obj, Handle<Map> map);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -1046,6 +1070,12 @@ class MacroAssembler: public Assembler {
// Propagate an uncatchable exception out of the current JS stack.
void ThrowUncatchable(Register value);
+ // Throw a message string as an exception.
+ void Throw(BailoutReason reason);
+
+ // Throw a message string as an exception if a condition is not true.
+ void ThrowIf(Condition cc, BailoutReason reason);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -1232,13 +1262,22 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ }
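A hedged usage sketch of the new overloads (the runtime id here is illustrative only):

    __ CallRuntime(Runtime::kStackGuard, 0);          // default: kDontSaveFPRegs
    __ CallRuntimeSaveDoubles(Runtime::kStackGuard);  // saves and restores XMM registers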
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1274,7 +1313,8 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_rbp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
@@ -1360,8 +1400,6 @@ class MacroAssembler: public Assembler {
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
@@ -1384,9 +1422,24 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to equal
+ // If allocation info is present, condition flags are set to equal.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
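A hypothetical call site, e.g. from an elements-kind transition stub (the register choice is illustrative):

    Label memento_found;
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, &memento_found);
    // Fast path continues here; memento_found takes the slow path.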
+
+ // Jumps to the found label if a prototype map has dictionary elements.
+ void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+ Register scratch1, Label* found);
private:
// Order general registers are pushed by Pushad.
@@ -1396,7 +1449,6 @@ class MacroAssembler: public Assembler {
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
- bool allow_stub_calls_;
bool has_frame_;
bool root_array_available_;
@@ -1430,7 +1482,7 @@ class MacroAssembler: public Assembler {
// accessible via StackSpaceOperand.
void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
// Loads the top of new-space into the result register.
diff --git a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
index ca834e2771f..3e65a68b831 100644
--- a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -618,7 +618,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
- __ movq(rbx, ExternalReference::re_word_character_map());
+ __ Move(rbx, ExternalReference::re_word_character_map());
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
@@ -632,7 +632,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate('z'));
__ j(above, &done);
}
- __ movq(rbx, ExternalReference::re_word_character_map());
+ __ Move(rbx, ExternalReference::re_word_character_map());
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
@@ -718,7 +718,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ movq(rcx, rsp);
- __ movq(kScratchRegister, stack_limit);
+ __ Move(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
diff --git a/chromium/v8/src/x64/simulator-x64.cc b/chromium/v8/src/x64/simulator-x64.cc
index 209aa2d3070..448b025a6bf 100644
--- a/chromium/v8/src/x64/simulator-x64.cc
+++ b/chromium/v8/src/x64/simulator-x64.cc
@@ -24,4 +24,3 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/chromium/v8/src/x64/stub-cache-x64.cc b/chromium/v8/src/x64/stub-cache-x64.cc
index 365bd385799..c87f00fc4db 100644
--- a/chromium/v8/src/x64/stub-cache-x64.cc
+++ b/chromium/v8/src/x64/stub-cache-x64.cc
@@ -300,32 +300,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string, leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
__ movq(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -383,17 +379,12 @@ static void CompileCallLoadPropertyWithInterceptor(
Register receiver,
Register holder,
Register name,
- Handle<JSObject> holder_obj) {
+ Handle<JSObject> holder_obj,
+ IC::UtilityId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ Set(rax, StubCache::kInterceptorArgsLength);
- __ LoadAddress(rbx, ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
+ __ CallExternalReference(
+ ExternalReference(IC_Utility(id), masm->isolate()),
+ StubCache::kInterceptorArgsLength);
}
@@ -440,133 +431,216 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
}
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context);
+
+
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- rsp[16] : api function
- // (first fast api call extra argument)
- // -- rsp[24] : api call data
- // -- rsp[32] : isolate
- // -- rsp[40] : ReturnValue default value
- // -- rsp[48] : ReturnValue
- //
- // -- rsp[56] : last argument
- // -- ...
- // -- rsp[(argc + 6) * 8] : first argument
- // -- rsp[(argc + 7) * 8] : receiver
- // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+ StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments);
+
+ // Save calling context.
+ int offset = argc + kFastApiCallArguments;
+ __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi);
+
// Get the function and set up the context.
Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(rdi, function);
+ __ Move(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- int api_call_argc = argc + kFastApiCallArguments;
- StackArgumentsAccessor args(rsp, api_call_argc);
-
- // Pass the additional arguments.
- __ movq(args.GetArgumentOperand(api_call_argc - 1), rdi);
+ // Construct the FunctionCallbackInfo on the stack.
+ __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(args.GetArgumentOperand(api_call_argc - 2), rbx);
+ __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx);
} else {
- __ Move(args.GetArgumentOperand(api_call_argc - 2), call_data);
+ __ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data);
}
- __ movq(kScratchRegister,
+ __ Move(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(args.GetArgumentOperand(api_call_argc - 3), kScratchRegister);
+ __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex),
+ kScratchRegister);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(args.GetArgumentOperand(api_call_argc - 4), kScratchRegister);
- __ movq(args.GetArgumentOperand(api_call_argc - 5), kScratchRegister);
+ __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex),
+ kScratchRegister);
+ __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset),
+ kScratchRegister);
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
- __ lea(rbx, Operand(rsp, kFastApiCallArguments * kPointerSize));
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(rax, Operand(rsp, 1 * kPointerSize));
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
-
-#if defined(__MINGW64__) || defined(_WIN64)
- Register arguments_arg = rcx;
- Register callback_arg = rdx;
-#else
- Register arguments_arg = rdi;
- Register callback_arg = rsi;
-#endif
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiStackSpace);
-
- __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
- __ addq(rbx, Immediate(argc * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_.
- __ Set(StackSpaceOperand(2), argc); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
-
- // v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
-
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-
- __ CallApiFunctionAndReturn(function_address,
- thunk_address,
- callback_arg,
- api_call_argc + 1,
- kFastApiCallArguments + 1);
+ GenerateFastApiCallBody(masm, optimization, argc, false);
}
// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that will be removed
+// when API call ICs are generated in Hydrogen.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
Register receiver,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
int argc,
Register* values) {
ASSERT(optimization.is_simple_api_call());
- ASSERT(!receiver.is(scratch));
- const int stack_space = kFastApiCallArguments + argc + 1;
// Save the return address.
- __ movq(scratch, Operand(rsp, 0));
- // Assign stack space for the call arguments.
- __ subq(rsp, Immediate(stack_space * kPointerSize));
- // Move the return address on top of the stack.
- __ movq(Operand(rsp, 0), scratch);
- // Write holder to stack frame.
- __ movq(Operand(rsp, 1 * kPointerSize), receiver);
- // Write receiver to stack frame.
- int index = stack_space;
- __ movq(Operand(rsp, index-- * kPointerSize), receiver);
+ __ pop(scratch1);
+
+ // receiver
+ __ push(receiver);
+
// Write the arguments to the stack frame.
for (int i = 0; i < argc; i++) {
- ASSERT(!receiver.is(values[i]));
- ASSERT(!scratch.is(values[i]));
- __ movq(Operand(rsp, index-- * kPointerSize), values[i]);
+ Register arg = values[argc-1-i];
+ ASSERT(!receiver.is(arg));
+ ASSERT(!scratch1.is(arg));
+ ASSERT(!scratch2.is(arg));
+ ASSERT(!scratch3.is(arg));
+ __ push(arg);
}
- GenerateFastApiCall(masm, optimization, argc);
+ typedef FunctionCallbackArguments FCA;
+
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kCalleeIndex == 5);
+ STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+ STATIC_ASSERT(FCA::kArgsLength == 7);
+
+ // context save
+ __ push(rsi);
+
+ // Get the function and set up the context.
+ Handle<JSFunction> function = optimization.constant_function();
+ __ Move(scratch2, function);
+ __ push(scratch2);
+
+ Isolate* isolate = masm->isolate();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data(api_call_info->data(), isolate);
+ // Push data from ExecutableAccessorInfo.
+ bool call_data_undefined = false;
+ if (isolate->heap()->InNewSpace(*call_data)) {
+ __ Move(scratch2, api_call_info);
+ __ movq(scratch3, FieldOperand(scratch2, CallHandlerInfo::kDataOffset));
+ } else if (call_data->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ Move(scratch3, call_data);
+ }
+ // call data
+ __ push(scratch3);
+ if (!call_data_undefined) {
+ __ LoadRoot(scratch3, Heap::kUndefinedValueRootIndex);
+ }
+ // return value
+ __ push(scratch3);
+ // return value default
+ __ push(scratch3);
+ // isolate
+ __ Move(scratch3,
+ ExternalReference::isolate_address(masm->isolate()));
+ __ push(scratch3);
+ // holder
+ __ push(receiver);
+
+ ASSERT(!scratch1.is(rax));
+ // Store the receiver address for GenerateFastApiCallBody.
+ __ movq(rax, rsp);
+
+ // return address
+ __ push(scratch1);
+
+ GenerateFastApiCallBody(masm, optimization, argc, true);
+}
+
+
+static void GenerateFastApiCallBody(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc,
+ bool restore_context) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- rsp[64] : last argument
+ // -- ...
+ // -- rsp[(argc + 7) * 8] : first argument
+ // -- rsp[(argc + 8) * 8] : receiver
+ //
+ // rax : receiver address
+ // -----------------------------------
+ typedef FunctionCallbackArguments FCA;
+
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ // Function address is a foreign pointer outside V8's heap.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+
+ // Allocate the FunctionCallbackInfo structure in the arguments' space,
+ // since it is not controlled by the GC.
+ const int kApiStackSpace = 4;
+
+ __ PrepareCallApiFunction(kApiStackSpace);
+
+ __ movq(StackSpaceOperand(0), rax); // FunctionCallbackInfo::implicit_args_.
+ __ addq(rax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ __ movq(StackSpaceOperand(1), rax); // FunctionCallbackInfo::values_.
+ __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
+ // FunctionCallbackInfo::is_construct_call_.
+ __ Set(StackSpaceOperand(3), 0);
+
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register arguments_arg = rcx;
+ Register callback_arg = rdx;
+#else
+ Register arguments_arg = rdi;
+ Register callback_arg = rsi;
+#endif
+
+ // v8::FunctionCallback's argument.
+ __ lea(arguments_arg, StackSpaceOperand(0));
+
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+
+ StackArgumentsAccessor args_from_rbp(rbp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
+ kFastApiCallArguments - 1 - FCA::kContextSaveIndex);
+ Operand return_value_operand = args_from_rbp.GetArgumentOperand(
+ kFastApiCallArguments - 1 - FCA::kReturnValueOffset);
+ __ CallApiFunctionAndReturn(
+ function_address,
+ thunk_address,
+ callback_arg,
+ argc + kFastApiCallArguments + 1,
+ return_value_operand,
+ restore_context ? &context_restore_operand : NULL);
}
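For orientation, the four StackSpaceOperand slots written above correspond, in order, to the FunctionCallbackInfo fields (a reading of this code, not a quote from the v8 headers):

    // StackSpaceOperand(0) <- implicit_args_
    // StackSpaceOperand(1) <- values_
    // StackSpaceOperand(2) <- length_
    // StackSpaceOperand(3) <- is_construct_call_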
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
+ CallInterceptorCompiler(CallStubCompiler* stub_compiler,
const ParameterCount& arguments,
Register name,
- Code::ExtraICState extra_ic_state)
+ ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
name_(name),
@@ -641,9 +715,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3,
+ name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -657,10 +732,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(interceptor_holder, masm->isolate()), holder,
+ handle(lookup->holder()), scratch1, scratch2, scratch3,
+ name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
@@ -673,13 +748,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
Handle<JSFunction> fun = optimization.constant_function();
- ParameterCount expected(fun);
- __ InvokeFunction(fun, expected, arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ stub_compiler_->GenerateJumpFunction(object, fun);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -706,20 +776,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Handle<JSObject> interceptor_holder,
Label* miss_label) {
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
+ stub_compiler_->CheckPrototypes(
+ IC::CurrentTypeOf(object, masm->isolate()), receiver,
+ interceptor_holder, scratch1, scratch2, scratch3, name, miss_label);
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- StubCache::kInterceptorArgsLength);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, interceptor_holder,
+ IC::kLoadPropertyWithInterceptorForCall);
// Restore the name_ register.
__ pop(name_);
@@ -734,17 +801,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
+ __ push(receiver);
+ __ push(holder);
+ __ push(name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(
+ masm, receiver, holder, name_, holder_obj,
+ IC::kLoadPropertyWithInterceptorOnly);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
+ __ pop(name_);
+ __ pop(holder);
+ __ pop(receiver);
// Leave the internal frame.
}
@@ -752,16 +819,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ j(not_equal, interceptor_succeeded);
}
- StubCompiler* stub_compiler_;
+ CallStubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
- Code::ExtraICState extra_ic_state_;
+ ExtraICState extra_ic_state_;
};
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ Move(this->name(), name);
@@ -784,7 +851,7 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -802,19 +869,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -823,7 +890,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (details.type() == CONSTANT) {
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
- __ CmpObject(value_reg, constant);
+ __ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -835,7 +902,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch1, value_reg);
- __ cvtlsi2sd(xmm0, scratch1);
+ __ Cvtlsi2sd(xmm0, scratch1);
__ jmp(&do_store);
__ bind(&heap_number);
@@ -947,15 +1014,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -989,7 +1056,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch2, value_reg);
- __ cvtlsi2sd(xmm0, scratch2);
+ __ Cvtlsi2sd(xmm0, scratch2);
__ jmp(&do_store);
__ bind(&heap_number);
@@ -1043,26 +1110,6 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<JSGlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
@@ -1072,7 +1119,7 @@ void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
#define __ ACCESS_MASM((masm()))
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Register holder_reg,
@@ -1082,11 +1129,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Move(scratch1, Handle<Map>(object->map()));
+ __ Move(scratch1, receiver_map);
- Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1098,29 +1145,40 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register reg = object_reg;
int depth = 0;
+ StackArgumentsAccessor args(rsp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ const int kHolderIndex = kFastApiCallArguments - 1 -
+ FunctionCallbackArguments::kHolderIndex;
+
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPCOnStackSize), object_reg);
+ __ movq(args.GetArgumentOperand(kHolderIndex), object_reg);
}
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder->map());
+ // Traverse the prototype chain, checking the maps for fast and global
+ // objects, or doing a negative lookup for normal (dictionary) objects.
+ while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap() &&
+ !current_map->IsJSGlobalProxyMap()) {
if (!name->IsUniqueName()) {
ASSERT(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
+ ASSERT(current.is_null() ||
+ current->property_dictionary()->FindEntry(*name) ==
NameDictionary::kNotFound);
GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -1131,20 +1189,23 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
if (in_new_space) {
// Save the map in scratch1 for later.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
- if (current->IsJSGlobalProxy()) {
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(
+ masm(), Handle<JSGlobalObject>::cast(current), name,
+ scratch2, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -1159,70 +1220,65 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPCOnStackSize), reg);
+ __ movq(args.GetArgumentOperand(kHolderIndex), reg);
}
// Go to the next object in the prototype chain.
current = prototype;
+ current_map = handle(current->map());
}
- ASSERT(current.is_identical_to(holder));
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
// Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()), miss, DONT_DO_SMI_CHECK);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- if (current->IsJSGlobalProxy()) {
+ ASSERT(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
// Return the register containing the holder.
return reg;
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
__ bind(miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
- __ jmp(success);
+ Label success;
+ __ jmp(&success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
}
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
+Register LoadStubCompiler::CallbackHandlerFrontend(
+ Handle<Type> type,
Register object_reg,
Handle<JSObject> holder,
Handle<Name> name,
- Label* success,
Handle<Object> callback) {
Label miss;
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+ Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
ASSERT(!reg.is(scratch2()));
@@ -1259,15 +1315,15 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(name, success, &miss);
+ HandlerFrontendFooter(name, &miss);
return reg;
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ movq(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1283,26 +1339,28 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+ masm(), call_optimization, receiver(),
+ scratch1(), scratch2(), name(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch4().is(reg));
__ PopReturnAddressTo(scratch4());
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
__ push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
ASSERT(!scratch2().is(reg));
@@ -1320,7 +1378,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(reg); // holder
__ push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
Address getter_address = v8::ToCData<Address>(callback->getter());
@@ -1345,10 +1403,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kArgStackSpace = 1;
__ PrepareCallApiFunction(kArgStackSpace);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ lea(rax, Operand(name_arg, 6 * kPointerSize));
+ __ lea(rax, Operand(name_arg, 1 * kPointerSize));
- // v8::AccessorInfo::args_.
+ // v8::PropertyAccessorInfo::args_.
__ movq(StackSpaceOperand(0), rax);
// The context register (rsi) has been saved in PrepareCallApiFunction and
@@ -1357,24 +1414,30 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ // The name handler is counted as an argument.
+ StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
+ Operand return_value_operand = args.GetArgumentOperand(
+ PropertyCallbackArguments::kArgsLength - 1 -
+ PropertyCallbackArguments::kReturnValueOffset);
__ CallApiFunctionAndReturn(getter_address,
thunk_address,
getter_arg,
kStackSpace,
- 6);
+ return_value_operand,
+ NULL);
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadObject(rax, value);
+ __ Move(rax, value);
__ ret(0);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
- Handle<JSObject> object,
+ Handle<Object> object,
Handle<JSObject> interceptor_holder,
LookupResult* lookup,
Handle<Name> name) {
@@ -1425,11 +1488,9 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder,
+ IC::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -1473,19 +1534,12 @@ void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
}
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, miss);
- CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
+void CallStubCompiler::GenerateFunctionCheck(Register function,
+ Register scratch,
+ Label* miss) {
+ __ JumpIfSmi(function, miss);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+ __ j(not_equal, miss);
}
@@ -1504,9 +1558,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ JumpIfSmi(rdi, miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ j(not_equal, miss);
+ GenerateFunctionCheck(rdi, rax, miss);
// Check the shared function info. Make sure it hasn't changed.
__ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
@@ -1522,7 +1574,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> code =
isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
kind_,
- extra_state_);
+ extra_state());
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1531,57 +1583,19 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
Label miss;
- GenerateNameCheck(name, &miss);
-
- StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
- name, &miss);
+ Register reg = HandlerFrontendHeader(
+ object, holder, name, RECEIVER_MAP_CHECK, &miss);
GenerateFastPropertyLoad(masm(), rdi, reg, index.is_inobject(holder),
index.translate(holder), Representation::Tagged());
+ GenerateJumpFunction(object, rdi, &miss);
- // Check that the function really is a function.
- __ JumpIfSmi(rdi, &miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::FIELD, name);
+ return GetCode(Code::FAST, name);
}
@@ -1594,28 +1608,16 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
Code::StubType type) {
Label miss;
- // Check that function is still array
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
- site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ site->SetElementsKind(GetInitialFastElementsKind());
Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
+ const int argc = arguments().immediate();
__ movq(rax, Immediate(argc));
__ Move(rbx, site_feedback_cell);
__ Move(rdi, function);
@@ -1623,8 +1625,7 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
ArrayConstructorStub stub(isolate());
__ TailCallStub(&stub);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -1638,30 +1639,21 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
+ return Handle<Code>::null();
+ }
Label miss;
- GenerateNameCheck(name, &miss);
+
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
const int argc = arguments().immediate();
StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
-
if (argc == 0) {
// Noop, return the length.
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1874,8 +1866,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
1);
}
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -1889,29 +1880,18 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+ // If object is not an array or is observed or sealed, bail out to regular
+ // call.
+ if (!object->IsJSArray() ||
+ !cell.is_null() ||
+ Handle<JSArray>::cast(object)->map()->is_observed() ||
+ !Handle<JSArray>::cast(object)->map()->is_extensible()) {
+ return Handle<Code>::null();
+ }
Label miss, return_undefined, call_builtin;
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
- __ movq(rdx, args.GetReceiverOperand());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1944,6 +1924,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
rcx, times_pointer_size,
FixedArray::kHeaderSize),
r9);
+ const int argc = arguments().immediate();
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
@@ -1956,8 +1937,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
argc + 1,
1);
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -1971,44 +1951,27 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
- const int argc = arguments().immediate();
- StackArgumentsAccessor args(rsp, argc);
-
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
+
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = rbx;
Register index = rdi;
Register result = rax;
+ const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
+
__ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
__ movq(index, args.GetArgumentOperand(1));
@@ -2038,8 +2001,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
__ bind(&miss);
// Restore function name in rcx.
__ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2053,14 +2015,6 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
// If object is not a string, bail out to regular call.
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
@@ -2072,21 +2026,12 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
+ (CallICBase::StringStubState::decode(extra_state()) ==
DEFAULT_STRING_STUB)) {
index_out_of_range_label = &miss;
}
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
+
+ HandlerFrontendHeader(object, holder, name, STRING_CHECK, &name_miss);
Register receiver = rax;
Register index = rdi;
@@ -2121,8 +2066,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
__ bind(&miss);
// Restore function name in rcx.
__ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&name_miss);
// Return the generated code.
return GetCode(type, name);
@@ -2136,14 +2080,6 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
@@ -2151,23 +2087,16 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ movq(rdx, args.GetArgumentOperand(argc - 1));
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the char code argument.
Register code = rbx;
- __ movq(code, args.GetArgumentOperand(argc));
+ __ movq(code, args.GetArgumentOperand(1));
// Check the code is a smi.
Label slow;
@@ -2183,19 +2112,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm(), call_helper);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2209,14 +2131,8 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 4] : receiver
- // -----------------------------------
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -2224,26 +2140,16 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
return Handle<Code>::null();
}
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ Label miss, slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(1));
// Check if the argument is a smi.
Label smi;
@@ -2251,7 +2157,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ JumpIfSmi(rax, &smi);
// Check if the argument is a heap number and load its value into xmm0.
- Label slow;
__ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
__ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
@@ -2277,7 +2182,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Label already_round;
__ bind(&conversion_failure);
int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
- __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+ __ movq(rbx, kTwoMantissaBits);
__ movq(xmm1, rbx);
__ ucomisd(xmm0, xmm1);
__ j(above_equal, &already_round);
@@ -2298,7 +2203,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Subtract 1 if the argument was less than the tentative result.
int64_t kOne = V8_INT64_C(0x3ff0000000000000);
- __ movq(rbx, kOne, RelocInfo::NONE64);
+ __ movq(rbx, kOne);
__ movq(xmm1, rbx);
__ andpd(xmm1, xmm2);
__ subsd(xmm0, xmm1);
@@ -2310,19 +2215,15 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Return the argument (when it's an already round heap number).
__ bind(&already_round);
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(1));
__ ret(2 * kPointerSize);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2336,14 +2237,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
@@ -2351,21 +2244,15 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ movq(rdx, args.GetArgumentOperand(argc - 1));
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ if (!cell.is_null()) {
ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
+
// Load the (only) argument into rax.
- __ movq(rax, args.GetArgumentOperand(argc));
+ __ movq(rax, args.GetArgumentOperand(1));
// Check if the argument is a smi.
Label not_smi;
@@ -2395,15 +2282,14 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
__ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
Label negative_sign;
const int sign_mask_shift =
(HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE64);
+ __ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
__ testq(rbx, rdi);
__ j(not_zero, &negative_sign);
__ ret(2 * kPointerSize);
@@ -2413,22 +2299,15 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ bind(&negative_sign);
__ xor_(rbx, rdi);
__ AllocateHeapNumber(rax, rdx, &slow);
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
__ ret(2 * kPointerSize);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
__ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ // We do not have to patch the receiver because the function makes no use of
+ // it.
+ GenerateJumpFunctionIgnoreReceiver(function);
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(type, name);
@@ -2471,8 +2350,8 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, depth, &miss);
+ CheckPrototypes(IC::CurrentTypeOf(object, isolate()), rdx, holder,
+ rbx, rax, rdi, name, depth, &miss);
// Move the return address on top of the stack.
__ movq(rax,
@@ -2484,37 +2363,48 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss_before_stack_reserved);
// Return the generated code.
return GetCode(function);
}
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
+void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
+ Label success;
+ // Check that the object is a boolean.
+ __ CompareRoot(object, Heap::kTrueValueRootIndex);
+ __ j(equal, &success);
+ __ CompareRoot(object, Heap::kFalseValueRootIndex);
+ __ j(not_equal, miss);
+ __ bind(&success);
+}
+
+
+void CallStubCompiler::PatchGlobalProxy(Handle<Object> object) {
+ if (object->IsGlobalObject()) {
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(args.GetReceiverOperand(), rdx);
+ }
+}
+
+
+Register CallStubCompiler::HandlerFrontendHeader(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ CheckType check,
+ Label* miss) {
+ GenerateNameCheck(name, miss);
+
+ Register reg = rdx;
StackArgumentsAccessor args(rsp, arguments());
- __ movq(rdx, args.GetReceiverOperand());
+ __ movq(reg, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss);
+ __ JumpIfSmi(reg, miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2527,134 +2417,81 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
__ IncrementCounter(counters->call_const(), 1);
// Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
- rdi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
+ reg = CheckPrototypes(IC::CurrentTypeOf(object, isolate()), reg, holder,
+ rbx, rax, rdi, name, miss);
break;
- case STRING_CHECK:
+ case STRING_CHECK: {
// Check that the object is a string.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, &miss);
+ __ CmpObjectType(reg, FIRST_NONSTRING_TYPE, rax);
+ __ j(above_equal, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ masm(), Context::STRING_FUNCTION_INDEX, rax, miss);
break;
-
- case SYMBOL_CHECK:
+ }
+ case SYMBOL_CHECK: {
// Check that the object is a symbol.
- __ CmpObjectType(rdx, SYMBOL_TYPE, rax);
- __ j(not_equal, &miss);
+ __ CmpObjectType(reg, SYMBOL_TYPE, rax);
+ __ j(not_equal, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::SYMBOL_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ masm(), Context::SYMBOL_FUNCTION_INDEX, rax, miss);
break;
-
+ }
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, &miss);
+ __ JumpIfSmi(reg, &fast);
+ __ CmpObjectType(reg, HEAP_NUMBER_TYPE, rax);
+ __ j(not_equal, miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ masm(), Context::NUMBER_FUNCTION_INDEX, rax, miss);
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
+ GenerateBooleanCheck(reg, miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, miss);
break;
}
}
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
+ if (check != RECEIVER_MAP_CHECK) {
+ Handle<Object> prototype(object->GetPrototype(isolate()), isolate());
+ reg = CheckPrototypes(
+ IC::CurrentTypeOf(prototype, isolate()),
+ rax, holder, rbx, rdx, rdi, name, miss);
+ }
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount expected(function);
- __ InvokeFunction(function, expected, arguments(),
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
+ return reg;
}
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<PropertyCell>::null(),
- function, Handle<String>::cast(name),
- Code::CONSTANT);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
+void CallStubCompiler::GenerateJumpFunction(Handle<Object> object,
+ Register function,
+ Label* miss) {
+ // Check that the function really is a function.
+ GenerateFunctionCheck(function, rbx, miss);
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
+ if (!function.is(rdi)) __ movq(rdi, function);
+ PatchGlobalProxy(object);
- // Return the generated code.
- return GetCode(function);
+ // Invoke the function.
+ __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind());
}
Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
Label miss;
GenerateNameCheck(name, &miss);
-
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
@@ -2662,39 +2499,19 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
StackArgumentsAccessor args(rsp, arguments());
__ movq(rdx, args.GetReceiverOperand());
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
+ CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state());
compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
&miss);
// Restore receiver.
__ movq(rdx, args.GetReceiverOperand());
- // Check that the function really is a function.
- __ JumpIfSmi(rax, &miss);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-
- // Invoke the function.
- __ movq(rdi, rax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
+ GenerateJumpFunction(object, rax, &miss);
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+ HandlerFrontendFooter(&miss);
// Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
+ return GetCode(Code::FAST, name);
}
@@ -2704,16 +2521,6 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<PropertyCell> cell,
Handle<JSFunction> function,
Handle<Name> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
if (HasCustomCallGenerator(function)) {
Handle<Code> code = CompileCustomCall(
object, holder, cell, function, Handle<String>::cast(name),
@@ -2723,39 +2530,14 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
}
Label miss;
- GenerateNameCheck(name, &miss);
-
- StackArgumentsAccessor args(rsp, arguments());
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
+ // Potentially loads a closure that matches the shared function info of the
+ // function, rather than function.
GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(args.GetReceiverOperand(), rdx);
- }
-
- // Set up the context (function already in rdi).
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
+ GenerateJumpFunction(object, rdi, function);
+ HandlerFrontendFooter(&miss);
// Return the generated code.
return GetCode(Code::NORMAL, name);
@@ -2767,9 +2549,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
__ PopReturnAddressTo(scratch1());
__ push(receiver());
@@ -2784,7 +2565,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ TailCallExternalReference(store_callback_property, 4, 1);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2793,16 +2574,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
- Label success;
- HandlerFrontend(object, receiver(), holder, name, &success);
- __ bind(&success);
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
+ receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
- masm(), call_optimization, receiver(), scratch3(), 1, values);
+ masm(), call_optimization, receiver(), scratch1(),
+ scratch2(), this->name(), 1, values);
// Return the generated code.
- return GetCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2860,16 +2641,15 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ push(receiver());
__ push(this->name());
__ push(value());
- __ Push(Smi::FromInt(strict_mode()));
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
+ __ TailCallExternalReference(store_ic_property, 3, 1);
// Return the generated code.
- return GetCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2908,23 +2688,18 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Handle<JSGlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<Type> type,
+ Handle<JSObject> last,
+ Handle<Name> name) {
+ NonexistentHandlerFrontend(type, last, name);
- __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ ret(0);
// Return the generated code.
- return GetCode(kind(), Code::NONEXISTENT, name);
+ return GetCode(kind(), Code::FAST, name);
}
@@ -2977,6 +2752,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -2988,7 +2764,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(rax);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,
@@ -3011,19 +2787,16 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
+ Handle<Type> type,
Handle<GlobalObject> global,
Handle<PropertyCell> cell,
Handle<Name> name,
bool is_dont_delete) {
- Label success, miss;
+ Label miss;
// TODO(verwaest): Directly store to rax. Currently we cannot do this, since
// rax is used as receiver(), which we would otherwise clobber before a
// potential miss.
-
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
+ HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
__ Move(rbx, cell);
@@ -3038,8 +2811,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
- HandlerFrontendFooter(name, &success, &miss);
- __ bind(&success);
+ HandlerFrontendFooter(name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
@@ -3047,12 +2819,12 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ ret(0);
// Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::NORMAL, name);
}
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
+ TypeHandleList* types,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -3063,17 +2835,25 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
GenerateNameCheck(name, this->name(), &miss);
}
- __ JumpIfSmi(receiver(), &miss);
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
Register map_reg = scratch1();
__ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
+ int receiver_count = types->length();
int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = receiver_maps->at(current);
+ Handle<Type> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
// Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
+ __ Cmp(map_reg, map);
+ if (type->Is(Type::Number())) {
+ ASSERT(!number_case.is_unused());
+ __ bind(&number_case);
+ }
__ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
}
}
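
The number_case plumbing above exists because a smi receiver has no map to compare, yet may still belong to a handled Number type: smis branch either to the Number handler's bind point or straight to miss. A minimal Python model of that dispatch order (the names and the smi/map representation are stand-ins, and the deprecated-map skip is omitted):

    def dispatch(receiver, cases, miss):
        # cases: list of (type_name, map_id, handler); a smi carries no
        # map, so it can only ever match a Number case.
        if isinstance(receiver, int):  # stand-in for JumpIfSmi
            for type_name, _, handler in cases:
                if type_name == "Number":
                    return handler(receiver)
            return miss(receiver)
        for type_name, map_id, handler in cases:
            if map_id == receiver["map"]:  # stand-in for the map compare
                return handler(receiver)
        return miss(receiver)

    cases = [("String", 1, lambda r: "string"),
             ("Number", 2, lambda r: "number")]
    assert dispatch(5, cases, lambda r: "miss") == "number"
    assert dispatch({"map": 1}, cases, lambda r: "miss") == "string"
    assert dispatch({"map": 9}, cases, lambda r: "miss") == "miss"
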
@@ -3100,12 +2880,12 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, miss_force_generic;
+ Label slow, miss;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(rax, &miss_force_generic);
+ __ JumpIfNotSmi(rax, &miss);
__ SmiToInteger32(rbx, rax);
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -3125,13 +2905,13 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// -----------------------------------
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
- __ bind(&miss_force_generic);
+ __ bind(&miss);
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
}
diff --git a/chromium/v8/src/zone.cc b/chromium/v8/src/zone.cc
index 9ee00edcba1..417f895e5ae 100644
--- a/chromium/v8/src/zone.cc
+++ b/chromium/v8/src/zone.cc
@@ -185,25 +185,31 @@ Address Zone::NewExpand(int size) {
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
Segment* head = segment_head_;
- int old_size = (head == NULL) ? 0 : head->size();
- static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
- int new_size_no_overhead = size + (old_size << 1);
- int new_size = kSegmentOverhead + new_size_no_overhead;
+ const size_t old_size = (head == NULL) ? 0 : head->size();
+ static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
+ const size_t new_size_no_overhead = size + (old_size << 1);
+ size_t new_size = kSegmentOverhead + new_size_no_overhead;
+ const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size);
// Guard against integer overflow.
- if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
+ if (new_size_no_overhead < static_cast<size_t>(size) ||
+ new_size < static_cast<size_t>(kSegmentOverhead)) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
}
- if (new_size < kMinimumSegmentSize) {
+ if (new_size < static_cast<size_t>(kMinimumSegmentSize)) {
new_size = kMinimumSegmentSize;
- } else if (new_size > kMaximumSegmentSize) {
+ } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) {
// Limit the size of new segments to avoid growing the segment size
// exponentially, thus putting pressure on contiguous virtual address space.
// All the while making sure to allocate a segment large enough to hold the
// requested size.
- new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
+ new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize));
}
- Segment* segment = NewSegment(new_size);
+ if (new_size > INT_MAX) {
+ V8::FatalProcessOutOfMemory("Zone");
+ return NULL;
+ }
+ Segment* segment = NewSegment(static_cast<int>(new_size));
if (segment == NULL) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
@@ -213,7 +219,10 @@ Address Zone::NewExpand(int size) {
Address result = RoundUp(segment->start(), kAlignment);
position_ = result + size;
// Check for address overflow.
- if (position_ < result) {
+ // (Should not happen since the segment is guaranteed to accommodate
+ // size bytes + header and alignment padding)
+ if (reinterpret_cast<uintptr_t>(position_)
+ < reinterpret_cast<uintptr_t>(result)) {
V8::FatalProcessOutOfMemory("Zone");
return NULL;
}
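
The rewritten growth policy doubles the previous segment's size, clamps the result to the [kMinimumSegmentSize, kMaximumSegmentSize] window while still honoring oversized requests, and now does the arithmetic in size_t with explicit checks before narrowing back to int. A rough Python model of the size computation (overhead and limits are illustrative values, not V8's; Python integers do not wrap, so the size_t overflow guard appears here as a plain bound check):

    K_SEGMENT_OVERHEAD = 40            # sizeof(Segment) + kAlignment, assumed
    K_MINIMUM_SEGMENT_SIZE = 8 * 1024
    K_MAXIMUM_SEGMENT_SIZE = 1024 * 1024
    INT_MAX = 2**31 - 1

    def next_segment_size(size, old_size):
        new_size = K_SEGMENT_OVERHEAD + size + (old_size << 1)
        min_new_size = K_SEGMENT_OVERHEAD + size
        if new_size < K_MINIMUM_SEGMENT_SIZE:
            new_size = K_MINIMUM_SEGMENT_SIZE
        elif new_size > K_MAXIMUM_SEGMENT_SIZE:
            # Stop the exponential growth, but always allocate enough to
            # hold the request itself plus overhead.
            new_size = max(min_new_size, K_MAXIMUM_SEGMENT_SIZE)
        if new_size > INT_MAX:
            raise MemoryError("Zone")  # mirrors V8::FatalProcessOutOfMemory
        return new_size

    assert next_segment_size(100, 0) == K_MINIMUM_SEGMENT_SIZE
    assert (next_segment_size(2 * K_MAXIMUM_SEGMENT_SIZE, 0)
            == K_SEGMENT_OVERHEAD + 2 * K_MAXIMUM_SEGMENT_SIZE)
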
diff --git a/chromium/v8/test/cctest/cctest.gyp b/chromium/v8/test/cctest/cctest.gyp
index ee7ffad6d35..2017d61a2f4 100644
--- a/chromium/v8/test/cctest/cctest.gyp
+++ b/chromium/v8/test/cctest/cctest.gyp
@@ -56,6 +56,7 @@
'test-circular-queue.cc',
'test-compiler.cc',
'test-condition-variable.cc',
+ 'test-constantpool.cc',
'test-conversions.cc',
'test-cpu.cc',
'test-cpu-profiler.cc',
@@ -90,10 +91,10 @@
'test-platform.cc',
'test-platform-tls.cc',
'test-profile-generator.cc',
- 'test-random.cc',
'test-random-number-generator.cc',
'test-regexp.cc',
'test-reloc-info.cc',
+ 'test-representation.cc',
'test-semaphore.cc',
'test-serialize.cc',
'test-socket.cc',
@@ -121,6 +122,7 @@
'test-code-stubs-ia32.cc',
'test-cpu-ia32.cc',
'test-disasm-ia32.cc',
+ 'test-macro-assembler-ia32.cc',
'test-log-stack-tracer.cc'
],
}],
@@ -139,13 +141,15 @@
'test-assembler-arm.cc',
'test-code-stubs.cc',
'test-code-stubs-arm.cc',
- 'test-disasm-arm.cc'
+ 'test-disasm-arm.cc',
+ 'test-macro-assembler-arm.cc'
],
}],
['v8_target_arch=="mipsel"', {
'sources': [
'test-assembler-mips.cc',
'test-disasm-mips.cc',
+ 'test-macro-assembler-mips.cc'
],
}],
[ 'OS=="linux"', {
diff --git a/chromium/v8/tools/android-sync.sh b/chromium/v8/tools/android-sync.sh
index 5d4ef2effdc..460e92d2a3d 100755
--- a/chromium/v8/tools/android-sync.sh
+++ b/chromium/v8/tools/android-sync.sh
@@ -88,7 +88,6 @@ function sync_dir {
echo -n "sync to $ANDROID_V8/$OUTDIR/$ARCH_MODE"
sync_file "$OUTDIR/$ARCH_MODE/cctest"
sync_file "$OUTDIR/$ARCH_MODE/d8"
-sync_file "$OUTDIR/$ARCH_MODE/preparser"
echo ""
echo -n "sync to $ANDROID_V8/tools"
sync_file tools/consarray.js
@@ -100,6 +99,8 @@ sync_file tools/profile_view.js
sync_file tools/logreader.js
sync_file tools/tickprocessor.js
echo ""
+sync_dir tools/profviz
+sync_dir test/intl
sync_dir test/message
sync_dir test/mjsunit
sync_dir test/preparser
diff --git a/chromium/v8/tools/consarray.js b/chromium/v8/tools/consarray.js
index c67abb79711..dbce1de2982 100644
--- a/chromium/v8/tools/consarray.js
+++ b/chromium/v8/tools/consarray.js
@@ -90,4 +90,3 @@ ConsArray.Cell = function(data, next) {
this.data = data;
this.next = next;
};
-
diff --git a/chromium/v8/tools/gen-postmortem-metadata.py b/chromium/v8/tools/gen-postmortem-metadata.py
index 0acb658c53b..28377273bad 100644
--- a/chromium/v8/tools/gen-postmortem-metadata.py
+++ b/chromium/v8/tools/gen-postmortem-metadata.py
@@ -68,6 +68,7 @@ consts_misc = [
{ 'name': 'SeqStringTag', 'value': 'kSeqStringTag' },
{ 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
+ { 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
{ 'name': 'FailureTag', 'value': 'kFailureTag' },
{ 'name': 'FailureTagMask', 'value': 'kFailureTagMask' },
@@ -88,6 +89,15 @@ consts_misc = [
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
+ { 'name': 'prop_desc_key',
+ 'value': 'DescriptorArray::kDescriptorKey' },
+ { 'name': 'prop_desc_details',
+ 'value': 'DescriptorArray::kDescriptorDetails' },
+ { 'name': 'prop_desc_value',
+ 'value': 'DescriptorArray::kDescriptorValue' },
+ { 'name': 'prop_desc_size',
+ 'value': 'DescriptorArray::kDescriptorSize' },
+
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
{ 'name': 'off_fp_marker',
@@ -113,7 +123,9 @@ extras_accessors = [
'ConsString, second, String, kSecondOffset',
'ExternalString, resource, Object, kResourceOffset',
'SeqOneByteString, chars, char, kHeaderSize',
+ 'SeqTwoByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
+ 'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
];
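
The new prop_desc_* constants expose the DescriptorArray entry layout (key/details/value triples after a fixed header) so postmortem tools can walk property descriptors without hard-coded offsets. A sketch of the arithmetic a consumer would do, with assumed values (the real ones are emitted into the generated metadata; compare grokdump's "i = 2 + di * 3" in the changes below):

    PROP_DESC_KEY, PROP_DESC_DETAILS, PROP_DESC_VALUE = 0, 1, 2
    PROP_DESC_SIZE = 3
    FIRST_DESCRIPTOR_SLOT = 2   # assumed header slots before the first entry

    def descriptor_slots(descriptor_index):
        base = FIRST_DESCRIPTOR_SLOT + descriptor_index * PROP_DESC_SIZE
        return (base + PROP_DESC_KEY,
                base + PROP_DESC_DETAILS,
                base + PROP_DESC_VALUE)

    # The descriptor at index 3 has key/details/value in slots 11, 12, 13.
    assert descriptor_slots(3) == (11, 12, 13)
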
diff --git a/chromium/v8/tools/generate-trig-table.py b/chromium/v8/tools/generate-trig-table.py
new file mode 100644
index 00000000000..c03cf73e2fe
--- /dev/null
+++ b/chromium/v8/tools/generate-trig-table.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for populating the lookup table for the
+# approximation of trigonometric functions.
+
+import sys, math
+
+SAMPLES = 1800
+
+TEMPLATE = """\
+// Copyright 2013 Google Inc. All Rights Reserved.
+
+// This file was generated from a python script.
+
+#include "v8.h"
+#include "trig-table.h"
+
+namespace v8 {
+namespace internal {
+
+ const double TrigonometricLookupTable::kSinTable[] =
+ { %(sine_table)s };
+ const double TrigonometricLookupTable::kCosXIntervalTable[] =
+ { %(cosine_table)s };
+ const int TrigonometricLookupTable::kSamples = %(samples)i;
+ const int TrigonometricLookupTable::kTableSize = %(table_size)i;
+ const double TrigonometricLookupTable::kSamplesOverPiHalf =
+ %(samples_over_pi_half)s;
+
+} } // v8::internal
+"""
+
+def main():
+ pi_half = math.pi / 2
+ interval = pi_half / SAMPLES
+ sin = []
+ cos_times_interval = []
+ table_size = SAMPLES + 2
+
+ for i in range(0, table_size):
+ sample = i * interval
+ sin.append(repr(math.sin(sample)))
+ cos_times_interval.append(repr(math.cos(sample) * interval))
+
+ output_file = sys.argv[1]
+ output = open(str(output_file), "w")
+ output.write(TEMPLATE % {
+ 'sine_table': ','.join(sin),
+ 'cosine_table': ','.join(cos_times_interval),
+ 'samples': SAMPLES,
+ 'table_size': table_size,
+ 'samples_over_pi_half': repr(SAMPLES / pi_half)
+ })
+
+if __name__ == "__main__":
+ main()
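
The table pairs sin(s_i) with cos(s_i) * interval at 1800 samples over [0, pi/2], which supports a first-order Taylor step from the nearest sample below x: sin(x) ~ sin(s_i) + cos(s_i) * (x - s_i). A sketch of the intended lookup, assuming a first-quadrant input and leaving range reduction aside (the consuming runtime code is not part of this diff):

    import math

    SAMPLES = 1800
    PI_HALF = math.pi / 2
    INTERVAL = PI_HALF / SAMPLES
    TABLE_SIZE = SAMPLES + 2

    SIN_TABLE = [math.sin(i * INTERVAL) for i in range(TABLE_SIZE)]
    COS_X_INTERVAL_TABLE = [math.cos(i * INTERVAL) * INTERVAL
                            for i in range(TABLE_SIZE)]
    SAMPLES_OVER_PI_HALF = SAMPLES / PI_HALF

    def approx_sin(x):
        # Valid for 0 <= x <= pi/2; i indexes the sample at or below x.
        t = x * SAMPLES_OVER_PI_HALF
        i = int(t)
        fraction = t - i
        return SIN_TABLE[i] + COS_X_INTERVAL_TABLE[i] * fraction

    assert abs(approx_sin(0.5) - math.sin(0.5)) < 1e-6

At 1800 samples the step is about 8.7e-4 radians, so the first-order error stays well below 1e-6 across the quadrant.
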
diff --git a/chromium/v8/tools/grokdump.py b/chromium/v8/tools/grokdump.py
index 317a7d6a911..d09c042204c 100755
--- a/chromium/v8/tools/grokdump.py
+++ b/chromium/v8/tools/grokdump.py
@@ -1029,7 +1029,8 @@ class Map(HeapObject):
class String(HeapObject):
def LengthOffset(self):
- return self.heap.PointerSize()
+ # First word after the map is the hash, the second is the length.
+ return self.heap.PointerSize() * 2
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
@@ -1215,18 +1216,18 @@ class DescriptorArray(object):
def Deleted(self, value):
return self.Decode(6, 1, value) == 1
- def Storage(self, value):
- return self.Decode(7, 11, value)
+ def FieldIndex(self, value):
+ return self.Decode(20, 11, value)
def Pointer(self, value):
- return self.Decode(18, 11, value)
+ return self.Decode(6, 11, value)
def Details(self, di, value):
return (
di,
self.Type(value),
self.Attributes(value),
- self.Storage(value),
+ self.FieldIndex(value),
self.Pointer(value)
)
@@ -1242,7 +1243,7 @@ class DescriptorArray(object):
i = 2 + di * 3
p.Print("0x%x" % (array.address + array.MemberOffset(i)))
p.Print("[%i] name: %s" % (di, array.Get(i + 0)))
- p.Print("[%i] details: %s %s enum %i pointer %i" % \
+ p.Print("[%i] details: %s %s field-index %i pointer %i" % \
self.Details(di, array.Get(i + 1)))
p.Print("[%i] value: %s" % (di, array.Get(i + 2)))
diff --git a/chromium/v8/tools/gyp/v8.gyp b/chromium/v8/tools/gyp/v8.gyp
index aa01a842f63..2164b74a85e 100644
--- a/chromium/v8/tools/gyp/v8.gyp
+++ b/chromium/v8/tools/gyp/v8.gyp
@@ -28,6 +28,7 @@
{
'variables': {
'v8_code': 1,
+ 'v8_random_seed%': 314159265,
},
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
'targets': [
@@ -112,10 +113,15 @@
'dependencies': [
'mksnapshot.<(v8_target_arch)#host',
'js2c#host',
+ 'generate_trig_table#host',
],
}, {
'toolsets': ['target'],
- 'dependencies': ['mksnapshot.<(v8_target_arch)', 'js2c'],
+ 'dependencies': [
+ 'mksnapshot.<(v8_target_arch)',
+ 'js2c',
+ 'generate_trig_table',
+ ],
}],
['component=="shared_library"', {
'defines': [
@@ -139,6 +145,7 @@
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
'actions': [
@@ -155,6 +162,11 @@
'--log-snapshot-positions',
'--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
],
+ 'conditions': [
+ ['v8_random_seed!=0', {
+ 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+ }],
+ ],
},
'action': [
'<@(_inputs)',
@@ -176,15 +188,16 @@
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
'../../src/snapshot-empty.cc',
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
- 'dependencies': ['js2c#host'],
+ 'dependencies': ['js2c#host', 'generate_trig_table#host'],
}, {
'toolsets': ['target'],
- 'dependencies': ['js2c'],
+ 'dependencies': ['js2c', 'generate_trig_table'],
}],
['component=="shared_library"', {
'defines': [
@@ -194,6 +207,32 @@
}],
]
},
+ { 'target_name': 'generate_trig_table',
+ 'type': 'none',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'actions': [
+ {
+ 'action_name': 'generate',
+ 'inputs': [
+ '../../tools/generate-trig-table.py',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
+ ],
+ 'action': [
+ 'python',
+ '../../tools/generate-trig-table.py',
+ '<@(_outputs)',
+ ],
+ },
+ ]
+ },
{
'target_name': 'v8_base.<(v8_target_arch)',
'type': 'static_library',
@@ -208,6 +247,10 @@
'../../src/accessors.h',
'../../src/allocation.cc',
'../../src/allocation.h',
+ '../../src/allocation-site-scopes.cc',
+ '../../src/allocation-site-scopes.h',
+ '../../src/allocation-tracker.cc',
+ '../../src/allocation-tracker.h',
'../../src/api.cc',
'../../src/api.h',
'../../src/apiutils.h',
@@ -270,6 +313,8 @@
'../../src/debug-agent.h',
'../../src/debug.cc',
'../../src/debug.h',
+ '../../src/default-platform.cc',
+ '../../src/default-platform.h',
'../../src/deoptimizer.cc',
'../../src/deoptimizer.h',
'../../src/disasm.h',
@@ -289,6 +334,8 @@
'../../src/execution.h',
'../../src/extensions/externalize-string-extension.cc',
'../../src/extensions/externalize-string-extension.h',
+ '../../src/extensions/free-buffer-extension.cc',
+ '../../src/extensions/free-buffer-extension.h',
'../../src/extensions/gc-extension.cc',
'../../src/extensions/gc-extension.h',
'../../src/extensions/statistics-extension.cc',
@@ -333,16 +380,17 @@
'../../src/hydrogen-bch.h',
'../../src/hydrogen-canonicalize.cc',
'../../src/hydrogen-canonicalize.h',
+ '../../src/hydrogen-check-elimination.cc',
+ '../../src/hydrogen-check-elimination.h',
'../../src/hydrogen-dce.cc',
'../../src/hydrogen-dce.h',
'../../src/hydrogen-dehoist.cc',
'../../src/hydrogen-dehoist.h',
- '../../src/hydrogen-deoptimizing-mark.cc',
- '../../src/hydrogen-deoptimizing-mark.h',
'../../src/hydrogen-environment-liveness.cc',
'../../src/hydrogen-environment-liveness.h',
'../../src/hydrogen-escape-analysis.cc',
'../../src/hydrogen-escape-analysis.h',
+ '../../src/hydrogen-flow-engine.h',
'../../src/hydrogen-instructions.cc',
'../../src/hydrogen-instructions.h',
'../../src/hydrogen.cc',
@@ -353,8 +401,12 @@
'../../src/hydrogen-infer-representation.h',
'../../src/hydrogen-infer-types.cc',
'../../src/hydrogen-infer-types.h',
+ '../../src/hydrogen-load-elimination.cc',
+ '../../src/hydrogen-load-elimination.h',
'../../src/hydrogen-mark-deoptimize.cc',
'../../src/hydrogen-mark-deoptimize.h',
+ '../../src/hydrogen-mark-unreachable.cc',
+ '../../src/hydrogen-mark-unreachable.h',
'../../src/hydrogen-minus-zero.cc',
'../../src/hydrogen-minus-zero.h',
'../../src/hydrogen-osr.cc',
@@ -397,6 +449,8 @@
'../../src/lithium-allocator-inl.h',
'../../src/lithium-allocator.cc',
'../../src/lithium-allocator.h',
+ '../../src/lithium-codegen.cc',
+ '../../src/lithium-codegen.h',
'../../src/lithium.cc',
'../../src/lithium.h',
'../../src/liveedit.cc',
@@ -409,8 +463,6 @@
'../../src/macro-assembler.h',
'../../src/mark-compact.cc',
'../../src/mark-compact.h',
- '../../src/marking-thread.h',
- '../../src/marking-thread.cc',
'../../src/messages.cc',
'../../src/messages.h',
'../../src/natives.h',
@@ -430,7 +482,6 @@
'../../src/platform/elapsed-timer.h',
'../../src/platform/time.cc',
'../../src/platform/time.h',
- '../../src/platform-posix.h',
'../../src/platform.h',
'../../src/platform/condition-variable.cc',
'../../src/platform/condition-variable.h',
@@ -804,6 +855,9 @@
]},
],
['OS=="win"', {
+ 'defines': [
+ '_CRT_RAND_S' # for rand_s()
+ ],
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
},
@@ -855,8 +909,8 @@
}],
['v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
]
}, { # v8_enable_i18n_support==0
'sources!': [
@@ -866,7 +920,13 @@
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ '<(icu_gyp_path):icudata',
+ ],
+ }],
+ ['v8_use_default_platform==0', {
+ 'sources!': [
+ '../../src/default-platform.cc',
+ '../../src/default-platform.h',
],
}],
],
@@ -918,10 +978,12 @@
'../../src/proxy.js',
'../../src/collection.js',
'../../src/object-observe.js',
+ '../../src/promise.js',
'../../src/generator.js',
'../../src/array-iterator.js',
'../../src/harmony-string.js',
'../../src/harmony-array.js',
+ '../../src/harmony-math.js'
],
},
'actions': [
diff --git a/chromium/v8/tools/js2c.py b/chromium/v8/tools/js2c.py
index 9492b0030c0..f67d053ad26 100644..100755
--- a/chromium/v8/tools/js2c.py
+++ b/chromium/v8/tools/js2c.py
@@ -116,41 +116,47 @@ def ExpandConstants(lines, constants):
return lines
+def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
+ pattern_match = name_pattern.search(lines, pos)
+ while pattern_match is not None:
+ # Scan over the arguments
+ height = 1
+ start = pattern_match.start()
+ end = pattern_match.end()
+ assert lines[end - 1] == '('
+ last_match = end
+ arg_index = [0] # Wrap state into array, to work around Python "scoping"
+ mapping = { }
+ def add_arg(str):
+ # Remember to expand recursively in the arguments
+ replacement = expander(str.strip())
+ mapping[macro.args[arg_index[0]]] = replacement
+ arg_index[0] += 1
+ while end < len(lines) and height > 0:
+ # We don't count commas at higher nesting levels.
+ if lines[end] == ',' and height == 1:
+ add_arg(lines[last_match:end])
+ last_match = end + 1
+ elif lines[end] in ['(', '{', '[']:
+ height = height + 1
+ elif lines[end] in [')', '}', ']']:
+ height = height - 1
+ end = end + 1
+ # Remember to add the last match.
+ add_arg(lines[last_match:end-1])
+ result = macro.expand(mapping)
+ # Replace the occurrence of the macro with the expansion
+ lines = lines[:start] + result + lines[end:]
+ pattern_match = name_pattern.search(lines, start + len(result))
+ return lines
+
def ExpandMacros(lines, macros):
# We allow macros to depend on the previously declared macros, but
# we don't allow self-dependencies or recursion.
for name_pattern, macro in reversed(macros):
- pattern_match = name_pattern.search(lines, 0)
- while pattern_match is not None:
- # Scan over the arguments
- height = 1
- start = pattern_match.start()
- end = pattern_match.end()
- assert lines[end - 1] == '('
- last_match = end
- arg_index = [0] # Wrap state into array, to work around Python "scoping"
- mapping = { }
- def add_arg(str):
- # Remember to expand recursively in the arguments
- replacement = ExpandMacros(str.strip(), macros)
- mapping[macro.args[arg_index[0]]] = replacement
- arg_index[0] += 1
- while end < len(lines) and height > 0:
- # We don't count commas at higher nesting levels.
- if lines[end] == ',' and height == 1:
- add_arg(lines[last_match:end])
- last_match = end + 1
- elif lines[end] in ['(', '{', '[']:
- height = height + 1
- elif lines[end] in [')', '}', ']']:
- height = height - 1
- end = end + 1
- # Remember to add the last match.
- add_arg(lines[last_match:end-1])
- result = macro.expand(mapping)
- # Replace the occurrence of the macro with the expansion
- lines = lines[:start] + result + lines[end:]
- pattern_match = name_pattern.search(lines, start + len(result))
+ def expander(s):
+ return ExpandMacros(s, macros)
+ lines = ExpandMacroDefinition(lines, 0, name_pattern, macro, expander)
return lines
class TextMacro:
@@ -210,6 +216,34 @@ def ReadMacros(lines):
raise ("Illegal line: " + line)
return (constants, macros)
+INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
+INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
+
+def ExpandInlineMacros(lines, filename):
+ pos = 0
+ while True:
+ macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
+ if macro_match is None:
+ # no more macros
+ return lines
+ name = macro_match.group(1)
+ args = [match.strip() for match in macro_match.group(2).split(',')]
+ end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end());
+ if end_macro_match is None:
+ raise ("Macro %s unclosed in %s" % (name, filename))
+ body = lines[macro_match.end():end_macro_match.start()]
+
+ # remove macro definition
+ lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
+ name_pattern = re.compile("\\b%s\\(" % name)
+ macro = TextMacro(args, body)
+
+ # advance position to where the macro definition was
+ pos = macro_match.start()
+
+ def non_expander(s):
+ return s
+ lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
@@ -325,6 +359,8 @@ def JS2C(source, target, env):
lines = ReadFile(filename)
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
+ lines = RemoveCommentsAndTrailingWhitespace(lines)
+ lines = ExpandInlineMacros(lines, filename)
Validate(lines, filename)
lines = minifier.JSMinify(lines)
id = (os.path.split(filename)[1])[:-3]
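
With this change a .js source can define a macro inline via "macro NAME(args) ... endmacro" and have later call sites expanded in place, reusing the argument scanner factored out into ExpandMacroDefinition. A trimmed-down Python model of the cycle (single-level calls with plain substring substitution only; the real scanner balances nested brackets and expands arguments recursively):

    import re

    MACRO_DEF = re.compile(r'macro\s+([A-Za-z0-9_]+)\s*\(([^)]*)\)\s*\n'
                           r'(.*?)endmacro\s*\n', re.S)

    def expand_inline_macros(text):
        while True:
            match = MACRO_DEF.search(text)
            if match is None:
                return text
            name, params, body = match.groups()
            params = [p.strip() for p in params.split(',')]
            # Remove the definition, then expand every later call site.
            text = text[:match.start()] + text[match.end():]

            def expand_call(call, params=params, body=body):
                args = [a.strip() for a in call.group(1).split(',')]
                result = body
                for param, arg in zip(params, args):
                    result = result.replace(param, arg)
                return result.strip()

            text = re.sub(r'\b%s\(([^)]*)\)' % re.escape(name),
                          expand_call, text)

    source = "macro ADD(A, B)\n(A + B)\nendmacro\nvar x = ADD(1, 2);\n"
    assert expand_inline_macros(source) == "var x = (1 + 2);\n"
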
diff --git a/chromium/v8/tools/lexer-shell.cc b/chromium/v8/tools/lexer-shell.cc
new file mode 100644
index 00000000000..33f469fb9b4
--- /dev/null
+++ b/chromium/v8/tools/lexer-shell.cc
@@ -0,0 +1,267 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <assert.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string>
+#include <vector>
+#include "v8.h"
+
+#include "api.h"
+#include "ast.h"
+#include "char-predicates-inl.h"
+#include "messages.h"
+#include "platform.h"
+#include "runtime.h"
+#include "scanner-character-streams.h"
+#include "scopeinfo.h"
+#include "string-stream.h"
+#include "scanner.h"
+
+
+using namespace v8::internal;
+
+enum Encoding {
+ LATIN1,
+ UTF8,
+ UTF16
+};
+
+
+const byte* ReadFile(const char* name, Isolate* isolate,
+ int* size, int repeat) {
+ FILE* file = fopen(name, "rb");
+ *size = 0;
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int file_size = ftell(file);
+ rewind(file);
+
+ *size = file_size * repeat;
+
+ byte* chars = new byte[*size + 1];
+ for (int i = 0; i < file_size;) {
+ int read = static_cast<int>(fread(&chars[i], 1, file_size - i, file));
+ i += read;
+ }
+ fclose(file);
+
+ for (int i = file_size; i < *size; i++) {
+ chars[i] = chars[i - file_size];
+ }
+ chars[*size] = 0;
+
+ return chars;
+}
+
+
+class BaselineScanner {
+ public:
+ BaselineScanner(const char* fname,
+ Isolate* isolate,
+ Encoding encoding,
+ ElapsedTimer* timer,
+ int repeat)
+ : stream_(NULL) {
+ int length = 0;
+ source_ = ReadFile(fname, isolate, &length, repeat);
+ unicode_cache_ = new UnicodeCache();
+ scanner_ = new Scanner(unicode_cache_);
+ switch (encoding) {
+ case UTF8:
+ stream_ = new Utf8ToUtf16CharacterStream(source_, length);
+ break;
+ case UTF16: {
+ Handle<String> result = isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(source_),
+ length / 2));
+ stream_ =
+ new GenericStringUtf16CharacterStream(result, 0, result->length());
+ break;
+ }
+ case LATIN1: {
+ Handle<String> result = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(source_, length));
+ stream_ =
+ new GenericStringUtf16CharacterStream(result, 0, result->length());
+ break;
+ }
+ }
+ timer->Start();
+ scanner_->Initialize(stream_);
+ }
+
+ ~BaselineScanner() {
+ delete scanner_;
+ delete stream_;
+ delete unicode_cache_;
+ delete[] source_;
+ }
+
+ Token::Value Next(int* beg_pos, int* end_pos) {
+ Token::Value res = scanner_->Next();
+ *beg_pos = scanner_->location().beg_pos;
+ *end_pos = scanner_->location().end_pos;
+ return res;
+ }
+
+ private:
+ UnicodeCache* unicode_cache_;
+ Scanner* scanner_;
+ const byte* source_;
+ BufferedUtf16CharacterStream* stream_;
+};
+
+
+struct TokenWithLocation {
+ Token::Value value;
+ size_t beg;
+ size_t end;
+ TokenWithLocation() : value(Token::ILLEGAL), beg(0), end(0) { }
+ TokenWithLocation(Token::Value value, size_t beg, size_t end) :
+ value(value), beg(beg), end(end) { }
+ bool operator==(const TokenWithLocation& other) {
+ return value == other.value && beg == other.beg && end == other.end;
+ }
+ bool operator!=(const TokenWithLocation& other) {
+ return !(*this == other);
+ }
+ void Print(const char* prefix) const {
+ printf("%s %11s at (%d, %d)\n",
+ prefix, Token::Name(value),
+ static_cast<int>(beg), static_cast<int>(end));
+ }
+};
+
+
+TimeDelta RunBaselineScanner(const char* fname,
+ Isolate* isolate,
+ Encoding encoding,
+ bool dump_tokens,
+ std::vector<TokenWithLocation>* tokens,
+ int repeat) {
+ ElapsedTimer timer;
+ BaselineScanner scanner(fname, isolate, encoding, &timer, repeat);
+ Token::Value token;
+ int beg, end;
+ do {
+ token = scanner.Next(&beg, &end);
+ if (dump_tokens) {
+ tokens->push_back(TokenWithLocation(token, beg, end));
+ }
+ } while (token != Token::EOS);
+ return timer.Elapsed();
+}
+
+
+void PrintTokens(const char* name,
+ const std::vector<TokenWithLocation>& tokens) {
+ printf("No of tokens: %d\n",
+ static_cast<int>(tokens.size()));
+ printf("%s:\n", name);
+ for (size_t i = 0; i < tokens.size(); ++i) {
+ tokens[i].Print("=>");
+ }
+}
+
+
+TimeDelta ProcessFile(
+ const char* fname,
+ Encoding encoding,
+ Isolate* isolate,
+ bool print_tokens,
+ int repeat) {
+ if (print_tokens) {
+ printf("Processing file %s\n", fname);
+ }
+ HandleScope handle_scope(isolate);
+ std::vector<TokenWithLocation> baseline_tokens;
+ TimeDelta baseline_time;
+ baseline_time = RunBaselineScanner(
+ fname, isolate, encoding, print_tokens,
+ &baseline_tokens, repeat);
+ if (print_tokens) {
+ PrintTokens("Baseline", baseline_tokens);
+ }
+ return baseline_time;
+}
+
+
+int main(int argc, char* argv[]) {
+ v8::V8::InitializeICU();
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ Encoding encoding = LATIN1;
+ bool print_tokens = false;
+ std::vector<std::string> fnames;
+ std::string benchmark;
+ int repeat = 1;
+ for (int i = 0; i < argc; ++i) {
+ if (strcmp(argv[i], "--latin1") == 0) {
+ encoding = LATIN1;
+ } else if (strcmp(argv[i], "--utf8") == 0) {
+ encoding = UTF8;
+ } else if (strcmp(argv[i], "--utf16") == 0) {
+ encoding = UTF16;
+ } else if (strcmp(argv[i], "--print-tokens") == 0) {
+ print_tokens = true;
+ } else if (strncmp(argv[i], "--benchmark=", 12) == 0) {
+ benchmark = std::string(argv[i]).substr(12);
+ } else if (strncmp(argv[i], "--repeat=", 9) == 0) {
+ std::string repeat_str = std::string(argv[i]).substr(9);
+ repeat = atoi(repeat_str.c_str());
+ } else if (i > 0 && argv[i][0] != '-') {
+ fnames.push_back(std::string(argv[i]));
+ }
+ }
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ ASSERT(!context.IsEmpty());
+ {
+ v8::Context::Scope scope(context);
+ Isolate* isolate = Isolate::Current();
+ double baseline_total = 0;
+ for (size_t i = 0; i < fnames.size(); i++) {
+ TimeDelta time;
+ time = ProcessFile(fnames[i].c_str(), encoding, isolate, print_tokens,
+ repeat);
+ baseline_total += time.InMillisecondsF();
+ }
+ if (benchmark.empty()) benchmark = "Baseline";
+ printf("%s(RunTime): %.f ms\n", benchmark.c_str(), baseline_total);
+ }
+ }
+ v8::V8::Dispose();
+ return 0;
+}
diff --git a/chromium/v8/preparser/preparser.gyp b/chromium/v8/tools/lexer-shell.gyp
index 23cbfff6446..8e6ab7a844b 100644
--- a/chromium/v8/preparser/preparser.gyp
+++ b/chromium/v8/tools/lexer-shell.gyp
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -28,20 +28,21 @@
{
'variables': {
'v8_code': 1,
+ 'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
{
- 'target_name': 'preparser',
+ 'target_name': 'lexer-shell',
'type': 'executable',
+ 'dependencies': [
+ '../tools/gyp/v8.gyp:v8',
+ ],
'conditions': [
- # preparser can't link against a shared library, so link against
- # the underlying static targets.
- ['v8_use_snapshot=="true"', {
- 'dependencies': ['../tools/gyp/v8.gyp:v8_snapshot'],
- }, {
+ ['v8_enable_i18n_support==1', {
'dependencies': [
- '../tools/gyp/v8.gyp:v8_nosnapshot.<(v8_target_arch)',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
],
}],
],
@@ -49,9 +50,7 @@
'../src',
],
'sources': [
- 'preparser-process.cc',
- '../include/v8-preparser.h',
- '../src/preparser-api.cc',
+ 'lexer-shell.cc',
],
},
],
diff --git a/chromium/v8/tools/merge-to-branch.sh b/chromium/v8/tools/merge-to-branch.sh
index e0011edff02..0314cb4cb84 100755
--- a/chromium/v8/tools/merge-to-branch.sh
+++ b/chromium/v8/tools/merge-to-branch.sh
@@ -69,8 +69,9 @@ restore_patch_commit_hashes_if_unset() {
}
########## Option parsing
+REVERT_FROM_BLEEDING_EDGE=0
-while getopts ":hs:fp:rm:" OPTION ; do
+while getopts ":hs:fp:rm:R" OPTION ; do
case $OPTION in
h) usage
exit 0
@@ -85,6 +86,9 @@ while getopts ":hs:fp:rm:" OPTION ; do
;;
s) START_STEP=$OPTARG
;;
+ R) REVERSE_PATCH="--reverse"
+ REVERT_FROM_BLEEDING_EDGE=1
+ ;;
?) echo "Illegal option: -$OPTARG"
usage
exit 1
@@ -104,7 +108,8 @@ touch "$ALREADY_MERGING_SENTINEL_FILE"
initial_environment_checks
if [ $START_STEP -le $CURRENT_STEP ] ; then
- if [ ${#@} -lt 2 ] ; then
+ let MIN_EXPECTED_ARGS=2-$REVERT_FROM_BLEEDING_EDGE
+ if [ ${#@} -lt $MIN_EXPECTED_ARGS ] ; then
if [ -z "$EXTRA_PATCH" ] ; then
die "Either a patch file or revision numbers must be specified"
fi
@@ -113,9 +118,13 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
fi
fi
echo ">>> Step $CURRENT_STEP: Preparation"
- MERGE_TO_BRANCH=$1
- [[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to"
- shift
+ if [ $REVERT_FROM_BLEEDING_EDGE -eq 1 ] ; then
+ MERGE_TO_BRANCH="bleeding_edge"
+ else
+ MERGE_TO_BRANCH=$1
+ [[ -n "$MERGE_TO_BRANCH" ]] || die "Please specify a branch to merge to"
+ shift
+ fi
persist "MERGE_TO_BRANCH"
common_prepare
fi
@@ -144,7 +153,11 @@ revisions associated with the patches."
done
if [ -n "$REVISION_LIST" ] ; then
if [ -n "$REVERSE_PATCH" ] ; then
- NEW_COMMIT_MSG="Rollback of$REVISION_LIST in $MERGE_TO_BRANCH branch."
+ if [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
+ NEW_COMMIT_MSG="Rollback of$REVISION_LIST in $MERGE_TO_BRANCH branch."
+ else
+ NEW_COMMIT_MSG="Revert$REVISION_LIST."
+ fi
else
NEW_COMMIT_MSG="Merged$REVISION_LIST into $MERGE_TO_BRANCH branch."
fi;
@@ -189,14 +202,14 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Prepare $VERSION_FILE."
# These version numbers are used again for creating the tag
read_and_persist_version
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Increment version number."
restore_if_unset "PATCH"
NEWPATCH=$(($PATCH + 1))
@@ -229,11 +242,12 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
git checkout $BRANCHNAME \
|| die "cannot ensure that the current branch is $BRANCHNAME"
wait_for_lgtm
- git cl dcommit || die "failed to commit to $MERGE_TO_BRANCH"
+ PRESUBMIT_TREE_CHECK="skip" git cl dcommit \
+ || die "failed to commit to $MERGE_TO_BRANCH"
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Determine svn commit revision"
restore_if_unset "NEW_COMMIT_MSG"
restore_if_unset "MERGE_TO_BRANCH"
@@ -247,7 +261,7 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
fi
let CURRENT_STEP+=1
-if [ $START_STEP -le $CURRENT_STEP ] ; then
+if [ $START_STEP -le $CURRENT_STEP ] && [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
echo ">>> Step $CURRENT_STEP: Create the tag."
restore_if_unset "SVN_REVISION"
restore_version_if_unset "NEW"
@@ -272,9 +286,11 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
restore_if_unset "REVISION_LIST"
restore_version_if_unset "NEW"
common_cleanup
- echo "*** SUMMARY ***"
- echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
- echo "branch: $TO_URL"
- echo "svn revision: $SVN_REVISION"
- [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST"
+ if [ $REVERT_FROM_BLEEDING_EDGE -eq 0 ] ; then
+ echo "*** SUMMARY ***"
+ echo "version: $NEWMAJOR.$NEWMINOR.$NEWBUILD.$NEWPATCH"
+ echo "branch: $TO_URL"
+ echo "svn revision: $SVN_REVISION"
+ [[ -n "$REVISION_LIST" ]] && echo "patches:$REVISION_LIST"
+ fi
fi
diff --git a/chromium/v8/tools/presubmit.py b/chromium/v8/tools/presubmit.py
index 12475b33c48..56eac8ecea4 100755
--- a/chromium/v8/tools/presubmit.py
+++ b/chromium/v8/tools/presubmit.py
@@ -191,7 +191,8 @@ class SourceFileProcessor(object):
return True
def IgnoreDir(self, name):
- return name.startswith('.') or name == 'data' or name == 'sputniktests'
+ return (name.startswith('.') or
+ name in ('data', 'kraken', 'octane', 'sunspider'))
def IgnoreFile(self, name):
return name.startswith('.')
@@ -226,7 +227,7 @@ class CppLintProcessor(SourceFileProcessor):
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
- return ['src', 'preparser', 'include', 'samples', join('test', 'cctest')]
+ return ['src', 'include', 'samples', join('test', 'cctest')]
def GetCpplintScript(self, prio_path):
for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
@@ -282,8 +283,8 @@ class SourceProcessor(SourceFileProcessor):
Check that all files include a copyright notice and no trailing whitespaces.
"""
- RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript',
- 'SConstruct', '.status', '.gyp', '.gypi']
+ RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
+ '.status', '.gyp', '.gypi']
# Overwriting the one in the parent class.
def FindFilesIn(self, path):
@@ -292,7 +293,7 @@ class SourceProcessor(SourceFileProcessor):
stdout=PIPE, cwd=path, shell=True)
result = []
for file in output.stdout.read().split():
- for dir_part in os.path.dirname(file).split(os.sep):
+ for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
if self.IgnoreDir(dir_part):
break
else:
@@ -312,12 +313,8 @@ class SourceProcessor(SourceFileProcessor):
return ['.']
def IgnoreDir(self, name):
- return (super(SourceProcessor, self).IgnoreDir(name)
- or (name == 'third_party')
- or (name == 'gyp')
- or (name == 'out')
- or (name == 'obj')
- or (name == 'DerivedSources'))
+ return (super(SourceProcessor, self).IgnoreDir(name) or
+ name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['cpplint.py',
'daemon.py',
@@ -365,6 +362,9 @@ class SourceProcessor(SourceFileProcessor):
else:
print "%s has trailing whitespaces in line %s." % (name, linenumbers)
result = False
+ if not contents.endswith('\n') or contents.endswith('\n\n'):
+ print "%s does not end with a single new line." % name
+ result = False
# Check two empty lines between declarations.
if name.endswith(".cc"):
line = 0
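
The new end-of-file check flags files that either lack a final newline or end in extra blank lines; both cases reduce to one predicate. A standalone sketch (the helper name is made up):

    def ends_with_single_newline(contents):
        # Exactly one trailing '\n': not zero, not two or more.
        return contents.endswith('\n') and not contents.endswith('\n\n')

    assert ends_with_single_newline("foo\n")
    assert not ends_with_single_newline("foo")        # missing final newline
    assert not ends_with_single_newline("foo\n\n")    # trailing blank line
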
diff --git a/chromium/v8/tools/profviz/composer.js b/chromium/v8/tools/profviz/composer.js
index 44dd7639de8..0c9437ff541 100644
--- a/chromium/v8/tools/profviz/composer.js
+++ b/chromium/v8/tools/profviz/composer.js
@@ -497,6 +497,8 @@ function PlotScriptComposer(kResX, kResY, error_output) {
}
// Label the longest pauses.
+ execution_pauses =
+ RestrictRangesTo(execution_pauses, range_start, range_end);
execution_pauses.sort(
function(a, b) { return b.duration() - a.duration(); });
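
The composer fix clips execution_pauses to the plotted range before sorting by duration, so only pauses visible in the selected range get labeled. A rough Python analogue, assuming RestrictRangesTo keeps the ranges that overlap [range_start, range_end]:

    def restrict_ranges_to(pauses, start, end):
        # Keep only pauses overlapping [start, end] (assumed semantics).
        return [(b, e) for (b, e) in pauses if e > start and b < end]

    pauses = [(0.0, 1.0), (5.0, 9.0), (12.0, 13.0)]
    visible = restrict_ranges_to(pauses, 4.0, 10.0)        # [(5.0, 9.0)]
    visible.sort(key=lambda p: p[1] - p[0], reverse=True)  # longest first
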
diff --git a/chromium/v8/tools/push-to-trunk.sh b/chromium/v8/tools/push-to-trunk.sh
index 8512d128778..c91cd19f9a7 100755
--- a/chromium/v8/tools/push-to-trunk.sh
+++ b/chromium/v8/tools/push-to-trunk.sh
@@ -211,7 +211,8 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
};
print $0;
}' > "$CHANGELOG_ENTRY_FILE"
- git cl dcommit || die "'git cl dcommit' failed, please try again."
+ PRESUBMIT_TREE_CHECK="skip" git cl dcommit \
+ || die "'git cl dcommit' failed, please try again."
fi
let CURRENT_STEP+=1
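
The PRESUBMIT_TREE_CHECK="skip" prefix sets the variable for that single git invocation only. A standalone Python sketch of the same one-off override (assuming git-cl is installed):

    import os
    import subprocess

    env = dict(os.environ, PRESUBMIT_TREE_CHECK="skip")  # one-shot override
    subprocess.check_call("git cl dcommit", shell=True, env=env)

The Python port below passes the assignment through Command() as a command prefix instead (see CommitRepository in push_to_trunk.py).
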
diff --git a/chromium/v8/tools/push-to-trunk/auto_roll.py b/chromium/v8/tools/push-to-trunk/auto_roll.py
new file mode 100755
index 00000000000..895ae54f75a
--- /dev/null
+++ b/chromium/v8/tools/push-to-trunk/auto_roll.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import re
+import sys
+
+from common_includes import *
+
+CONFIG = {
+ PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
+ DOT_GIT_LOCATION: ".git",
+}
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ self.InitialEnvironmentChecks()
+ self.CommonPrepare()
+
+
+class FetchLatestRevision(Step):
+ MESSAGE = "Fetching latest V8 revision."
+
+ def RunStep(self):
+ log = self.Git("svn log -1 --oneline").strip()
+ match = re.match(r"^r(\d+) ", log)
+ if not match:
+ self.Die("Could not extract current svn revision from log.")
+ self.Persist("latest", match.group(1))
+
+
+class FetchLKGR(Step):
+ MESSAGE = "Fetching V8 LKGR."
+
+ def RunStep(self):
+ lkgr_url = "https://v8-status.appspot.com/lkgr"
+ # Retry several times since app engine might have issues.
+ self.Persist("lkgr", self.ReadURL(lkgr_url, wait_plan=[5, 20, 300, 300]))
+
+
+class PushToTrunk(Step):
+ MESSAGE = "Pushing to trunk if possible."
+
+ def RunStep(self):
+ self.RestoreIfUnset("latest")
+ self.RestoreIfUnset("lkgr")
+ latest = int(self._state["latest"])
+ lkgr = int(self._state["lkgr"])
+ if latest == lkgr:
+ print "ToT (r%d) is clean. Pushing to trunk." % latest
+ # TODO(machenbach): Call push to trunk script.
+ else:
+ print("ToT (r%d) is ahead of the LKGR (r%d). Skipping push to trunk."
+ % (latest, lkgr))
+
+
+def RunAutoRoll(config,
+ options,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+ step_classes = [
+ Preparation,
+ FetchLatestRevision,
+ FetchLKGR,
+ PushToTrunk,
+ ]
+ RunScript(step_classes, config, options, side_effect_handler)
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("-s", "--step", dest="s",
+ help="Specify the step where to start work. Default: 0.",
+ default=0, type="int")
+ return result
+
+
+def Main():
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ RunAutoRoll(CONFIG, options)
+
+if __name__ == "__main__":
+ sys.exit(Main())
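
FetchLatestRevision assumes each "git svn log --oneline" entry starts with the Subversion revision, e.g. "r18102 ...", so the extraction is a single anchored regex. Standalone sketch (the log line is made up):

    import re

    log = "r18102 Fix a thing."
    match = re.match(r"^r(\d+) ", log)
    if match:
        print match.group(1)  # prints: 18102
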
diff --git a/chromium/v8/tools/push-to-trunk/common_includes.py b/chromium/v8/tools/push-to-trunk/common_includes.py
new file mode 100644
index 00000000000..196593758e1
--- /dev/null
+++ b/chromium/v8/tools/push-to-trunk/common_includes.py
@@ -0,0 +1,486 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import subprocess
+import sys
+import textwrap
+import time
+import urllib2
+
+PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME"
+TEMP_BRANCH = "TEMP_BRANCH"
+BRANCHNAME = "BRANCHNAME"
+DOT_GIT_LOCATION = "DOT_GIT_LOCATION"
+VERSION_FILE = "VERSION_FILE"
+CHANGELOG_FILE = "CHANGELOG_FILE"
+CHANGELOG_ENTRY_FILE = "CHANGELOG_ENTRY_FILE"
+COMMITMSG_FILE = "COMMITMSG_FILE"
+PATCH_FILE = "PATCH_FILE"
+
+
+def TextToFile(text, file_name):
+ with open(file_name, "w") as f:
+ f.write(text)
+
+
+def AppendToFile(text, file_name):
+ with open(file_name, "a") as f:
+ f.write(text)
+
+
+def LinesInFile(file_name):
+ with open(file_name) as f:
+ for line in f:
+ yield line
+
+
+def FileToText(file_name):
+ with open(file_name) as f:
+ return f.read()
+
+
+def MSub(rexp, replacement, text):
+ return re.sub(rexp, replacement, text, flags=re.MULTILINE)
+
+
+def Fill80(line):
+ # Replace tabs and remove surrounding space.
+ line = re.sub(r"\t", r" ", line.strip())
+
+ # Format with 8 characters indentation and line width 80.
+ return textwrap.fill(line, width=80, initial_indent=" ",
+ subsequent_indent=" ")
+
+
+def GetLastChangeLogEntries(change_log_file):
+ result = []
+ for line in LinesInFile(change_log_file):
+ if re.search(r"^\d{4}-\d{2}-\d{2}:", line) and result: break
+ result.append(line)
+ return "".join(result)
+
+
+def MakeComment(text):
+ return MSub(r"^( ?)", "#", text)
+
+
+def StripComments(text):
+ # Use split not splitlines to keep terminal newlines.
+ return "\n".join(filter(lambda x: not x.startswith("#"), text.split("\n")))
+
+
+def MakeChangeLogBody(commit_messages, auto_format=False):
+ result = ""
+ added_titles = set()
+ for (title, body, author) in commit_messages:
+ # TODO(machenbach): Better check for reverts. A revert should remove the
+ # original CL from the actual log entry.
+ title = title.strip()
+ if auto_format:
+ # Only add commits that set the LOG flag correctly.
+      log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:Y(?:ES)?|TRUE)"
+ if not re.search(log_exp, body, flags=re.I | re.M):
+ continue
+ # Never include reverts.
+ if title.startswith("Revert "):
+ continue
+ # Don't include duplicates.
+ if title in added_titles:
+ continue
+
+ # Add and format the commit's title and bug reference. Move dot to the end.
+ added_titles.add(title)
+ raw_title = re.sub(r"(\.|\?|!)$", "", title)
+ bug_reference = MakeChangeLogBugReference(body)
+ space = " " if bug_reference else ""
+ result += "%s\n" % Fill80("%s%s%s." % (raw_title, space, bug_reference))
+
+ # Append the commit's author for reference if not in auto-format mode.
+ if not auto_format:
+ result += "%s\n" % Fill80("(%s)" % author.strip())
+
+ result += "\n"
+ return result
+
+
+def MakeChangeLogBugReference(body):
+ """Grep for "BUG=xxxx" lines in the commit message and convert them to
+ "(issue xxxx)".
+ """
+ crbugs = []
+ v8bugs = []
+
+ def AddIssues(text):
+ ref = re.match(r"^BUG[ \t]*=[ \t]*(.+)$", text.strip())
+ if not ref:
+ return
+ for bug in ref.group(1).split(","):
+ bug = bug.strip()
+ match = re.match(r"^v8:(\d+)$", bug)
+ if match: v8bugs.append(int(match.group(1)))
+ else:
+ match = re.match(r"^(?:chromium:)?(\d+)$", bug)
+ if match: crbugs.append(int(match.group(1)))
+
+ # Add issues to crbugs and v8bugs.
+ map(AddIssues, body.splitlines())
+
+ # Filter duplicates, sort, stringify.
+ crbugs = map(str, sorted(set(crbugs)))
+ v8bugs = map(str, sorted(set(v8bugs)))
+
+ bug_groups = []
+ def FormatIssues(prefix, bugs):
+ if len(bugs) > 0:
+ plural = "s" if len(bugs) > 1 else ""
+ bug_groups.append("%sissue%s %s" % (prefix, plural, ", ".join(bugs)))
+
+ FormatIssues("", v8bugs)
+ FormatIssues("Chromium ", crbugs)
+
+ if len(bug_groups) > 0:
+ return "(%s)" % ", ".join(bug_groups)
+ else:
+ return ""
+
+
+# Some commands don't like the pipe, e.g. calling vi from within the script or
+# from sub-scripts like git cl upload.
+def Command(cmd, args="", prefix="", pipe=True):
+ # TODO(machenbach): Use timeout.
+ cmd_line = "%s %s %s" % (prefix, cmd, args)
+ print "Command: %s" % cmd_line
+ try:
+ if pipe:
+ return subprocess.check_output(cmd_line, shell=True)
+ else:
+ return subprocess.check_call(cmd_line, shell=True)
+ except subprocess.CalledProcessError:
+ return None
+
+
+# Wrapper for side effects.
+class SideEffectHandler(object):
+ def Command(self, cmd, args="", prefix="", pipe=True):
+ return Command(cmd, args, prefix, pipe)
+
+ def ReadLine(self):
+ return sys.stdin.readline().strip()
+
+ def ReadURL(self, url):
+ # pylint: disable=E1121
+ url_fh = urllib2.urlopen(url, None, 60)
+ try:
+ return url_fh.read()
+ finally:
+ url_fh.close()
+
+  def Sleep(self, seconds):
+ time.sleep(seconds)
+
+DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
+
+
+class Step(object):
+ def __init__(self, text, requires, number, config, state, options, handler):
+ self._text = text
+ self._requires = requires
+ self._number = number
+ self._config = config
+ self._state = state
+ self._options = options
+ self._side_effect_handler = handler
+ assert self._number >= 0
+ assert self._config is not None
+ assert self._state is not None
+ assert self._side_effect_handler is not None
+
+ def Config(self, key):
+ return self._config[key]
+
+ def Run(self):
+ if self._requires:
+ self.RestoreIfUnset(self._requires)
+ if not self._state[self._requires]:
+ return
+ print ">>> Step %d: %s" % (self._number, self._text)
+ self.RunStep()
+
+ def RunStep(self):
+ raise NotImplementedError
+
+ def Retry(self, cb, retry_on=None, wait_plan=None):
+ """ Retry a function.
+ Params:
+ cb: The function to retry.
+ retry_on: A callback that takes the result of the function and returns
+ True if the function should be retried. A function throwing an
+ exception is always retried.
+ wait_plan: A list of waiting delays between retries in seconds. The
+ maximum number of retries is len(wait_plan).
+ """
+ retry_on = retry_on or (lambda x: False)
+ wait_plan = list(wait_plan or [])
+ wait_plan.reverse()
+ while True:
+ got_exception = False
+ try:
+ result = cb()
+ except Exception:
+ got_exception = True
+ if got_exception or retry_on(result):
+ if not wait_plan:
+ raise Exception("Retried too often. Giving up.")
+ wait_time = wait_plan.pop()
+ print "Waiting for %f seconds." % wait_time
+ self._side_effect_handler.Sleep(wait_time)
+ print "Retrying..."
+ else:
+ return result
+
+ def ReadLine(self, default=None):
+ # Don't prompt in forced mode.
+ if self._options and self._options.f and default is not None:
+ print "%s (forced)" % default
+ return default
+ else:
+ return self._side_effect_handler.ReadLine()
+
+ def Git(self, args="", prefix="", pipe=True, retry_on=None):
+ cmd = lambda: self._side_effect_handler.Command("git", args, prefix, pipe)
+ return self.Retry(cmd, retry_on, [5, 30])
+
+ def Editor(self, args):
+ return self._side_effect_handler.Command(os.environ["EDITOR"], args,
+ pipe=False)
+
+ def ReadURL(self, url, retry_on=None, wait_plan=None):
+ wait_plan = wait_plan or [3, 60, 600]
+ cmd = lambda: self._side_effect_handler.ReadURL(url)
+ return self.Retry(cmd, retry_on, wait_plan)
+
+ def Die(self, msg=""):
+ if msg != "":
+ print "Error: %s" % msg
+ print "Exiting"
+ raise Exception(msg)
+
+ def DieInForcedMode(self, msg=""):
+ if self._options and self._options.f:
+ msg = msg or "Not implemented in forced mode."
+ self.Die(msg)
+
+ def Confirm(self, msg):
+ print "%s [Y/n] " % msg,
+ answer = self.ReadLine(default="Y")
+    return answer in ("", "Y", "y")
+
+ def DeleteBranch(self, name):
+ git_result = self.Git("branch").strip()
+ for line in git_result.splitlines():
+ if re.match(r".*\s+%s$" % name, line):
+ msg = "Branch %s exists, do you want to delete it?" % name
+ if self.Confirm(msg):
+ if self.Git("branch -D %s" % name) is None:
+ self.Die("Deleting branch '%s' failed." % name)
+ print "Branch %s deleted." % name
+ else:
+ msg = "Can't continue. Please delete branch %s and try again." % name
+ self.Die(msg)
+
+ def Persist(self, var, value):
+ value = value or "__EMPTY__"
+ TextToFile(value, "%s-%s" % (self._config[PERSISTFILE_BASENAME], var))
+
+ def Restore(self, var):
+ value = FileToText("%s-%s" % (self._config[PERSISTFILE_BASENAME], var))
+ value = value or self.Die("Variable '%s' could not be restored." % var)
+ return "" if value == "__EMPTY__" else value
+
+ def RestoreIfUnset(self, var_name):
+ if self._state.get(var_name) is None:
+ self._state[var_name] = self.Restore(var_name)
+
+ def InitialEnvironmentChecks(self):
+ # Cancel if this is not a git checkout.
+ if not os.path.exists(self._config[DOT_GIT_LOCATION]):
+ self.Die("This is not a git checkout, this script won't work for you.")
+
+ # TODO(machenbach): Don't use EDITOR in forced mode as soon as script is
+ # well tested.
+ # Cancel if EDITOR is unset or not executable.
+ if (not os.environ.get("EDITOR") or
+ Command("which", os.environ["EDITOR"]) is None):
+ self.Die("Please set your EDITOR environment variable, you'll need it.")
+
+ def CommonPrepare(self):
+ # Check for a clean workdir.
+ if self.Git("status -s -uno").strip() != "":
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+
+ # Persist current branch.
+ current_branch = ""
+ git_result = self.Git("status -s -b -uno").strip()
+ for line in git_result.splitlines():
+ match = re.match(r"^## (.+)", line)
+ if match:
+ current_branch = match.group(1)
+ break
+ self.Persist("current_branch", current_branch)
+
+ # Fetch unfetched revisions.
+ if self.Git("svn fetch") is None:
+ self.Die("'git svn fetch' failed.")
+
+ def PrepareBranch(self):
+ # Get ahold of a safe temporary branch and check it out.
+ self.RestoreIfUnset("current_branch")
+ if self._state["current_branch"] != self._config[TEMP_BRANCH]:
+ self.DeleteBranch(self._config[TEMP_BRANCH])
+ self.Git("checkout -b %s" % self._config[TEMP_BRANCH])
+
+ # Delete the branch that will be created later if it exists already.
+ self.DeleteBranch(self._config[BRANCHNAME])
+
+ def CommonCleanup(self):
+ self.RestoreIfUnset("current_branch")
+ self.Git("checkout -f %s" % self._state["current_branch"])
+ if self._config[TEMP_BRANCH] != self._state["current_branch"]:
+ self.Git("branch -D %s" % self._config[TEMP_BRANCH])
+ if self._config[BRANCHNAME] != self._state["current_branch"]:
+ self.Git("branch -D %s" % self._config[BRANCHNAME])
+
+ # Clean up all temporary files.
+ Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+
+ def ReadAndPersistVersion(self, prefix=""):
+ def ReadAndPersist(var_name, def_name):
+ match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
+ if match:
+ value = match.group(1)
+ self.Persist("%s%s" % (prefix, var_name), value)
+ self._state["%s%s" % (prefix, var_name)] = value
+ for line in LinesInFile(self._config[VERSION_FILE]):
+ for (var_name, def_name) in [("major", "MAJOR_VERSION"),
+ ("minor", "MINOR_VERSION"),
+ ("build", "BUILD_NUMBER"),
+ ("patch", "PATCH_LEVEL")]:
+ ReadAndPersist(var_name, def_name)
+
+ def RestoreVersionIfUnset(self, prefix=""):
+ for v in ["major", "minor", "build", "patch"]:
+ self.RestoreIfUnset("%s%s" % (prefix, v))
+
+ def WaitForLGTM(self):
+ print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
+ "your change. (If you need to iterate on the patch or double check "
+ "that it's sane, do so in another shell, but remember to not "
+ "change the headline of the uploaded CL.")
+ answer = ""
+ while answer != "LGTM":
+ print "> ",
+ # TODO(machenbach): Add default="LGTM" to avoid prompt when script is
+ # well tested and when prepare push cl has TBR flag.
+ answer = self.ReadLine()
+ if answer != "LGTM":
+ print "That was not 'LGTM'."
+
+ def WaitForResolvingConflicts(self, patch_file):
+ print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
+ "or resolve the conflicts, stage *all* touched files with "
+ "'git add', and type \"RESOLVED<Return>\"")
+ self.DieInForcedMode()
+ answer = ""
+ while answer != "RESOLVED":
+ if answer == "ABORT":
+ self.Die("Applying the patch failed.")
+ if answer != "":
+ print "That was not 'RESOLVED' or 'ABORT'."
+ print "> ",
+ answer = self.ReadLine()
+
+ # Takes a file containing the patch to apply as first argument.
+ def ApplyPatch(self, patch_file, reverse_patch=""):
+ args = "apply --index --reject %s \"%s\"" % (reverse_patch, patch_file)
+ if self.Git(args) is None:
+ self.WaitForResolvingConflicts(patch_file)
+
+
+class UploadStep(Step):
+ MESSAGE = "Upload for code review."
+
+ def RunStep(self):
+ if self._options.r:
+ print "Using account %s for review." % self._options.r
+ reviewer = self._options.r
+ else:
+ print "Please enter the email address of a V8 reviewer for your patch: ",
+ self.DieInForcedMode("A reviewer must be specified in forced mode.")
+ reviewer = self.ReadLine()
+ force_flag = " -f" if self._options.f else ""
+ args = "cl upload -r \"%s\" --send-mail%s" % (reviewer, force_flag)
+ # TODO(machenbach): Check output in forced mode. Verify that all required
+ # base files were uploaded, if not retry.
+ if self.Git(args, pipe=False) is None:
+ self.Die("'git cl upload' failed, please try again.")
+
+
+def MakeStep(step_class=Step, number=0, state=None, config=None,
+ options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+  # Allow passing in empty dictionaries.
+ state = state if state is not None else {}
+ config = config if config is not None else {}
+
+ try:
+ message = step_class.MESSAGE
+ except AttributeError:
+ message = step_class.__name__
+ try:
+ requires = step_class.REQUIRES
+ except AttributeError:
+ requires = None
+
+ return step_class(message, requires, number=number, config=config,
+ state=state, options=options,
+ handler=side_effect_handler)
+
+
+def RunScript(step_classes,
+ config,
+ options,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+ state = {}
+ steps = []
+ for (number, step_class) in enumerate(step_classes):
+ steps.append(MakeStep(step_class, number, state, config,
+ options, side_effect_handler))
+
+ for step in steps[options.s:]:
+ step.Run()
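
Retry's wait_plan doubles as retry budget and backoff schedule: len(wait_plan) caps the number of retries, and each consumed entry is slept before the next attempt. A minimal standalone sketch of that contract (no V8 or git specifics):

    import time

    def retry(cb, retry_on, wait_plan):
        plan = list(wait_plan)          # one entry per allowed retry
        while True:
            try:
                result = cb()
                if not retry_on(result):
                    return result
            except Exception:
                pass                    # an exception always triggers a retry
            if not plan:
                raise Exception("Retried too often. Giving up.")
            time.sleep(plan.pop(0))     # back off, then try again
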
diff --git a/chromium/v8/tools/push-to-trunk/push_to_trunk.py b/chromium/v8/tools/push-to-trunk/push_to_trunk.py
new file mode 100755
index 00000000000..cee871fc6e4
--- /dev/null
+++ b/chromium/v8/tools/push-to-trunk/push_to_trunk.py
@@ -0,0 +1,581 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+import optparse
+import sys
+import tempfile
+import urllib2
+
+from common_includes import *
+
+TRUNKBRANCH = "TRUNKBRANCH"
+CHROMIUM = "CHROMIUM"
+DEPS_FILE = "DEPS_FILE"
+
+CONFIG = {
+ BRANCHNAME: "prepare-push",
+ TRUNKBRANCH: "trunk-push",
+ PERSISTFILE_BASENAME: "/tmp/v8-push-to-trunk-tempfile",
+ TEMP_BRANCH: "prepare-push-temporary-branch-created-by-script",
+ DOT_GIT_LOCATION: ".git",
+ VERSION_FILE: "src/version.cc",
+ CHANGELOG_FILE: "ChangeLog",
+ CHANGELOG_ENTRY_FILE: "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
+ PATCH_FILE: "/tmp/v8-push-to-trunk-tempfile-patch-file",
+ COMMITMSG_FILE: "/tmp/v8-push-to-trunk-tempfile-commitmsg",
+ DEPS_FILE: "DEPS",
+}
+
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ self.InitialEnvironmentChecks()
+ self.CommonPrepare()
+ self.PrepareBranch()
+ self.DeleteBranch(self.Config(TRUNKBRANCH))
+
+
+class FreshBranch(Step):
+ MESSAGE = "Create a fresh branch."
+
+ def RunStep(self):
+ args = "checkout -b %s svn/bleeding_edge" % self.Config(BRANCHNAME)
+ if self.Git(args) is None:
+ self.Die("Creating branch %s failed." % self.Config(BRANCHNAME))
+
+
+class DetectLastPush(Step):
+ MESSAGE = "Detect commit ID of last push to trunk."
+
+ def RunStep(self):
+ last_push = (self._options.l or
+ self.Git("log -1 --format=%H ChangeLog").strip())
+ while True:
+ # Print assumed commit, circumventing git's pager.
+ print self.Git("log -1 %s" % last_push)
+ if self.Confirm("Is the commit printed above the last push to trunk?"):
+ break
+ args = "log -1 --format=%H %s^ ChangeLog" % last_push
+ last_push = self.Git(args).strip()
+ self.Persist("last_push", last_push)
+ self._state["last_push"] = last_push
+
+
+class PrepareChangeLog(Step):
+ MESSAGE = "Prepare raw ChangeLog entry."
+
+ def Reload(self, body):
+ """Attempts to reload the commit message from rietveld in order to allow
+ late changes to the LOG flag. Note: This is brittle to future changes of
+ the web page name or structure.
+ """
+ match = re.search(r"^Review URL: https://codereview\.chromium\.org/(\d+)$",
+ body, flags=re.M)
+ if match:
+ cl_url = "https://codereview.chromium.org/%s/description" % match.group(1)
+ try:
+ # Fetch from Rietveld but only retry once with one second delay since
+ # there might be many revisions.
+ body = self.ReadURL(cl_url, wait_plan=[1])
+ except urllib2.URLError:
+ pass
+ return body
+
+ def RunStep(self):
+ self.RestoreIfUnset("last_push")
+
+ # These version numbers are used again later for the trunk commit.
+ self.ReadAndPersistVersion()
+
+ date = datetime.date.today().strftime("%Y-%m-%d")
+ self.Persist("date", date)
+ output = "%s: Version %s.%s.%s\n\n" % (date,
+ self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ TextToFile(output, self.Config(CHANGELOG_ENTRY_FILE))
+
+ args = "log %s..HEAD --format=%%H" % self._state["last_push"]
+ commits = self.Git(args).strip()
+
+ # Cache raw commit messages.
+ commit_messages = [
+ [
+ self.Git("log -1 %s --format=\"%%s\"" % commit),
+ self.Reload(self.Git("log -1 %s --format=\"%%B\"" % commit)),
+ self.Git("log -1 %s --format=\"%%an\"" % commit),
+ ] for commit in commits.splitlines()
+ ]
+
+ # Auto-format commit messages.
+ body = MakeChangeLogBody(commit_messages, auto_format=True)
+ AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
+
+ msg = (" Performance and stability improvements on all platforms."
+ "\n#\n# The change log above is auto-generated. Please review if "
+ "all relevant\n# commit messages from the list below are included."
+ "\n# All lines starting with # will be stripped.\n#\n")
+ AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
+
+ # Include unformatted commit messages as a reference in a comment.
+ comment_body = MakeComment(MakeChangeLogBody(commit_messages))
+ AppendToFile(comment_body, self.Config(CHANGELOG_ENTRY_FILE))
+
+
+class EditChangeLog(Step):
+ MESSAGE = "Edit ChangeLog entry."
+
+ def RunStep(self):
+ print ("Please press <Return> to have your EDITOR open the ChangeLog "
+ "entry, then edit its contents to your liking. When you're done, "
+ "save the file and exit your EDITOR. ")
+ self.ReadLine(default="")
+
+ # TODO(machenbach): Don't use EDITOR in forced mode as soon as script is
+ # well tested.
+ self.Editor(self.Config(CHANGELOG_ENTRY_FILE))
+ handle, new_changelog = tempfile.mkstemp()
+ os.close(handle)
+
+ # Strip comments and reformat with correct indentation.
+ changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
+ changelog_entry = StripComments(changelog_entry)
+ changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
+ changelog_entry = changelog_entry.lstrip()
+
+ if changelog_entry == "":
+ self.Die("Empty ChangeLog entry.")
+
+ with open(new_changelog, "w") as f:
+ f.write(changelog_entry)
+ f.write("\n\n\n") # Explicitly insert two empty lines.
+
+ AppendToFile(FileToText(self.Config(CHANGELOG_FILE)), new_changelog)
+ TextToFile(FileToText(new_changelog), self.Config(CHANGELOG_FILE))
+ os.remove(new_changelog)
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ self.RestoreIfUnset("build")
+ new_build = str(int(self._state["build"]) + 1)
+
+ if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
+ "fire up your EDITOR on %s so you can make arbitrary "
+ "changes. When you're done, save the file and exit your "
+ "EDITOR.)" % self.Config(VERSION_FILE))):
+ text = FileToText(self.Config(VERSION_FILE))
+ text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
+ r"\g<space>%s" % new_build,
+ text)
+ TextToFile(text, self.Config(VERSION_FILE))
+ else:
+ self.Editor(self.Config(VERSION_FILE))
+
+ self.ReadAndPersistVersion("new_")
+
+
+class CommitLocal(Step):
+ MESSAGE = "Commit to local branch."
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset("new_")
+ prep_commit_msg = ("Prepare push to trunk. "
+ "Now working on version %s.%s.%s." % (self._state["new_major"],
+ self._state["new_minor"],
+ self._state["new_build"]))
+ self.Persist("prep_commit_msg", prep_commit_msg)
+ if self.Git("commit -a -m \"%s\"" % prep_commit_msg) is None:
+ self.Die("'git commit -a' failed.")
+
+
+class CommitRepository(Step):
+ MESSAGE = "Commit to the repository."
+
+ def RunStep(self):
+ self.WaitForLGTM()
+ # Re-read the ChangeLog entry (to pick up possible changes).
+ # FIXME(machenbach): This was hanging once with a broken pipe.
+ TextToFile(GetLastChangeLogEntries(self.Config(CHANGELOG_FILE)),
+ self.Config(CHANGELOG_ENTRY_FILE))
+
+ if self.Git("cl dcommit -f", "PRESUBMIT_TREE_CHECK=\"skip\"") is None:
+ self.Die("'git cl dcommit' failed, please try again.")
+
+
+class StragglerCommits(Step):
+ MESSAGE = ("Fetch straggler commits that sneaked in since this script was "
+ "started.")
+
+ def RunStep(self):
+ if self.Git("svn fetch") is None:
+ self.Die("'git svn fetch' failed.")
+ self.Git("checkout svn/bleeding_edge")
+ self.RestoreIfUnset("prep_commit_msg")
+ args = "log -1 --format=%%H --grep=\"%s\"" % self._state["prep_commit_msg"]
+ prepare_commit_hash = self.Git(args).strip()
+ self.Persist("prepare_commit_hash", prepare_commit_hash)
+
+
+class SquashCommits(Step):
+ MESSAGE = "Squash commits into one."
+
+ def RunStep(self):
+ # Instead of relying on "git rebase -i", we'll just create a diff, because
+ # that's easier to automate.
+ self.RestoreIfUnset("prepare_commit_hash")
+ args = "diff svn/trunk %s" % self._state["prepare_commit_hash"]
+ TextToFile(self.Git(args), self.Config(PATCH_FILE))
+
+ # Convert the ChangeLog entry to commit message format:
+ # - remove date
+ # - remove indentation
+ # - merge paragraphs into single long lines, keeping empty lines between
+ # them.
+ self.RestoreIfUnset("date")
+ changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
+
+ # TODO(machenbach): This could create a problem if the changelog contained
+ # any quotation marks.
+ text = Command("echo \"%s\" \
+ | sed -e \"s/^%s: //\" \
+ | sed -e 's/^ *//' \
+ | awk '{ \
+ if (need_space == 1) {\
+ printf(\" \");\
+ };\
+ printf(\"%%s\", $0);\
+ if ($0 ~ /^$/) {\
+ printf(\"\\n\\n\");\
+ need_space = 0;\
+ } else {\
+ need_space = 1;\
+ }\
+ }'" % (changelog_entry, self._state["date"]))
+
+ if not text:
+ self.Die("Commit message editing failed.")
+ TextToFile(text, self.Config(COMMITMSG_FILE))
+ os.remove(self.Config(CHANGELOG_ENTRY_FILE))
+
+
+class NewBranch(Step):
+ MESSAGE = "Create a new branch from trunk."
+
+ def RunStep(self):
+ if self.Git("checkout -b %s svn/trunk" % self.Config(TRUNKBRANCH)) is None:
+ self.Die("Checking out a new branch '%s' failed." %
+ self.Config(TRUNKBRANCH))
+
+
+class ApplyChanges(Step):
+ MESSAGE = "Apply squashed changes."
+
+ def RunStep(self):
+ self.ApplyPatch(self.Config(PATCH_FILE))
+ Command("rm", "-f %s*" % self.Config(PATCH_FILE))
+
+
+class SetVersion(Step):
+ MESSAGE = "Set correct version for trunk."
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset()
+ output = ""
+ for line in FileToText(self.Config(VERSION_FILE)).splitlines():
+ if line.startswith("#define MAJOR_VERSION"):
+ line = re.sub("\d+$", self._state["major"], line)
+ elif line.startswith("#define MINOR_VERSION"):
+ line = re.sub("\d+$", self._state["minor"], line)
+ elif line.startswith("#define BUILD_NUMBER"):
+ line = re.sub("\d+$", self._state["build"], line)
+ elif line.startswith("#define PATCH_LEVEL"):
+ line = re.sub("\d+$", "0", line)
+ elif line.startswith("#define IS_CANDIDATE_VERSION"):
+ line = re.sub("\d+$", "0", line)
+ output += "%s\n" % line
+ TextToFile(output, self.Config(VERSION_FILE))
+
+
+class CommitTrunk(Step):
+ MESSAGE = "Commit to local trunk branch."
+
+ def RunStep(self):
+ self.Git("add \"%s\"" % self.Config(VERSION_FILE))
+ if self.Git("commit -F \"%s\"" % self.Config(COMMITMSG_FILE)) is None:
+ self.Die("'git commit' failed.")
+ Command("rm", "-f %s*" % self.Config(COMMITMSG_FILE))
+
+
+class SanityCheck(Step):
+ MESSAGE = "Sanity check."
+
+ def RunStep(self):
+ if not self.Confirm("Please check if your local checkout is sane: Inspect "
+ "%s, compile, run tests. Do you want to commit this new trunk "
+ "revision to the repository?" % self.Config(VERSION_FILE)):
+ self.Die("Execution canceled.")
+
+
+class CommitSVN(Step):
+ MESSAGE = "Commit to SVN."
+
+ def RunStep(self):
+ result = self.Git("svn dcommit 2>&1")
+ if not result:
+ self.Die("'git svn dcommit' failed.")
+ result = filter(lambda x: re.search(r"^Committed r[0-9]+", x),
+ result.splitlines())
+ if len(result) > 0:
+ trunk_revision = re.sub(r"^Committed r([0-9]+)", r"\1", result[0])
+
+ # Sometimes grepping for the revision fails. No idea why. If you figure
+ # out why it is flaky, please do fix it properly.
+ if not trunk_revision:
+ print("Sorry, grepping for the SVN revision failed. Please look for it "
+ "in the last command's output above and provide it manually (just "
+ "the number, without the leading \"r\").")
+ self.DieInForcedMode("Can't prompt in forced mode.")
+ while not trunk_revision:
+ print "> ",
+ trunk_revision = self.ReadLine()
+ self.Persist("trunk_revision", trunk_revision)
+
+
+class TagRevision(Step):
+ MESSAGE = "Tag the new revision."
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset()
+ ver = "%s.%s.%s" % (self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ if self.Git("svn tag %s -m \"Tagging version %s\"" % (ver, ver)) is None:
+ self.Die("'git svn tag' failed.")
+
+
+class CheckChromium(Step):
+ MESSAGE = "Ask for chromium checkout."
+
+ def Run(self):
+ chrome_path = self._options.c
+ if not chrome_path:
+ self.DieInForcedMode("Please specify the path to a Chromium checkout in "
+ "forced mode.")
+ print ("Do you have a \"NewGit\" Chromium checkout and want "
+ "this script to automate creation of the roll CL? If yes, enter the "
+ "path to (and including) the \"src\" directory here, otherwise just "
+ "press <Return>: "),
+ chrome_path = self.ReadLine()
+ self.Persist("chrome_path", chrome_path)
+
+
+class SwitchChromium(Step):
+ MESSAGE = "Switch to Chromium checkout."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ v8_path = os.getcwd()
+ self.Persist("v8_path", v8_path)
+ os.chdir(self._state["chrome_path"])
+ self.InitialEnvironmentChecks()
+ # Check for a clean workdir.
+ if self.Git("status -s -uno").strip() != "":
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+ # Assert that the DEPS file is there.
+ if not os.path.exists(self.Config(DEPS_FILE)):
+ self.Die("DEPS file not present.")
+
+
+class UpdateChromiumCheckout(Step):
+ MESSAGE = "Update the checkout and create a new branch."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ os.chdir(self._state["chrome_path"])
+ if self.Git("checkout master") is None:
+ self.Die("'git checkout master' failed.")
+ if self.Git("pull") is None:
+ self.Die("'git pull' failed, please try again.")
+
+ self.RestoreIfUnset("trunk_revision")
+ args = "checkout -b v8-roll-%s" % self._state["trunk_revision"]
+ if self.Git(args) is None:
+ self.Die("Failed to checkout a new branch.")
+
+
+class UploadCL(Step):
+ MESSAGE = "Create and upload CL."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ os.chdir(self._state["chrome_path"])
+
+ # Patch DEPS file.
+ self.RestoreIfUnset("trunk_revision")
+ deps = FileToText(self.Config(DEPS_FILE))
+ deps = re.sub("(?<=\"v8_revision\": \")([0-9]+)(?=\")",
+ self._state["trunk_revision"],
+ deps)
+ TextToFile(deps, self.Config(DEPS_FILE))
+
+ self.RestoreVersionIfUnset()
+ ver = "%s.%s.%s" % (self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ if self._options and self._options.r:
+ print "Using account %s for review." % self._options.r
+ rev = self._options.r
+ else:
+ print "Please enter the email address of a reviewer for the roll CL: ",
+ self.DieInForcedMode("A reviewer must be specified in forced mode.")
+ rev = self.ReadLine()
+ args = "commit -am \"Update V8 to version %s.\n\nTBR=%s\"" % (ver, rev)
+ if self.Git(args) is None:
+ self.Die("'git commit' failed.")
+ force_flag = " -f" if self._options.f else ""
+ if self.Git("cl upload --send-mail%s" % force_flag, pipe=False) is None:
+ self.Die("'git cl upload' failed, please try again.")
+ print "CL uploaded."
+
+
+class SwitchV8(Step):
+ MESSAGE = "Returning to V8 checkout."
+ REQUIRES = "chrome_path"
+
+ def RunStep(self):
+ self.RestoreIfUnset("v8_path")
+ os.chdir(self._state["v8_path"])
+
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ self.RestoreVersionIfUnset()
+ ver = "%s.%s.%s" % (self._state["major"],
+ self._state["minor"],
+ self._state["build"])
+ self.RestoreIfUnset("trunk_revision")
+ self.RestoreIfUnset("chrome_path")
+
+ if self._state["chrome_path"]:
+ print("Congratulations, you have successfully created the trunk "
+ "revision %s and rolled it into Chromium. Please don't forget to "
+ "update the v8rel spreadsheet:" % ver)
+ else:
+ print("Congratulations, you have successfully created the trunk "
+ "revision %s. Please don't forget to roll this new version into "
+ "Chromium, and to update the v8rel spreadsheet:" % ver)
+ print "%s\ttrunk\t%s" % (ver, self._state["trunk_revision"])
+
+ self.CommonCleanup()
+ if self.Config(TRUNKBRANCH) != self._state["current_branch"]:
+ self.Git("branch -D %s" % self.Config(TRUNKBRANCH))
+
+
+def RunPushToTrunk(config,
+ options,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+ step_classes = [
+ Preparation,
+ FreshBranch,
+ DetectLastPush,
+ PrepareChangeLog,
+ EditChangeLog,
+ IncrementVersion,
+ CommitLocal,
+ UploadStep,
+ CommitRepository,
+ StragglerCommits,
+ SquashCommits,
+ NewBranch,
+ ApplyChanges,
+ SetVersion,
+ CommitTrunk,
+ SanityCheck,
+ CommitSVN,
+ TagRevision,
+ CheckChromium,
+ SwitchChromium,
+ UpdateChromiumCheckout,
+ UploadCL,
+ SwitchV8,
+ CleanUp,
+ ]
+
+ RunScript(step_classes, config, options, side_effect_handler)
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("-c", "--chromium", dest="c",
+ help=("Specify the path to your Chromium src/ "
+ "directory to automate the V8 roll."))
+ result.add_option("-f", "--force", dest="f",
+ help="Don't prompt the user.",
+ default=False, action="store_true")
+ result.add_option("-l", "--last-push", dest="l",
+ help=("Manually specify the git commit ID "
+ "of the last push to trunk."))
+ result.add_option("-r", "--reviewer", dest="r",
+ help=("Specify the account name to be used for reviews."))
+ result.add_option("-s", "--step", dest="s",
+ help="Specify the step where to start work. Default: 0.",
+ default=0, type="int")
+ return result
+
+
+def ProcessOptions(options):
+ if options.s < 0:
+ print "Bad step number %d" % options.s
+ return False
+ if options.f and not options.r:
+ print "A reviewer (-r) is required in forced mode."
+ return False
+ if options.f and not options.c:
+ print "A chromium checkout (-c) is required in forced mode."
+ return False
+ return True
+
+
+def Main():
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ if not ProcessOptions(options):
+ parser.print_help()
+ return 1
+ RunPushToTrunk(CONFIG, options)
+
+if __name__ == "__main__":
+ sys.exit(Main())
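
UploadCL edits the Chromium DEPS file purely textually: a lookbehind/lookahead pair swaps the number inside "v8_revision": "..." without touching the surrounding text. Standalone sketch (the revision values are made up):

    import re

    deps = '  "v8_revision": "123444",'
    deps = re.sub(r"(?<=\"v8_revision\": \")([0-9]+)(?=\")", "18123", deps)
    print deps  # prints:   "v8_revision": "18123",
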
diff --git a/chromium/v8/tools/push-to-trunk/test_scripts.py b/chromium/v8/tools/push-to-trunk/test_scripts.py
new file mode 100644
index 00000000000..acb28a097c7
--- /dev/null
+++ b/chromium/v8/tools/push-to-trunk/test_scripts.py
@@ -0,0 +1,730 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import tempfile
+import unittest
+
+import common_includes
+from common_includes import *
+import push_to_trunk
+from push_to_trunk import *
+import auto_roll
+
+
+TEST_CONFIG = {
+ BRANCHNAME: "test-prepare-push",
+ TRUNKBRANCH: "test-trunk-push",
+ PERSISTFILE_BASENAME: "/tmp/test-v8-push-to-trunk-tempfile",
+ TEMP_BRANCH: "test-prepare-push-temporary-branch-created-by-script",
+ DOT_GIT_LOCATION: None,
+ VERSION_FILE: None,
+ CHANGELOG_FILE: None,
+ CHANGELOG_ENTRY_FILE: "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
+ PATCH_FILE: "/tmp/test-v8-push-to-trunk-tempfile-patch",
+ COMMITMSG_FILE: "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
+ CHROMIUM: "/tmp/test-v8-push-to-trunk-tempfile-chromium",
+ DEPS_FILE: "/tmp/test-v8-push-to-trunk-tempfile-chromium/DEPS",
+}
+
+
+class ToplevelTest(unittest.TestCase):
+ def testMakeComment(self):
+ self.assertEquals("# Line 1\n# Line 2\n#",
+ MakeComment(" Line 1\n Line 2\n"))
+ self.assertEquals("#Line 1\n#Line 2",
+ MakeComment("Line 1\n Line 2"))
+
+ def testStripComments(self):
+ self.assertEquals(" Line 1\n Line 3\n",
+ StripComments(" Line 1\n# Line 2\n Line 3\n#\n"))
+ self.assertEquals("\nLine 2 ### Test\n #",
+ StripComments("###\n# \n\n# Line 1\nLine 2 ### Test\n #"))
+
+ def testMakeChangeLogBodySimple(self):
+ commits = [
+ ["Title text 1",
+ "Title text 1\n\nBUG=\n",
+ "author1@chromium.org"],
+ ["Title text 2.",
+ "Title text 2\n\nBUG=1234\n",
+ "author2@chromium.org"],
+ ]
+ self.assertEquals(" Title text 1.\n"
+ " (author1@chromium.org)\n\n"
+ " Title text 2 (Chromium issue 1234).\n"
+ " (author2@chromium.org)\n\n",
+ MakeChangeLogBody(commits))
+
+ def testMakeChangeLogBodyEmpty(self):
+ self.assertEquals("", MakeChangeLogBody([]))
+
+ def testMakeChangeLogBodyAutoFormat(self):
+ commits = [
+ ["Title text 1!",
+ "Title text 1\nLOG=y\nBUG=\n",
+ "author1@chromium.org"],
+ ["Title text 2",
+ "Title text 2\n\nBUG=1234\n",
+ "author2@chromium.org"],
+ ["Title text 3",
+ "Title text 3\n\nBUG=1234\nLOG = Yes\n",
+ "author3@chromium.org"],
+ ["Title text 3",
+ "Title text 4\n\nBUG=1234\nLOG=\n",
+ "author4@chromium.org"],
+ ]
+ self.assertEquals(" Title text 1.\n\n"
+ " Title text 3 (Chromium issue 1234).\n\n",
+ MakeChangeLogBody(commits, True))
+
+ def testMakeChangeLogBugReferenceEmpty(self):
+ self.assertEquals("", MakeChangeLogBugReference(""))
+ self.assertEquals("", MakeChangeLogBugReference("LOG="))
+ self.assertEquals("", MakeChangeLogBugReference(" BUG ="))
+ self.assertEquals("", MakeChangeLogBugReference("BUG=none\t"))
+
+ def testMakeChangeLogBugReferenceSimple(self):
+ self.assertEquals("(issue 987654)",
+ MakeChangeLogBugReference("BUG = v8:987654"))
+ self.assertEquals("(Chromium issue 987654)",
+ MakeChangeLogBugReference("BUG=987654 "))
+
+ def testMakeChangeLogBugReferenceFromBody(self):
+ self.assertEquals("(Chromium issue 1234567)",
+ MakeChangeLogBugReference("Title\n\nTBR=\nBUG=\n"
+ " BUG=\tchromium:1234567\t\n"
+ "R=somebody\n"))
+
+ def testMakeChangeLogBugReferenceMultiple(self):
+ # All issues should be sorted and grouped. Multiple references to the same
+ # issue should be filtered.
+ self.assertEquals("(issues 123, 234, Chromium issue 345)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=v8:234\n"
+ " BUG\t= 345, \tv8:234,\n"
+ "BUG=v8:123\n"
+ "R=somebody\n"))
+ self.assertEquals("(Chromium issues 123, 234)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=234,,chromium:123 \n"
+ "R=somebody\n"))
+ self.assertEquals("(Chromium issues 123, 234)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=chromium:234, , 123\n"
+ "R=somebody\n"))
+ self.assertEquals("(issues 345, 456)",
+ MakeChangeLogBugReference("Title\n\n"
+ "\t\tBUG=v8:345,v8:456\n"
+ "R=somebody\n"))
+ self.assertEquals("(issue 123, Chromium issues 345, 456)",
+ MakeChangeLogBugReference("Title\n\n"
+ "BUG=chromium:456\n"
+ "BUG = none\n"
+ "R=somebody\n"
+ "BUG=456,v8:123, 345"))
+
+  # TODO(machenbach): These tests don't make much sense when the formatting is
+ # done later.
+ def testMakeChangeLogBugReferenceLong(self):
+ # -----------------00--------10--------20--------30--------
+ self.assertEquals("(issues 234, 1234567890, 1234567"
+ "8901234567890, Chromium issues 12345678,"
+ " 123456789)",
+ MakeChangeLogBugReference("BUG=v8:234\n"
+ "BUG=v8:1234567890\n"
+ "BUG=v8:12345678901234567890\n"
+ "BUG=123456789\n"
+ "BUG=12345678\n"))
+ # -----------------00--------10--------20--------30--------
+ self.assertEquals("(issues 234, 1234567890, 1234567"
+ "8901234567890, Chromium issues"
+ " 123456789, 1234567890)",
+ MakeChangeLogBugReference("BUG=v8:234\n"
+ "BUG=v8:12345678901234567890\n"
+ "BUG=v8:1234567890\n"
+ "BUG=123456789\n"
+ "BUG=1234567890\n"))
+ # -----------------00--------10--------20--------30--------
+ self.assertEquals("(Chromium issues 234, 1234567890"
+ ", 12345678901234567, "
+ "1234567890123456789)",
+ MakeChangeLogBugReference("BUG=234\n"
+ "BUG=12345678901234567\n"
+ "BUG=1234567890123456789\n"
+ "BUG=1234567890\n"))
+
+
+class SimpleMock(object):
+ def __init__(self, name):
+ self._name = name
+ self._recipe = []
+ self._index = -1
+
+ def Expect(self, recipe):
+ self._recipe = recipe
+
+ def Call(self, *args):
+ self._index += 1
+ try:
+ expected_call = self._recipe[self._index]
+ except IndexError:
+ raise Exception("Calling %s %s" % (self._name, " ".join(args)))
+
+ # Pack expectations without arguments into a list.
+ if not isinstance(expected_call, list):
+ expected_call = [expected_call]
+
+ # The number of arguments in the expectation must match the actual
+ # arguments.
+ if len(args) > len(expected_call):
+ raise Exception("When calling %s with arguments, the expectations "
+ "must consist of at least as many arguments.")
+
+ # Compare expected and actual arguments.
+ for (expected_arg, actual_arg) in zip(expected_call, args):
+ if expected_arg != actual_arg:
+ raise Exception("Expected: %s - Actual: %s"
+ % (expected_arg, actual_arg))
+
+ # The expectation list contains a mandatory return value and an optional
+ # callback for checking the context at the time of the call.
+ if len(expected_call) == len(args) + 2:
+ expected_call[len(args) + 1]()
+ return_value = expected_call[len(args)]
+
+ # If the return value is an exception, raise it instead of returning.
+ if isinstance(return_value, Exception):
+ raise return_value
+ return return_value
+
+ def AssertFinished(self):
+    if self._index < len(self._recipe) - 1:
+ raise Exception("Called %s too seldom: %d vs. %d"
+ % (self._name, self._index, len(self._recipe)))
+
+
+class ScriptTest(unittest.TestCase):
+ def MakeEmptyTempFile(self):
+ handle, name = tempfile.mkstemp()
+ os.close(handle)
+ self._tmp_files.append(name)
+ return name
+
+ def MakeTempVersionFile(self):
+ name = self.MakeEmptyTempFile()
+ with open(name, "w") as f:
+ f.write(" // Some line...\n")
+ f.write("\n")
+ f.write("#define MAJOR_VERSION 3\n")
+ f.write("#define MINOR_VERSION 22\n")
+ f.write("#define BUILD_NUMBER 5\n")
+ f.write("#define PATCH_LEVEL 0\n")
+ f.write(" // Some line...\n")
+ f.write("#define IS_CANDIDATE_VERSION 0\n")
+ return name
+
+ def MakeStep(self, step_class=Step, state=None):
+ """Convenience wrapper."""
+ return MakeStep(step_class=step_class, number=0, state=state,
+ config=TEST_CONFIG, options=None, side_effect_handler=self)
+
+ def GitMock(self, cmd, args="", pipe=True):
+ return self._git_mock.Call(args)
+
+ def LogMock(self, cmd, args=""):
+ print "Log: %s %s" % (cmd, args)
+
+ MOCKS = {
+ "git": GitMock,
+ "vi": LogMock,
+ }
+
+ def Command(self, cmd, args="", prefix="", pipe=True):
+ return ScriptTest.MOCKS[cmd](self, cmd, args)
+
+ def ReadLine(self):
+ return self._rl_mock.Call()
+
+ def ReadURL(self, url):
+ return self._url_mock.Call(url)
+
+ def Sleep(self, seconds):
+ pass
+
+ def ExpectGit(self, *args):
+ """Convenience wrapper."""
+ self._git_mock.Expect(*args)
+
+ def ExpectReadline(self, *args):
+ """Convenience wrapper."""
+ self._rl_mock.Expect(*args)
+
+ def ExpectReadURL(self, *args):
+ """Convenience wrapper."""
+ self._url_mock.Expect(*args)
+
+ def setUp(self):
+ self._git_mock = SimpleMock("git")
+ self._rl_mock = SimpleMock("readline")
+ self._url_mock = SimpleMock("readurl")
+ self._tmp_files = []
+
+ def tearDown(self):
+ Command("rm", "-rf %s*" % TEST_CONFIG[PERSISTFILE_BASENAME])
+
+ # Clean up temps. Doesn't work automatically.
+ for name in self._tmp_files:
+ if os.path.exists(name):
+ os.remove(name)
+
+ self._git_mock.AssertFinished()
+ self._rl_mock.AssertFinished()
+ self._url_mock.AssertFinished()
+
+ def testPersistRestore(self):
+ self.MakeStep().Persist("test1", "")
+ self.assertEquals("", self.MakeStep().Restore("test1"))
+ self.MakeStep().Persist("test2", "AB123")
+ self.assertEquals("AB123", self.MakeStep().Restore("test2"))
+
+ def testGitOrig(self):
+ self.assertTrue(Command("git", "--version").startswith("git version"))
+
+ def testGitMock(self):
+ self.ExpectGit([["--version", "git version 1.2.3"], ["dummy", ""]])
+ self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
+ self.assertEquals("", self.MakeStep().Git("dummy"))
+
+ def testCommonPrepareDefault(self):
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
+ ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch", ""],
+ ])
+ self.ExpectReadline(["Y"])
+ self.MakeStep().CommonPrepare()
+ self.MakeStep().PrepareBranch()
+ self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+
+ def testCommonPrepareNoConfirm(self):
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
+ ])
+ self.ExpectReadline(["n"])
+ self.MakeStep().CommonPrepare()
+ self.assertRaises(Exception, self.MakeStep().PrepareBranch)
+ self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+
+ def testCommonPrepareDeleteBranchFailure(self):
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
+ ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], None],
+ ])
+ self.ExpectReadline(["Y"])
+ self.MakeStep().CommonPrepare()
+ self.assertRaises(Exception, self.MakeStep().PrepareBranch)
+ self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
+
+ def testInitialEnvironmentChecks(self):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ os.environ["EDITOR"] = "vi"
+ self.MakeStep().InitialEnvironmentChecks()
+
+ def testReadAndPersistVersion(self):
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ step = self.MakeStep()
+ step.ReadAndPersistVersion()
+ self.assertEquals("3", self.MakeStep().Restore("major"))
+ self.assertEquals("22", self.MakeStep().Restore("minor"))
+ self.assertEquals("5", self.MakeStep().Restore("build"))
+ self.assertEquals("0", self.MakeStep().Restore("patch"))
+ self.assertEquals("3", step._state["major"])
+ self.assertEquals("22", step._state["minor"])
+ self.assertEquals("5", step._state["build"])
+ self.assertEquals("0", step._state["patch"])
+
+ def testRegex(self):
+ self.assertEqual("(issue 321)",
+ re.sub(r"BUG=v8:(.*)$", r"(issue \1)", "BUG=v8:321"))
+ self.assertEqual("(Chromium issue 321)",
+ re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", "BUG=321"))
+
+ cl = " too little\n\ttab\ttab\n too much\n trailing "
+ cl = MSub(r"\t", r" ", cl)
+ cl = MSub(r"^ {1,7}([^ ])", r" \1", cl)
+ cl = MSub(r"^ {9,80}([^ ])", r" \1", cl)
+ cl = MSub(r" +$", r"", cl)
+ self.assertEqual(" too little\n"
+ " tab tab\n"
+ " too much\n"
+ " trailing", cl)
+
+ self.assertEqual("//\n#define BUILD_NUMBER 3\n",
+ MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
+ r"\g<space>3",
+ "//\n#define BUILD_NUMBER 321\n"))
+
+ def testPrepareChangeLog(self):
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+
+ self.ExpectGit([
+ ["log 1234..HEAD --format=%H", "rev1\nrev2\nrev3\nrev4"],
+ ["log -1 rev1 --format=\"%s\"", "Title text 1"],
+ ["log -1 rev1 --format=\"%B\"", "Title\n\nBUG=\nLOG=y\n"],
+ ["log -1 rev1 --format=\"%an\"", "author1@chromium.org"],
+ ["log -1 rev2 --format=\"%s\"", "Title text 2."],
+ ["log -1 rev2 --format=\"%B\"", "Title\n\nBUG=123\nLOG= \n"],
+ ["log -1 rev2 --format=\"%an\"", "author2@chromium.org"],
+ ["log -1 rev3 --format=\"%s\"", "Title text 3"],
+ ["log -1 rev3 --format=\"%B\"", "Title\n\nBUG=321\nLOG=true\n"],
+ ["log -1 rev3 --format=\"%an\"", "author3@chromium.org"],
+ ["log -1 rev4 --format=\"%s\"", "Title text 4"],
+ ["log -1 rev4 --format=\"%B\"",
+ ("Title\n\nBUG=456\nLOG=Y\n\n"
+ "Review URL: https://codereview.chromium.org/9876543210\n")],
+ ["log -1 rev4 --format=\"%an\"", "author4@chromium.org"],
+ ])
+
+ # The cl for rev4 on rietveld has an updated LOG flag.
+ self.ExpectReadURL([
+ ["https://codereview.chromium.org/9876543210/description",
+ "Title\n\nBUG=456\nLOG=N\n\n"],
+ ])
+
+ self.MakeStep().Persist("last_push", "1234")
+ self.MakeStep(PrepareChangeLog).Run()
+
+ actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+
+ # TODO(machenbach): Mock out call to date() in order to make a fixed
+ # comparison here instead of a regexp match.
+ expected_cl = """\\d+\\-\\d+\\-\\d+: Version 3\\.22\\.5
+
+ Title text 1.
+
+ Title text 3 \\(Chromium issue 321\\).
+
+ Performance and stability improvements on all platforms\\.
+#
+# The change log above is auto-generated\\. Please review if all relevant
+# commit messages from the list below are included\\.
+# All lines starting with # will be stripped\\.
+#
+# Title text 1.
+# \\(author1@chromium\\.org\\)
+#
+# Title text 2 \\(Chromium issue 123\\).
+# \\(author2@chromium\\.org\\)
+#
+# Title text 3 \\(Chromium issue 321\\).
+# \\(author3@chromium\\.org\\)
+#
+# Title text 4 \\(Chromium issue 456\\).
+# \\(author4@chromium\\.org\\)
+#
+#"""
+
+ self.assertTrue(re.match(expected_cl, actual_cl))
+ self.assertEquals("3", self.MakeStep().Restore("major"))
+ self.assertEquals("22", self.MakeStep().Restore("minor"))
+ self.assertEquals("5", self.MakeStep().Restore("build"))
+ self.assertEquals("0", self.MakeStep().Restore("patch"))
+
+ def testEditChangeLog(self):
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+ TextToFile(" Original CL", TEST_CONFIG[CHANGELOG_FILE])
+ TextToFile(" New \n\tLines \n", TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+ os.environ["EDITOR"] = "vi"
+
+ self.ExpectReadline([
+ "", # Open editor.
+ ])
+
+ self.MakeStep(EditChangeLog).Run()
+
+ self.assertEquals("New\n Lines\n\n\n Original CL",
+ FileToText(TEST_CONFIG[CHANGELOG_FILE]))
+
+ def testIncrementVersion(self):
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ self.MakeStep().Persist("build", "5")
+
+ self.ExpectReadline([
+ "Y", # Increment build number.
+ ])
+
+ self.MakeStep(IncrementVersion).Run()
+
+ self.assertEquals("3", self.MakeStep().Restore("new_major"))
+ self.assertEquals("22", self.MakeStep().Restore("new_minor"))
+ self.assertEquals("6", self.MakeStep().Restore("new_build"))
+ self.assertEquals("0", self.MakeStep().Restore("new_patch"))
+
+ def testLastChangeLogEntries(self):
+ TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+ l = """
+ Fixed something.
+ (issue 1234)\n"""
+ for _ in xrange(10): l = l + l
+
+ cl_chunk = """2013-11-12: Version 3.23.2\n%s
+ Performance and stability improvements on all platforms.\n\n\n""" % l
+
+ cl_chunk_full = cl_chunk + cl_chunk + cl_chunk
+ TextToFile(cl_chunk_full, TEST_CONFIG[CHANGELOG_FILE])
+
+ cl = GetLastChangeLogEntries(TEST_CONFIG[CHANGELOG_FILE])
+ self.assertEquals(cl_chunk, cl)
+
+ def testSquashCommits(self):
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+ with open(TEST_CONFIG[CHANGELOG_ENTRY_FILE], "w") as f:
+ f.write("1999-11-11: Version 3.22.5\n")
+ f.write("\n")
+ f.write(" Log text 1.\n")
+ f.write(" Chromium issue 12345\n")
+ f.write("\n")
+ f.write(" Performance and stability improvements on all "
+ "platforms.\n")
+
+ self.ExpectGit([
+ ["diff svn/trunk hash1", "patch content"],
+ ])
+
+ self.MakeStep().Persist("prepare_commit_hash", "hash1")
+ self.MakeStep().Persist("date", "1999-11-11")
+
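+    # SquashCommits should fold the indented entry onto single lines and
+    # drop the date stamp; the assertions below check both.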
+ self.MakeStep(SquashCommits).Run()
+
+ msg = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+ self.assertTrue(re.search(r"Version 3\.22\.5", msg))
+ self.assertTrue(re.search(r"Performance and stability", msg))
+ self.assertTrue(re.search(r"Log text 1\. Chromium issue 12345", msg))
+ self.assertFalse(re.search(r"\d+\-\d+\-\d+", msg))
+
+    patch = FileToText(TEST_CONFIG[PATCH_FILE])
+ self.assertTrue(re.search(r"patch content", patch))
+
+ def _PushToTrunk(self, force=False):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+ TEST_CONFIG[VERSION_FILE] = self.MakeTempVersionFile()
+ TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+ TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+ if not os.path.exists(TEST_CONFIG[CHROMIUM]):
+ os.makedirs(TEST_CONFIG[CHROMIUM])
+ TextToFile("1999-04-05: Version 3.22.4", TEST_CONFIG[CHANGELOG_FILE])
+ TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
+ TEST_CONFIG[DEPS_FILE])
+ os.environ["EDITOR"] = "vi"
+
+ def CheckPreparePush():
+ cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+ self.assertTrue(re.search(r"Version 3.22.5", cl))
+ self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
+ self.assertFalse(re.search(r" \(author1@chromium\.org\)", cl))
+
+ # Make sure all comments got stripped.
+ self.assertFalse(re.search(r"^#", cl, flags=re.M))
+
+ version = FileToText(TEST_CONFIG[VERSION_FILE])
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+6", version))
+
+ def CheckUpload():
+ cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+
+ def CheckSVNCommit():
+ commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+ self.assertTrue(re.search(r"Version 3.22.5", commit))
+ self.assertTrue(re.search(r"Log text 1 \(issue 321\).", commit))
+ version = FileToText(TEST_CONFIG[VERSION_FILE])
+ self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
+ self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
+ self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
+ self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+
+ force_flag = " -f" if force else ""
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch\n"],
+ ["svn fetch", ""],
+ ["branch", " branch1\n* branch2\n"],
+ ["checkout -b %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch", " branch1\n* branch2\n"],
+ ["branch", " branch1\n* branch2\n"],
+ ["checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""],
+ ["log -1 --format=%H ChangeLog", "1234\n"],
+ ["log -1 1234", "Last push ouput\n"],
+ ["log 1234..HEAD --format=%H", "rev1\n"],
+ ["log -1 rev1 --format=\"%s\"", "Log text 1.\n"],
+ ["log -1 rev1 --format=\"%B\"", "Text\nLOG=YES\nBUG=v8:321\nText\n"],
+ ["log -1 rev1 --format=\"%an\"", "author1@chromium.org\n"],
+ [("commit -a -m \"Prepare push to trunk. "
+ "Now working on version 3.22.6.\""),
+ " 2 files changed\n",
+ CheckPreparePush],
+ ["cl upload -r \"reviewer@chromium.org\" --send-mail%s" % force_flag,
+ "done\n"],
+ ["cl dcommit -f", "Closing issue\n"],
+ ["svn fetch", "fetch result\n"],
+ ["checkout svn/bleeding_edge", ""],
+ [("log -1 --format=%H --grep=\"Prepare push to trunk. "
+ "Now working on version 3.22.6.\""),
+ "hash1\n"],
+ ["diff svn/trunk hash1", "patch content\n"],
+ ["checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], ""],
+ ["apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""],
+ ["add \"%s\"" % TEST_CONFIG[VERSION_FILE], ""],
+ ["commit -F \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "", CheckSVNCommit],
+ ["svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"],
+ ["svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""],
+ ["status -s -uno", ""],
+ ["checkout master", ""],
+ ["pull", ""],
+ ["checkout -b v8-roll-123456", ""],
+ [("commit -am \"Update V8 to version 3.22.5.\n\n"
+ "TBR=reviewer@chromium.org\""),
+ ""],
+ ["cl upload --send-mail%s" % force_flag, ""],
+ ["checkout -f some_branch", ""],
+ ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], ""],
+ ["branch -D %s" % TEST_CONFIG[BRANCHNAME], ""],
+ ["branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""],
+ ])
+ self.ExpectReadline([
+ "Y", # Confirm last push.
+ "", # Open editor.
+ "Y", # Increment build number.
+ "reviewer@chromium.org", # V8 reviewer.
+ "LGTX", # Enter LGTM for V8 CL (wrong).
+ "LGTM", # Enter LGTM for V8 CL.
+ "Y", # Sanity check.
+ "reviewer@chromium.org", # Chromium reviewer.
+ ])
+ if force:
+ # TODO(machenbach): The lgtm for the prepare push is just temporary.
+ # There should be no user input in "force" mode.
+ self.ExpectReadline([
+ "LGTM", # Enter LGTM for V8 CL.
+ ])
+
+    class Options(object):
+ pass
+
+ options = Options()
+ options.s = 0
+ options.l = None
+ options.f = force
+ options.r = "reviewer@chromium.org" if force else None
+ options.c = TEST_CONFIG[CHROMIUM]
+ RunPushToTrunk(TEST_CONFIG, options, self)
+
+ deps = FileToText(TEST_CONFIG[DEPS_FILE])
+ self.assertTrue(re.search("\"v8_revision\": \"123456\"", deps))
+
+ cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+ self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
+ self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
+ self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
+
+    # Note: The version file is back on build number 5 at the end of this
+    # test, since the git command that merges to the bleeding edge branch
+    # is mocked out.
+
+ def testPushToTrunk(self):
+ self._PushToTrunk()
+
+ def testPushToTrunkForced(self):
+ self._PushToTrunk(force=True)
+
+ def testAutoRoll(self):
+ TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+
+ # TODO(machenbach): Get rid of the editor check in automatic mode.
+ os.environ["EDITOR"] = "vi"
+
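+    # Two responses are queued for the same URL: the first simulates a
+    # network failure, after which the script is expected to retry and
+    # read the lkgr value.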
+ self.ExpectReadURL([
+ ["https://v8-status.appspot.com/lkgr", Exception("Network problem")],
+ ["https://v8-status.appspot.com/lkgr", "100"],
+ ])
+
+ self.ExpectGit([
+ ["status -s -uno", ""],
+ ["status -s -b -uno", "## some_branch\n"],
+ ["svn fetch", ""],
+ ["svn log -1 --oneline", "r101 | Text"],
+ ])
+
+ # TODO(machenbach): Make a convenience wrapper for this.
+    class Options(object):
+ pass
+
+ options = Options()
+ options.s = 0
+
+ auto_roll.RunAutoRoll(TEST_CONFIG, options, self)
+
+ self.assertEquals("100", self.MakeStep().Restore("lkgr"))
+ self.assertEquals("101", self.MakeStep().Restore("latest"))
+
+
+class SystemTest(unittest.TestCase):
+ def testReload(self):
+ step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={},
+ options=None,
+ side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER)
+ body = step.Reload(
+"""------------------------------------------------------------------------
+r17997 | machenbach@chromium.org | 2013-11-22 11:04:04 +0100 (...) | 6 lines
+
+Prepare push to trunk. Now working on version 3.23.11.
+
+R=danno@chromium.org
+
+Review URL: https://codereview.chromium.org/83173002
+
+------------------------------------------------------------------------""")
+ self.assertEquals(
+"""Prepare push to trunk. Now working on version 3.23.11.
+
+R=danno@chromium.org
+
+Committed: https://code.google.com/p/v8/source/detail?r=17997""", body)
diff --git a/chromium/v8/tools/run-deopt-fuzzer.py b/chromium/v8/tools/run-deopt-fuzzer.py
index f8cc93748ab..292cf002f9b 100755
--- a/chromium/v8/tools/run-deopt-fuzzer.py
+++ b/chromium/v8/tools/run-deopt-fuzzer.py
@@ -358,7 +358,8 @@ def Execute(arch, mode, args, options, suites, workspace):
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
- options.extra_flags)
+ options.extra_flags,
+ False)
# Find available test suites and read test cases from them.
variables = {
@@ -367,6 +368,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"system": utils.GuessOS(),
"isolates": options.isolates,
"deopt_fuzzer": True,
+ "no_i18n": False,
}
all_tests = []
num_tests = 0
diff --git a/chromium/v8/tools/run-tests.py b/chromium/v8/tools/run-tests.py
index 48682d4444e..2344f907ec8 100755
--- a/chromium/v8/tools/run-tests.py
+++ b/chromium/v8/tools/run-tests.py
@@ -28,6 +28,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import itertools
import multiprocessing
import optparse
import os
@@ -53,9 +54,13 @@ TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
# Use this to run several variants of the tests.
-VARIANT_FLAGS = [[],
- ["--stress-opt", "--always-opt"],
- ["--nocrankshaft"]]
+VARIANT_FLAGS = {
+ "default": [],
+ "stress": ["--stress-opt", "--always-opt"],
+ "nocrankshaft": ["--nocrankshaft"]}
+
+VARIANTS = ["default", "stress", "nocrankshaft"]
+
MODE_FLAGS = {
"debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
@@ -97,6 +102,12 @@ def BuildOptions():
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="dontcare")
+ result.add_option("--slow-tests",
+ help="Regard slow tests (run|skip|dontcare)",
+ default="dontcare")
+ result.add_option("--pass-fail-tests",
+ help="Regard pass|fail tests (run|skip|dontcare)",
+ default="dontcare")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
@@ -112,6 +123,9 @@ def BuildOptions():
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default="release,debug")
+ result.add_option("--no-i18n", "--noi18n",
+ help="Skip internationalization tests",
+ default=False, action="store_true")
result.add_option("--no-network", "--nonetwork",
help="Don't distribute tests on the network",
default=(utils.GuessOS() != "linux"),
@@ -122,6 +136,11 @@ def BuildOptions():
result.add_option("--no-stress", "--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, dest="no_stress", action="store_true")
+ result.add_option("--no-variants", "--novariants",
+ help="Don't run any testing variants",
+ default=False, dest="no_variants", action="store_true")
+ result.add_option("--variants",
+ help="Comma-separated list of testing variants")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("-p", "--progress",
@@ -161,12 +180,14 @@ def BuildOptions():
def ProcessOptions(options):
global VARIANT_FLAGS
+ global VARIANTS
# Architecture and mode related stuff.
if options.arch_and_mode:
- tokens = options.arch_and_mode.split(".")
- options.arch = tokens[0]
- options.mode = tokens[1]
+ options.arch_and_mode = [arch_and_mode.split(".")
+ for arch_and_mode in options.arch_and_mode.split(",")]
+ options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
+ options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
options.mode = options.mode.split(",")
for mode in options.mode:
if not mode.lower() in ["debug", "release"]:
@@ -180,6 +201,11 @@ def ProcessOptions(options):
print "Unknown architecture %s" % arch
return False
+ # Store the final configuration in arch_and_mode list. Don't overwrite
+ # predefined arch_and_mode since it is more expressive than arch and mode.
+ if not options.arch_and_mode:
+ options.arch_and_mode = itertools.product(options.arch, options.mode)
+
# Special processing of other options, sorted alphabetically.
if options.buildbot:
@@ -194,22 +220,49 @@ def ProcessOptions(options):
options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0:
options.j = multiprocessing.cpu_count()
+
+ def excl(*args):
+ """Returns true if zero or one of multiple arguments are true."""
+ return reduce(lambda x, y: x + y, args) <= 1
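+  # For instance, excl(True, False) is True while excl(True, True) is False.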
+
+ if not excl(options.no_stress, options.stress_only, options.no_variants,
+ bool(options.variants)):
+ print("Use only one of --no-stress, --stress-only, --no-variants or "
+ "--variants.")
+ return False
if options.no_stress:
- VARIANT_FLAGS = [[], ["--nocrankshaft"]]
+ VARIANTS = ["default", "nocrankshaft"]
+ if options.no_variants:
+ VARIANTS = ["default"]
+ if options.stress_only:
+ VARIANTS = ["stress"]
+ if options.variants:
+ VARIANTS = options.variants.split(",")
+ if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
+ print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
+ return False
if not options.shell_dir:
if options.shell:
print "Warning: --shell is deprecated, use --shell-dir instead."
options.shell_dir = os.path.dirname(options.shell)
- if options.stress_only:
- VARIANT_FLAGS = [["--stress-opt", "--always-opt"]]
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
# This is OK for distributed running, so we don't need to set no_network.
options.command_prefix = (["python", "-u", run_valgrind] +
options.command_prefix)
- if not options.flaky_tests in ["run", "skip", "dontcare"]:
- print "Unknown flaky test mode %s" % options.flaky_tests
+ def CheckTestMode(name, option):
+ if not option in ["run", "skip", "dontcare"]:
+ print "Unknown %s mode %s" % (name, option)
+ return False
+ return True
+ if not CheckTestMode("flaky test", options.flaky_tests):
+ return False
+ if not CheckTestMode("slow test", options.slow_tests):
return False
+ if not CheckTestMode("pass|fail test", options.pass_fail_tests):
+ return False
+ if not options.no_i18n:
+ DEFAULT_TESTS.append("intl")
return True
@@ -247,14 +300,14 @@ def Main():
suite_paths = utils.GetSuitePaths(join(workspace, "test"))
if len(args) == 0:
- suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
+ suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
else:
args_suites = set()
for arg in args:
suite = arg.split(os.path.sep)[0]
if not suite in args_suites:
args_suites.add(suite)
- suite_paths = [ s for s in suite_paths if s in args_suites ]
+ suite_paths = [ s for s in args_suites if s in suite_paths ]
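+    # Iterating over args_suites (and DEFAULT_TESTS above) instead of
+    # suite_paths presumably preserves the callers' intended suite order.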
suites = []
for root in suite_paths:
@@ -267,10 +320,9 @@ def Main():
for s in suites:
s.DownloadData()
- for mode in options.mode:
- for arch in options.arch:
- code = Execute(arch, mode, args, options, suites, workspace)
- exit_code = exit_code or code
+ for (arch, mode) in options.arch_and_mode:
+ code = Execute(arch, mode, args, options, suites, workspace)
+ exit_code = exit_code or code
return exit_code
@@ -302,7 +354,8 @@ def Execute(arch, mode, args, options, suites, workspace):
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
- options.extra_flags)
+ options.extra_flags,
+ options.no_i18n)
# Find available test suites and read test cases from them.
variables = {
@@ -311,6 +364,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"system": utils.GuessOS(),
"isolates": options.isolates,
"deopt_fuzzer": False,
+ "no_i18n": options.no_i18n,
}
all_tests = []
num_tests = 0
@@ -321,12 +375,15 @@ def Execute(arch, mode, args, options, suites, workspace):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
- s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests)
+ s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
+ options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
- variant_flags = s.VariantFlags() or VARIANT_FLAGS
- s.tests = [ t.CopyAddingFlags(v) for t in s.tests for v in variant_flags ]
+ variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
+ s.tests = [ t.CopyAddingFlags(v)
+ for t in s.tests
+ for v in s.VariantFlags(t, variant_flags) ]
s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
num_tests += len(s.tests)
for t in s.tests:
diff --git a/chromium/v8/tools/sodium/index.html b/chromium/v8/tools/sodium/index.html
new file mode 100644
index 00000000000..cbfe49902d0
--- /dev/null
+++ b/chromium/v8/tools/sodium/index.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Sodium</title>
+ <meta charset="utf-8">
+ <link href="styles.css" rel="stylesheet" type="text/css">
+ </head>
+ <script src="https://google-code-prettify.googlecode.com/svn/loader/run_prettify.js"></script>
+ <script src="./sodium.js"></script>
+ <script type="text/javascript"></script>
+ <body>
+ <table style='top:5px; width:100%;'>
+ <tr><td id='table-header'>
+ <input type='file' id='log-file-id' />
+ <select id="kind-selector-id" onchange="Sodium.kindChangedHandler(this);"></select><br>
+ <select id="function-selector-id" onchange="Sodium.functionChangedHandler();"></select>
+ </td></tr>
+ <tr>
+ <table style='height:90%;'>
+ <tr>
+ <td id='asm-container'>
+ <div id='asm-text'></div>
+ </td>
+ <td id='source-container'>
+ <div id='source-text'><pre id='source-text-pre'/></div>
+ </td>
+ </tr>
+ </table>
+ </tr>
+ </table>
+ <script>
+ Sodium.buildFunctionKindSelector(document.getElementById('kind-selector-id'));
+ document.getElementById('log-file-id').addEventListener('change', Sodium.readLog, false);
+ </script>
+ </body>
+</html>
diff --git a/chromium/v8/tools/sodium/sodium.js b/chromium/v8/tools/sodium/sodium.js
new file mode 100644
index 00000000000..44475a177f7
--- /dev/null
+++ b/chromium/v8/tools/sodium/sodium.js
@@ -0,0 +1,409 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var Sodium = (function() {
+ "use strict";
+
+ var kinds = ["FUNCTION", "OPTIMIZED_FUNCTION", "STUB", "BUILTIN",
+ "LOAD_IC", "KEYED_LOAD_IC", "CALL_IC", "KEYED_CALL_IC",
+ "STORE_IC", "KEYED_STORE_IC", "BINARY_OP_IC", "COMPARE_IC",
+ "COMPARE_NIL_IC", "TO_BOOLEAN_IC"];
+ var kindsWithSource = {
+ 'FUNCTION': true,
+ 'OPTIMIZED_FUNCTION': true
+ };
+
+ var addressRegEx = "0x[0-9a-f]{8,16}";
+ var nameFinder = new RegExp("^name = (.+)$");
+ var kindFinder = new RegExp("^kind = (.+)$");
+ var firstPositionFinder = new RegExp("^source_position = (\\d+)$");
+ var separatorFilter = new RegExp("^--- (.)+ ---$");
+ var rawSourceFilter = new RegExp("^--- Raw source ---$");
+ var codeEndFinder = new RegExp("^--- End code ---$");
+ var whiteSpaceLineFinder = new RegExp("^\\W*$");
+ var instructionBeginFinder =
+ new RegExp("^Instructions\\W+\\(size = \\d+\\)");
+ var instructionFinder =
+ new RegExp("^\(" + addressRegEx + "\)\(\\W+\\d+\\W+.+\)");
+ var positionFinder =
+ new RegExp("^(" + addressRegEx + ")\\W+position\\W+\\((\\d+)\\)");
+ var addressFinder = new RegExp("\(" + addressRegEx + "\)");
+ var addressReplacer = new RegExp("\(" + addressRegEx + "\)", "gi");
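+  // Illustrative examples of lines these patterns are meant to match:
+  //   0x2b83d8e06c80     12  mov eax,ebx         (instructionFinder)
+  //   0x2b83d8e06c80     position  (1223)        (positionFinder)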
+
+ var fileContent = "";
+ var selectedFunctionKind = "";
+ var currentFunctionKind = "";
+
+ var currentFunctionName = "";
+ var firstSourcePosition = 0;
+ var startAddress = "";
+ var readingSource = false;
+ var readingAsm = false;
+ var sourceBegin = -1;
+ var sourceEnd = -1;
+ var asmBegin = -1;
+ var asmEnd = -1;
+ var codeObjects = [];
+ var selectedAsm = null;
+ var selectedSource = null;
+ var selectedSourceClass = "";
+
+ function Code(name, kind, sourceBegin, sourceEnd, asmBegin, asmEnd,
+ firstSourcePosition, startAddress) {
+ this.name = name;
+ this.kind = kind;
+ this.sourceBegin = sourceBegin;
+ this.sourceEnd = sourceEnd;
+ this.asmBegin = asmBegin;
+ this.asmEnd = asmEnd;
+ this.firstSourcePosition = firstSourcePosition;
+ this.startAddress = startAddress;
+ }
+
+ function getCurrentCodeObject() {
+ var functionSelect = document.getElementById('function-selector-id');
+ return functionSelect.options[functionSelect.selectedIndex].codeObject;
+ }
+
+ function getCurrentSourceText() {
+ var code = getCurrentCodeObject();
+ if (code.sourceBegin == -1 || code.sourceEnd == -1) return "";
+ return fileContent.substring(code.sourceBegin, code.sourceEnd);
+ }
+
+ function getCurrentAsmText() {
+ var code = getCurrentCodeObject();
+ if (code.asmBegin == -1 || code.asmEnd == -1) return "";
+ return fileContent.substring(code.asmBegin, code.asmEnd);
+ }
+
+ function setKindByIndex(index) {
+ selectedFunctionKind = kinds[index];
+ }
+
+ function processLine(text, begin, end) {
+ var line = text.substring(begin, end);
+ if (readingSource) {
+ if (separatorFilter.exec(line) != null) {
+ readingSource = false;
+ } else {
+ if (sourceBegin == -1) {
+ sourceBegin = begin;
+ }
+ sourceEnd = end;
+ }
+ } else {
+ if (readingAsm) {
+ if (codeEndFinder.exec(line) != null) {
+ readingAsm = false;
+ asmEnd = begin;
+ var newCode =
+ new Code(currentFunctionName, currentFunctionKind,
+ sourceBegin, sourceEnd, asmBegin, asmEnd,
+ firstSourcePosition, startAddress);
+ codeObjects.push(newCode);
+ currentFunctionKind = null;
+ } else {
+ if (asmBegin == -1) {
+ matches = instructionBeginFinder.exec(line);
+ if (matches != null) {
+ asmBegin = begin;
+ }
+ }
+ if (startAddress == "") {
+ matches = instructionFinder.exec(line);
+ if (matches != null) {
+ startAddress = matches[1];
+ }
+ }
+ }
+ } else {
+ var matches = kindFinder.exec(line);
+ if (matches != null) {
+ currentFunctionKind = matches[1];
+ if (!kindsWithSource[currentFunctionKind]) {
+ sourceBegin = -1;
+ sourceEnd = -1;
+ }
+ } else if (currentFunctionKind != null) {
+ matches = nameFinder.exec(line);
+ if (matches != null) {
+ readingAsm = true;
+ asmBegin = -1;
+ currentFunctionName = matches[1];
+ }
+ } else if (rawSourceFilter.exec(line) != null) {
+ readingSource = true;
+ sourceBegin = -1;
+ } else {
+ var matches = firstPositionFinder.exec(line);
+ if (matches != null) {
+ firstSourcePosition = parseInt(matches[1]);
+ }
+ }
+ }
+ }
+ }
+
+ function processLines(source, size, processLine) {
+ var firstChar = 0;
+ for (var x = 0; x < size; x++) {
+ var curChar = source[x];
+ if (curChar == '\n' || curChar == '\r') {
+ processLine(source, firstChar, x);
+ firstChar = x + 1;
+ }
+ }
+ if (firstChar != size - 1) {
+ processLine(source, firstChar, size - 1);
+ }
+ }
+
+ function processFileContent() {
+ document.getElementById('source-text-pre').innerHTML = '';
+ sourceBegin = -1;
+ codeObjects = [];
+ processLines(fileContent, fileContent.length, processLine);
+ var functionSelectElement = document.getElementById('function-selector-id');
+ functionSelectElement.innerHTML = '';
+ var length = codeObjects.length;
+ for (var i = 0; i < codeObjects.length; ++i) {
+ var code = codeObjects[i];
+ if (code.kind == selectedFunctionKind) {
+ var optionElement = document.createElement("option");
+ optionElement.codeObject = code;
+ optionElement.text = code.name;
+ functionSelectElement.add(optionElement, null);
+ }
+ }
+ }
+
+ function asmClick(element) {
+ if (element == selectedAsm) return;
+ if (selectedAsm != null) {
+ selectedAsm.classList.remove('highlight-yellow');
+ }
+ selectedAsm = element;
+ selectedAsm.classList.add('highlight-yellow');
+
+ var pc = element.firstChild.innerText;
+ var sourceLine = null;
+ if (addressFinder.exec(pc) != null) {
+ var position = findSourcePosition(pc);
+ var line = findSourceLine(position);
+ sourceLine = document.getElementById('source-line-' + line);
+ var sourceLineTop = sourceLine.offsetTop;
+ makeSourcePosVisible(sourceLineTop);
+ }
+ if (selectedSource == sourceLine) return;
+ if (selectedSource != null) {
+ selectedSource.classList.remove('highlight-yellow');
+ selectedSource.classList.add(selectedSourceClass);
+ }
+ if (sourceLine != null) {
+ selectedSourceClass = sourceLine.classList[0];
+ sourceLine.classList.remove(selectedSourceClass);
+ sourceLine.classList.add('highlight-yellow');
+ }
+ selectedSource = sourceLine;
+ }
+
+ function makeContainerPosVisible(container, newTop) {
+ var height = container.offsetHeight;
+ var margin = Math.floor(height / 4);
+ if (newTop < container.scrollTop + margin) {
+ newTop -= margin;
+ if (newTop < 0) newTop = 0;
+ container.scrollTop = newTop;
+ return;
+ }
+ if (newTop > (container.scrollTop + 3 * margin)) {
+ newTop = newTop - 3 * margin;
+ container.scrollTop = newTop;
+ }
+ }
+
+ function makeAsmPosVisible(newTop) {
+ var asmContainer = document.getElementById('asm-container');
+ makeContainerPosVisible(asmContainer, newTop);
+ }
+
+ function makeSourcePosVisible(newTop) {
+ var sourceContainer = document.getElementById('source-container');
+ makeContainerPosVisible(sourceContainer, newTop);
+ }
+
+ function addressClick(element, event) {
+ event.stopPropagation();
+ var asmLineId = 'address-' + element.innerText;
+ var asmLineElement = document.getElementById(asmLineId);
+ if (asmLineElement != null) {
+ var asmLineTop = asmLineElement.parentNode.offsetTop;
+ makeAsmPosVisible(asmLineTop);
+ asmLineElement.classList.add('highlight-flash-blue');
+ window.setTimeout(function() {
+ asmLineElement.classList.remove('highlight-flash-blue');
+ }, 1500);
+ }
+ }
+
+ function prepareAsm(originalSource) {
+ var newSource = "";
+ var lineNumber = 1;
+ var functionProcessLine = function(text, begin, end) {
+ var currentLine = text.substring(begin, end);
+ var matches = instructionFinder.exec(currentLine);
+ var clickHandler = "";
+ if (matches != null) {
+ var restOfLine = matches[2];
+ restOfLine = restOfLine.replace(
+ addressReplacer,
+ '<span class="hover-underline" ' +
+ 'onclick="Sodium.addressClick(this, event);">\$1</span>');
+ currentLine = '<span id="address-' + matches[1] + '" >' +
+ matches[1] + '</span>' + restOfLine;
+ clickHandler = 'onclick=\'Sodium.asmClick(this)\' ';
+ } else if (whiteSpaceLineFinder.exec(currentLine)) {
+ currentLine = "<br>";
+ }
+ newSource += '<pre style=\'margin-bottom: -12px;\' ' + clickHandler + '>' +
+ currentLine + '</pre>';
+ lineNumber++;
+ }
+ processLines(originalSource, originalSource.length, functionProcessLine);
+ return newSource;
+ }
+
+ function findSourcePosition(pcToSearch) {
+ var position = 0;
+ var distance = 0x7FFFFFFF;
+ var pcToSearchOffset = parseInt(pcToSearch);
+ var processOneLine = function(text, begin, end) {
+ var currentLine = text.substring(begin, end);
+ var matches = positionFinder.exec(currentLine);
+ if (matches != null) {
+ var pcOffset = parseInt(matches[1]);
+ if (pcOffset <= pcToSearchOffset) {
+ var dist = pcToSearchOffset - pcOffset;
+ var pos = parseInt(matches[2]);
+ if ((dist < distance) || (dist == distance && pos > position)) {
+ position = pos;
+ distance = dist;
+ }
+ }
+ }
+ }
+ var asmText = getCurrentAsmText();
+ processLines(asmText, asmText.length, processOneLine);
+ var code = getCurrentCodeObject();
+ if (position == 0) return 0;
+ return position - code.firstSourcePosition;
+ }
+
+ function findSourceLine(position) {
+ if (position == 0) return 1;
+ var line = 0;
+ var processOneLine = function(text, begin, end) {
+ if (begin < position) {
+ line++;
+ }
+ }
+ var sourceText = getCurrentSourceText();
+ processLines(sourceText, sourceText.length, processOneLine);
+ return line;
+ }
+
+ function functionChangedHandler() {
+ var functionSelect = document.getElementById('function-selector-id');
+ var source = getCurrentSourceText();
+ var sourceDivElement = document.getElementById('source-text');
+ var code = getCurrentCodeObject();
+ var newHtml = "<pre class=\"prettyprint linenums\" id=\"source-text\">"
+ + 'function ' + code.name + source + "</pre>";
+ sourceDivElement.innerHTML = newHtml;
+ try {
+ // Wrap in try to work when offline.
+ PR.prettyPrint();
+ } catch (e) {
+ }
+ var sourceLineContainer = sourceDivElement.firstChild.firstChild;
+ var lineCount = sourceLineContainer.childElementCount;
+ var current = sourceLineContainer.firstChild;
+ for (var i = 1; i < lineCount; ++i) {
+ current.id = "source-line-" + i;
+ current = current.nextElementSibling;
+ }
+
+ var asm = getCurrentAsmText();
+ document.getElementById('asm-text').innerHTML = prepareAsm(asm);
+ }
+
+ function kindChangedHandler(element) {
+ setKindByIndex(element.selectedIndex);
+ processFileContent();
+ functionChangedHandler();
+ }
+
+ function readLog(evt) {
+    // Retrieve the first (and only!) File from the FileList object.
+ var f = evt.target.files[0];
+ if (f) {
+ var r = new FileReader();
+ r.onload = function(e) {
+ currentFunctionKind = "";
+ fileContent = e.target.result;
+ processFileContent();
+ functionChangedHandler();
+ }
+ r.readAsText(f);
+ } else {
+ alert("Failed to load file");
+ }
+ }
+
+ function buildFunctionKindSelector(kindSelectElement) {
+ for (var x = 0; x < kinds.length; ++x) {
+ var optionElement = document.createElement("option");
+ optionElement.value = x;
+ optionElement.text = kinds[x];
+ kindSelectElement.add(optionElement, null);
+ }
+ kindSelectElement.selectedIndex = 1;
+ setKindByIndex(1);
+ }
+
+ return {
+ buildFunctionKindSelector: buildFunctionKindSelector,
+ kindChangedHandler: kindChangedHandler,
+ functionChangedHandler: functionChangedHandler,
+ asmClick: asmClick,
+ addressClick: addressClick,
+ readLog: readLog
+ };
+
+})();
diff --git a/chromium/v8/tools/sodium/styles.css b/chromium/v8/tools/sodium/styles.css
new file mode 100755
index 00000000000..4f7d89ee786
--- /dev/null
+++ b/chromium/v8/tools/sodium/styles.css
@@ -0,0 +1,70 @@
+#table-header {
+ background-color: rgba(150, 150, 255, 0.4);
+}
+
+#asm-container {
+ background-color: rgba(200, 200, 255, 0.4);
+ position:absolute;
+ overflow:auto;
+ cursor:default;
+ width:50%;
+ height:92%;
+}
+
+#source-container {
+ position:absolute;
+ overflow:auto;
+ width:48%;
+ left:51%;
+ height:92%;
+}
+
+table {
+ border-collapse: collapse;
+}
+
+.hover-underline:hover {
+ text-decoration: underline;
+}
+
+.highlight-flash-blue {
+ -webkit-transition: all 1s ease;
+ background-color: rgba(50, 50, 245, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+
+.highlight-green {
+ background-color: rgba(0, 255, 0, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+.highlight-yellow {
+ background-color: rgba(255, 255, 0, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+.highlight-gray {
+ background-color: rgba(128, 128, 128, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
+
+.highlight-red {
+ background-color: rgba(255, 0, 0, 0.4);
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+}
diff --git a/chromium/v8/tools/test-push-to-trunk.sh b/chromium/v8/tools/test-push-to-trunk.sh
new file mode 100755
index 00000000000..6c201e46288
--- /dev/null
+++ b/chromium/v8/tools/test-push-to-trunk.sh
@@ -0,0 +1,246 @@
+#!/bin/bash
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Tests the push-to-trunk.sh script. Needs to be run in V8 base dir:
+# ./tools/test-push-to-trunk.sh
+
+# TODO(machenbach): Check automatically if expectations match.
+# TODO(machenbach): Mock out version number retrieval.
+# TODO(machenbach): Allow multiple different test cases.
+# TODO(machenbach): Allow multi line mock output.
+# TODO(machenbach): Represent test expectations/mock output without an array
+# index increment.
+
+########## Stdin for push-to-trunk.sh
+
+# Confirm push to trunk commit ID
+INPUT[0]="Y"
+# Open editor
+INPUT[1]=""
+# Confirm increment version number
+INPUT[2]="Y"
+# Reviewer for V8 CL
+INPUT[3]="reviewer@chromium.org"
+# Enter LGTM for V8 CL
+INPUT[4]="LGTM"
+# Confirm checkout sanity
+INPUT[5]="Y"
+# Manually type in trunk revision
+INPUT[6]="12345"
+# Reviewer for Chromium CL
+INPUT[7]="reviewer@chromium.org"
+
+########## Expected commands and mock output
+
+EXP[0]="git status -s -uno"
+OUT[0]=""
+EXP[1]="git status -s -b -uno"
+OUT[1]="## some_branch"
+EXP[2]="git svn fetch"
+OUT[2]=""
+EXP[3]="git branch"
+OUT[3]="not the temp branch"
+EXP[4]="git checkout -b prepare-push-temporary-branch-created-by-script"
+OUT[4]=""
+EXP[5]="git branch"
+OUT[5]="not the branch"
+EXP[6]="git branch"
+OUT[6]="not the trunk branch"
+EXP[7]="git checkout -b prepare-push svn/bleeding_edge"
+OUT[7]=""
+EXP[8]="git log -1 --format=%H ChangeLog"
+OUT[8]="hash1"
+EXP[9]="git log -1 hash1"
+OUT[9]=""
+EXP[10]="git log hash1..HEAD --format=%H"
+OUT[10]="hash2"
+EXP[11]="git log -1 hash2 --format=\"%w(80,8,8)%s\""
+OUT[11]="Log line..."
+EXP[12]="git log -1 hash2 --format=\"%B\""
+OUT[12]="BUG=6789"
+EXP[13]="git log -1 hash2 --format=\"%w(80,8,8)(%an)\""
+OUT[13]=" (author@chromium.org)"
+EXP[14]="git commit -a -m \"Prepare push to trunk. Now working on version 3.4.5.\""
+OUT[14]=""
+EXP[15]="git cl upload -r reviewer@chromium.org --send-mail"
+OUT[15]=""
+EXP[16]="git cl dcommit"
+OUT[16]=""
+EXP[17]="git svn fetch"
+OUT[17]=""
+EXP[18]="git checkout svn/bleeding_edge"
+OUT[18]=""
+EXP[19]="git log -1 --format=%H --grep=Prepare push to trunk. Now working on version 3.4.5."
+OUT[19]="hash3"
+EXP[20]="git diff svn/trunk"
+OUT[20]="patch1"
+EXP[21]="git checkout -b trunk-push svn/trunk"
+OUT[21]=""
+EXP[22]="git apply --index --reject /tmp/v8-push-to-trunk-tempfile-patch"
+OUT[22]=""
+EXP[23]="git add src/version.cc"
+OUT[23]=""
+EXP[24]="git commit -F /tmp/v8-push-to-trunk-tempfile-commitmsg"
+OUT[24]=""
+EXP[25]="git svn dcommit"
+OUT[25]="r1234"
+EXP[26]="git svn tag 3.4.5 -m \"Tagging version 3.4.5\""
+OUT[26]=""
+EXP[27]="git status -s -uno"
+OUT[27]=""
+EXP[28]="git checkout master"
+OUT[28]=""
+EXP[29]="git pull"
+OUT[29]=""
+EXP[30]="git checkout -b v8-roll-12345"
+OUT[30]=""
+EXP[31]="git commit -am Update V8 to version 3.4.5."
+OUT[31]=""
+EXP[32]="git cl upload --send-mail"
+OUT[32]=""
+EXP[33]="git checkout -f some_branch"
+OUT[33]=""
+EXP[34]="git branch -D prepare-push-temporary-branch-created-by-script"
+OUT[34]=""
+EXP[35]="git branch -D prepare-push"
+OUT[35]=""
+EXP[36]="git branch -D trunk-push"
+OUT[36]=""
+
+########## Global temp files for test input/output
+
+export TEST_OUTPUT=$(mktemp)
+export INDEX=$(mktemp)
+export MOCK_OUTPUT=$(mktemp)
+export EXPECTED_COMMANDS=$(mktemp)
+
+########## Command index
+
+inc_index() {
+ local I="$(command cat $INDEX)"
+ let "I+=1"
+ echo "$I" > $INDEX
+ echo $I
+}
+
+echo "-1" > $INDEX
+export -f inc_index
+
+########## Mock output accessor
+
+get_mock_output() {
+ local I=$1
+ let "I+=1"
+ command sed "${I}q;d" $MOCK_OUTPUT
+}
+
+export -f get_mock_output
+
+for E in "${OUT[@]}"; do
+ echo $E
+done > $MOCK_OUTPUT
+
+########## Expected commands accessor
+
+get_expected_command() {
+ local I=$1
+ let "I+=1"
+ command sed "${I}q;d" $EXPECTED_COMMANDS
+}
+
+export -f get_expected_command
+
+for E in "${EXP[@]}"; do
+ echo $E
+done > $EXPECTED_COMMANDS
+
+########## Mock commands
+
+git() {
+ # All calls to git are mocked out. Expected calls and mock output are stored
+ # in the EXP/OUT arrays above.
+ local I=$(inc_index)
+ local OUT=$(get_mock_output $I)
+ local EXP=$(get_expected_command $I)
+ echo "#############################" >> $TEST_OUTPUT
+ echo "Com. Index: $I" >> $TEST_OUTPUT
+ echo "Expected: ${EXP}" >> $TEST_OUTPUT
+ echo "Actual: git $@" >> $TEST_OUTPUT
+ echo "Mock Output: ${OUT}" >> $TEST_OUTPUT
+ echo "${OUT}"
+}
+
+mv() {
+ echo "#############################" >> $TEST_OUTPUT
+ echo "mv $@" >> $TEST_OUTPUT
+}
+
+sed() {
+  # Only in-place sed calls (those passing -i) are mocked out; all other
+  # calls are forwarded to the real sed below.
+ echo "#############################" >> $TEST_OUTPUT
+  local arr=("$@")
+ if [[ "${arr[@]}" =~ "-i" || "${arr[${#arr[@]}-1]}" == "-i" ]]; then
+ echo "sed $@" >> $TEST_OUTPUT
+ else
+ echo "sed $@" >> $TEST_OUTPUT
+ command sed "$@"
+ fi
+}
+
+editor() {
+ echo "#############################" >> $TEST_OUTPUT
+ echo "editor $@" >> $TEST_OUTPUT
+}
+
+cd() {
+ echo "#############################" >> $TEST_OUTPUT
+ echo "cd $@" >> $TEST_OUTPUT
+}
+
+export -f git
+export -f mv
+export -f sed
+export -f cd
+export -f editor
+export EDITOR=editor
+
+########## Invoke script with test stdin
+
+for i in "${INPUT[@]}"; do
+ echo $i
+done | tools/push-to-trunk.sh -c "path/to/chromium"
+
+echo "Collected output:"
+command cat $TEST_OUTPUT
+
+########## Clean up
+
+rm -rf $TEST_OUTPUT
+rm -rf $INDEX
+rm -rf $MOCK_OUTPUT
+rm -rf $EXPECTED_COMMANDS
diff --git a/chromium/v8/tools/testrunner/README b/chromium/v8/tools/testrunner/README
index 8f0c01f52ab..0771ef9dc27 100644
--- a/chromium/v8/tools/testrunner/README
+++ b/chromium/v8/tools/testrunner/README
@@ -87,12 +87,6 @@ This section is written from the point of view of the tools/ directory.
Implementation needed to run tests locally. Used by run-tests.py. Inspired by
(and partly copied verbatim from) the original test.py script.
-./testrunner/local/old_statusfile.py:
- Provides functionality to read an old-style <testsuite>.status file and
- convert it to new-style syntax. This can be removed once the new-style
- syntax becomes authoritative (and old-style syntax is no longer supported).
- ./status-file-converter.py provides a stand-alone interface to this.
-
./testrunner/objects/*:
A bunch of data container classes, used by the scripts in the various other
directories; serializable for transmission over the network.
diff --git a/chromium/v8/tools/testrunner/local/junit_output.py b/chromium/v8/tools/testrunner/local/junit_output.py
index 437adb17893..d2748febd98 100644
--- a/chromium/v8/tools/testrunner/local/junit_output.py
+++ b/chromium/v8/tools/testrunner/local/junit_output.py
@@ -46,4 +46,3 @@ class JUnitTestOutput:
def FinishAndWrite(self, file):
xml.ElementTree(self.root).write(file, "UTF-8")
-
diff --git a/chromium/v8/tools/testrunner/local/old_statusfile.py b/chromium/v8/tools/testrunner/local/old_statusfile.py
deleted file mode 100644
index d634e3ec955..00000000000
--- a/chromium/v8/tools/testrunner/local/old_statusfile.py
+++ /dev/null
@@ -1,462 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import cStringIO
-import re
-
-# These outcomes can occur in a TestCase's outcomes list:
-SKIP = 'SKIP'
-FAIL = 'FAIL'
-PASS = 'PASS'
-OKAY = 'OKAY'
-TIMEOUT = 'TIMEOUT'
-CRASH = 'CRASH'
-SLOW = 'SLOW'
-FLAKY = 'FLAKY'
-# These are just for the status files and are mapped below in DEFS:
-FAIL_OK = 'FAIL_OK'
-PASS_OR_FAIL = 'PASS_OR_FAIL'
-
-KEYWORDS = {SKIP: SKIP,
- FAIL: FAIL,
- PASS: PASS,
- OKAY: OKAY,
- TIMEOUT: TIMEOUT,
- CRASH: CRASH,
- SLOW: SLOW,
- FLAKY: FLAKY,
- FAIL_OK: FAIL_OK,
- PASS_OR_FAIL: PASS_OR_FAIL}
-
-class Expression(object):
- pass
-
-
-class Constant(Expression):
-
- def __init__(self, value):
- self.value = value
-
- def Evaluate(self, env, defs):
- return self.value
-
-
-class Variable(Expression):
-
- def __init__(self, name):
- self.name = name
-
- def GetOutcomes(self, env, defs):
- if self.name in env: return set([env[self.name]])
- else: return set([])
-
- def Evaluate(self, env, defs):
- return env[self.name]
-
- def __str__(self):
- return self.name
-
- def string(self, logical):
- return self.__str__()
-
-
-class Outcome(Expression):
-
- def __init__(self, name):
- self.name = name
-
- def GetOutcomes(self, env, defs):
- if self.name in defs:
- return defs[self.name].GetOutcomes(env, defs)
- else:
- return set([self.name])
-
- def __str__(self):
- if self.name in KEYWORDS:
- return "%s" % KEYWORDS[self.name]
- return "'%s'" % self.name
-
- def string(self, logical):
- if logical:
- return "%s" % self.name
- return self.__str__()
-
-
-class Operation(Expression):
-
- def __init__(self, left, op, right):
- self.left = left
- self.op = op
- self.right = right
-
- def Evaluate(self, env, defs):
- if self.op == '||' or self.op == ',':
- return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
- elif self.op == 'if':
- return False
- elif self.op == '==':
- return not self.left.GetOutcomes(env, defs).isdisjoint(self.right.GetOutcomes(env, defs))
- elif self.op == '!=':
- return self.left.GetOutcomes(env, defs).isdisjoint(self.right.GetOutcomes(env, defs))
- else:
- assert self.op == '&&'
- return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
-
- def GetOutcomes(self, env, defs):
- if self.op == '||' or self.op == ',':
- return self.left.GetOutcomes(env, defs) | self.right.GetOutcomes(env, defs)
- elif self.op == 'if':
- if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
- else: return set([])
- else:
- assert self.op == '&&'
- return self.left.GetOutcomes(env, defs) & self.right.GetOutcomes(env, defs)
-
- def __str__(self):
- return self.string(False)
-
- def string(self, logical=False):
- if self.op == 'if':
- return "['%s', %s]" % (self.right.string(True), self.left.string(logical))
- elif self.op == "||" or self.op == ",":
- if logical:
- return "%s or %s" % (self.left.string(True), self.right.string(True))
- else:
- return "%s, %s" % (self.left, self.right)
- elif self.op == "&&":
- return "%s and %s" % (self.left.string(True), self.right.string(True))
- return "%s %s %s" % (self.left.string(logical), self.op,
- self.right.string(logical))
-
-
-def IsAlpha(string):
- for char in string:
- if not (char.isalpha() or char.isdigit() or char == '_'):
- return False
- return True
-
-
-class Tokenizer(object):
- """A simple string tokenizer that chops expressions into variables,
- parens and operators"""
-
- def __init__(self, expr):
- self.index = 0
- self.expr = expr
- self.length = len(expr)
- self.tokens = None
-
- def Current(self, length=1):
- if not self.HasMore(length): return ""
- return self.expr[self.index:self.index + length]
-
- def HasMore(self, length=1):
- return self.index < self.length + (length - 1)
-
- def Advance(self, count=1):
- self.index = self.index + count
-
- def AddToken(self, token):
- self.tokens.append(token)
-
- def SkipSpaces(self):
- while self.HasMore() and self.Current().isspace():
- self.Advance()
-
- def Tokenize(self):
- self.tokens = [ ]
- while self.HasMore():
- self.SkipSpaces()
- if not self.HasMore():
- return None
- if self.Current() == '(':
- self.AddToken('(')
- self.Advance()
- elif self.Current() == ')':
- self.AddToken(')')
- self.Advance()
- elif self.Current() == '$':
- self.AddToken('$')
- self.Advance()
- elif self.Current() == ',':
- self.AddToken(',')
- self.Advance()
- elif IsAlpha(self.Current()):
- buf = ""
- while self.HasMore() and IsAlpha(self.Current()):
- buf += self.Current()
- self.Advance()
- self.AddToken(buf)
- elif self.Current(2) == '&&':
- self.AddToken('&&')
- self.Advance(2)
- elif self.Current(2) == '||':
- self.AddToken('||')
- self.Advance(2)
- elif self.Current(2) == '==':
- self.AddToken('==')
- self.Advance(2)
- elif self.Current(2) == '!=':
- self.AddToken('!=')
- self.Advance(2)
- else:
- return None
- return self.tokens
-
-
-class Scanner(object):
- """A simple scanner that can serve out tokens from a given list"""
-
- def __init__(self, tokens):
- self.tokens = tokens
- self.length = len(tokens)
- self.index = 0
-
- def HasMore(self):
- return self.index < self.length
-
- def Current(self):
- return self.tokens[self.index]
-
- def Advance(self):
- self.index = self.index + 1
-
-
-def ParseAtomicExpression(scan):
- if scan.Current() == "true":
- scan.Advance()
- return Constant(True)
- elif scan.Current() == "false":
- scan.Advance()
- return Constant(False)
- elif IsAlpha(scan.Current()):
- name = scan.Current()
- scan.Advance()
- return Outcome(name)
- elif scan.Current() == '$':
- scan.Advance()
- if not IsAlpha(scan.Current()):
- return None
- name = scan.Current()
- scan.Advance()
- return Variable(name.lower())
- elif scan.Current() == '(':
- scan.Advance()
- result = ParseLogicalExpression(scan)
- if (not result) or (scan.Current() != ')'):
- return None
- scan.Advance()
- return result
- else:
- return None
-
-
-BINARIES = ['==', '!=']
-def ParseOperatorExpression(scan):
- left = ParseAtomicExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() in BINARIES):
- op = scan.Current()
- scan.Advance()
- right = ParseOperatorExpression(scan)
- if not right:
- return None
- left = Operation(left, op, right)
- return left
-
-
-def ParseConditionalExpression(scan):
- left = ParseOperatorExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() == 'if'):
- scan.Advance()
- right = ParseOperatorExpression(scan)
- if not right:
- return None
- left = Operation(left, 'if', right)
- return left
-
-
-LOGICALS = ["&&", "||", ","]
-def ParseLogicalExpression(scan):
- left = ParseConditionalExpression(scan)
- if not left: return None
- while scan.HasMore() and (scan.Current() in LOGICALS):
- op = scan.Current()
- scan.Advance()
- right = ParseConditionalExpression(scan)
- if not right:
- return None
- left = Operation(left, op, right)
- return left
-
-
-def ParseCondition(expr):
- """Parses a logical expression into an Expression object"""
- tokens = Tokenizer(expr).Tokenize()
- if not tokens:
- print "Malformed expression: '%s'" % expr
- return None
- scan = Scanner(tokens)
- ast = ParseLogicalExpression(scan)
- if not ast:
- print "Malformed expression: '%s'" % expr
- return None
- if scan.HasMore():
- print "Malformed expression: '%s'" % expr
- return None
- return ast
-
-
-class Section(object):
- """A section of the configuration file. Sections are enabled or
- disabled prior to running the tests, based on their conditions"""
-
- def __init__(self, condition):
- self.condition = condition
- self.rules = [ ]
-
- def AddRule(self, rule):
- self.rules.append(rule)
-
-
-class Rule(object):
- """A single rule that specifies the expected outcome for a single
- test."""
-
- def __init__(self, raw_path, path, value):
- self.raw_path = raw_path
- self.path = path
- self.value = value
-
- def GetOutcomes(self, env, defs):
- return self.value.GetOutcomes(env, defs)
-
- def Contains(self, path):
- if len(self.path) > len(path):
- return False
- for i in xrange(len(self.path)):
- if not self.path[i].match(path[i]):
- return False
- return True
-
-
-HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
-RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
-DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
-PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
-
-
-class ConvertNotation(object):
- def __init__(self, path):
- self.path = path
- self.indent = ""
- self.comment = []
- self.init = False
- self.section = False
- self.out = cStringIO.StringIO()
-
- def OpenGlobal(self):
- if self.init: return
- self.WriteComment()
- print >> self.out, "["
- self.init = True
-
- def CloseGlobal(self):
- if not self.init: self.OpenGlobal()
- print >> self.out, "]"
- self.init = False
-
- def OpenSection(self, condition="ALWAYS"):
- if self.section: return
- self.OpenGlobal()
- if type(condition) != str:
- condition = "'%s'" % condition.string(True)
- print >> self.out, "%s[%s, {" % (self.indent, condition)
- self.indent += " " * 2
- self.section = condition
-
- def CloseSection(self):
- if not self.section: return
- self.indent = self.indent[:-2]
- print >> self.out, "%s}], # %s" % (self.indent, self.section)
- self.section = False
-
- def WriteComment(self):
- if not self.comment: return
- for c in self.comment:
- if len(c.strip()) == 0:
- print >> self.out, ""
- else:
- print >> self.out, "%s%s" % (self.indent, c),
- self.comment = []
-
- def GetOutput(self):
- with open(self.path) as f:
- for line in f:
- if line[0] == '#':
- self.comment += [line]
- continue
- if len(line.strip()) == 0:
- self.comment += [line]
- continue
- header_match = HEADER_PATTERN.match(line)
- if header_match:
- condition = ParseCondition(header_match.group(1).strip())
- self.CloseSection()
- self.WriteComment()
- self.OpenSection(condition)
- continue
- rule_match = RULE_PATTERN.match(line)
- if rule_match:
- self.OpenSection()
- self.WriteComment()
- path = rule_match.group(1).strip()
- value_str = rule_match.group(2).strip()
- comment = ""
- if '#' in value_str:
- pos = value_str.find('#')
- comment = " %s" % value_str[pos:].strip()
- value_str = value_str[:pos].strip()
- value = ParseCondition(value_str)
- print >> self.out, ("%s'%s': [%s],%s" %
- (self.indent, path, value, comment))
- continue
- def_match = DEF_PATTERN.match(line)
- if def_match:
- # Custom definitions are deprecated.
- continue
- prefix_match = PREFIX_PATTERN.match(line)
- if prefix_match:
- continue
- print "Malformed line: '%s'." % line
- self.CloseSection()
- self.CloseGlobal()
- result = self.out.getvalue()
- self.out.close()
- return result
diff --git a/chromium/v8/tools/testrunner/local/progress.py b/chromium/v8/tools/testrunner/local/progress.py
index a663be23eba..03116ee768d 100644
--- a/chromium/v8/tools/testrunner/local/progress.py
+++ b/chromium/v8/tools/testrunner/local/progress.py
@@ -89,6 +89,7 @@ class SimpleProgressIndicator(ProgressIndicator):
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
if failed.output.HasCrashed():
+ print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
if failed.output.HasTimedOut():
print "--- TIMEOUT ---"
diff --git a/chromium/v8/tools/testrunner/local/statusfile.py b/chromium/v8/tools/testrunner/local/statusfile.py
index 1d30fe3d3c1..e290122fb79 100644
--- a/chromium/v8/tools/testrunner/local/statusfile.py
+++ b/chromium/v8/tools/testrunner/local/statusfile.py
@@ -26,14 +26,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# These imports are required for the on-demand conversion from
-# old to new status file format.
-from os.path import exists
-from os.path import getmtime
-
-from . import old_statusfile
-
-
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
FAIL = "FAIL"
@@ -43,6 +35,7 @@ TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
FLAKY = "FLAKY"
+NO_VARIANTS = "NO_VARIANTS"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
@@ -51,7 +44,7 @@ ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
- PASS_OR_FAIL, ALWAYS]:
+ NO_VARIANTS, PASS_OR_FAIL, ALWAYS]:
KEYWORDS[key] = key
DEFS = {FAIL_OK: [FAIL, OKAY],
@@ -60,12 +53,21 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
- "mipsel", "x64", "nacl_ia32", "nacl_x64"]:
+ "mipsel", "x64", "nacl_ia32", "nacl_x64", "macos", "windows",
+ "linux"]:
VARIABLES[var] = var
def DoSkip(outcomes):
- return SKIP in outcomes or SLOW in outcomes
+ return SKIP in outcomes
+
+
+def IsSlow(outcomes):
+ return SLOW in outcomes
+
+
+def OnlyStandardVariant(outcomes):
+ return NO_VARIANTS in outcomes
def IsFlaky(outcomes):
@@ -116,18 +118,6 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
def ReadStatusFile(path, variables):
- # As long as the old-format .status files are authoritative, just
- # create the converted version on demand and cache it to speed up
- # subsequent runs.
- if path.endswith(".status"):
- newpath = path + "2"
- if not exists(newpath) or getmtime(newpath) < getmtime(path):
- print "Converting status file."
- converted = old_statusfile.ConvertNotation(path).GetOutput()
- with open(newpath, 'w') as f:
- f.write(converted)
- path = newpath
-
with open(path) as f:
global KEYWORDS
contents = eval(f.read(), KEYWORDS)
diff --git a/chromium/v8/tools/testrunner/local/testsuite.py b/chromium/v8/tools/testrunner/local/testsuite.py
index b0372e7f739..ff51196a563 100644
--- a/chromium/v8/tools/testrunner/local/testsuite.py
+++ b/chromium/v8/tools/testrunner/local/testsuite.py
@@ -74,8 +74,10 @@ class TestSuite(object):
def ListTests(self, context):
raise NotImplementedError
- def VariantFlags(self):
- return None
+ def VariantFlags(self, testcase, default_flags):
+ if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
+ return [[]]
+ return default_flags
def DownloadData(self):
pass
@@ -91,11 +93,24 @@ class TestSuite(object):
def _FilterFlaky(flaky, mode):
return (mode == "run" and not flaky) or (mode == "skip" and flaky)
- def FilterTestCasesByStatus(self, warn_unused_rules, flaky_tests="dontcare"):
+ @staticmethod
+ def _FilterSlow(slow, mode):
+ return (mode == "run" and not slow) or (mode == "skip" and slow)
+
+ @staticmethod
+ def _FilterPassFail(pass_fail, mode):
+ return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
+
+ def FilterTestCasesByStatus(self, warn_unused_rules,
+ flaky_tests="dontcare",
+ slow_tests="dontcare",
+ pass_fail_tests="dontcare"):
filtered = []
used_rules = set()
for t in self.tests:
flaky = False
+ slow = False
+ pass_fail = False
testname = self.CommonTestName(t)
if testname in self.rules:
used_rules.add(testname)
@@ -105,6 +120,8 @@ class TestSuite(object):
if statusfile.DoSkip(t.outcomes):
continue # Don't add skipped tests to |filtered|.
flaky = statusfile.IsFlaky(t.outcomes)
+ slow = statusfile.IsSlow(t.outcomes)
+ pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
for rule in self.wildcards:
assert rule[-1] == '*'
@@ -115,7 +132,11 @@ class TestSuite(object):
skip = True
break # "for rule in self.wildcards"
flaky = flaky or statusfile.IsFlaky(t.outcomes)
- if skip or self._FilterFlaky(flaky, flaky_tests):
+ slow = slow or statusfile.IsSlow(t.outcomes)
+ pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
+ if (skip or self._FilterFlaky(flaky, flaky_tests)
+ or self._FilterSlow(slow, slow_tests)
+ or self._FilterPassFail(pass_fail, pass_fail_tests)):
continue # "for t in self.tests"
filtered.append(t)
self.tests = filtered
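FilterTestCasesByStatus now takes three independent mode switches (flaky_tests, slow_tests, pass_fail_tests), each accepting "run", "skip" or "dontcare", and a test is dropped as soon as any predicate fires. All three predicates share the same truth table, sketched here with a single merged helper (the function name is mine; the real code keeps three separate static methods):

    # Sketch of the shared mode logic; returns True when the test
    # should be dropped from the run.
    def _FilterByMode(has_attribute, mode):
      # "run":      keep only tests that HAVE the attribute
      # "skip":     keep only tests that LACK the attribute
      # "dontcare": keep everything
      return ((mode == "run" and not has_attribute) or
              (mode == "skip" and has_attribute))

    assert not _FilterByMode(True, "run")       # slow test kept in "run" mode
    assert _FilterByMode(False, "run")          # non-slow test dropped
    assert _FilterByMode(True, "skip")          # slow test dropped
    assert not _FilterByMode(True, "dontcare")  # nothing dropped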
diff --git a/chromium/v8/tools/testrunner/local/utils.py b/chromium/v8/tools/testrunner/local/utils.py
index b7caa121f3a..61ee7dac67b 100644
--- a/chromium/v8/tools/testrunner/local/utils.py
+++ b/chromium/v8/tools/testrunner/local/utils.py
@@ -65,7 +65,7 @@ def GuessOS():
elif system == 'Windows' or system == 'Microsoft':
# On Windows Vista platform.system() can return 'Microsoft' with some
# versions of Python, see http://bugs.python.org/issue1082
- return 'win32'
+ return 'windows'
elif system == 'FreeBSD':
return 'freebsd'
elif system == 'OpenBSD':
@@ -105,4 +105,4 @@ def GuessWordsize():
def IsWindows():
- return GuessOS() == 'win32'
+ return GuessOS() == 'windows'
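Renaming 'win32' to 'windows' lines GuessOS() up with the status-file variables added above ("macos", "windows", "linux"), so one vocabulary covers both OS detection and rule conditions. A rough sketch of the probe involved (platform.system() is what the real GuessOS() wraps; the branch set is abbreviated here):

    # Abbreviated sketch of the OS-name mapping after this change.
    import platform

    def GuessOS():
      system = platform.system()
      if system == 'Linux':
        return 'linux'
      elif system == 'Darwin':
        return 'macos'
      elif system in ('Windows', 'Microsoft'):
        # Some Pythons report 'Microsoft' on Vista, see
        # http://bugs.python.org/issue1082
        return 'windows'        # previously 'win32'
      return None

    def IsWindows():
      return GuessOS() == 'windows'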
diff --git a/chromium/v8/tools/testrunner/objects/context.py b/chromium/v8/tools/testrunner/objects/context.py
index 3ea215a708d..1f525b76b31 100644
--- a/chromium/v8/tools/testrunner/objects/context.py
+++ b/chromium/v8/tools/testrunner/objects/context.py
@@ -28,7 +28,7 @@
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
- isolates, command_prefix, extra_flags):
+ isolates, command_prefix, extra_flags, noi18n):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -38,13 +38,14 @@ class Context():
self.isolates = isolates
self.command_prefix = command_prefix
self.extra_flags = extra_flags
+ self.noi18n = noi18n
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
- self.command_prefix, self.extra_flags]
+ self.command_prefix, self.extra_flags, self.noi18n]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Context(packed[0], packed[1], None, packed[2], False,
- packed[3], packed[4], packed[5], packed[6])
+ packed[3], packed[4], packed[5], packed[6], packed[7])
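The new noi18n flag is threaded through Context and appended to the positional Pack()/Unpack() wire format so worker processes see it too; the two methods must be extended in lockstep, since Unpack() indexes the list by position. A round-trip sketch with invented argument values:

    # Sketch: round-tripping a Context through Pack()/Unpack().
    # Values are made up; shell_dir and verbose are intentionally
    # not packed and come back as None/False on the far side.
    ctx = Context("ia32", "release", "/path/to/shell", [], True, 60,
                  False, [], [], True)       # final arg: noi18n=True
    restored = Context.Unpack(ctx.Pack())

    assert restored.noi18n is True
    assert restored.arch == "ia32"
    assert restored.shell_dir is None        # dropped in transit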
diff --git a/chromium/v8/tools/tickprocessor.js b/chromium/v8/tools/tickprocessor.js
index ff5254172fd..187e647033f 100644
--- a/chromium/v8/tools/tickprocessor.js
+++ b/chromium/v8/tools/tickprocessor.js
@@ -950,4 +950,3 @@ ArgumentsProcessor.prototype.printUsageAndExit = function() {
}
quit(2);
};
-
diff --git a/chromium/v8/tools/v8heapconst.py b/chromium/v8/tools/v8heapconst.py
index 1f72c372361..06141c2f96a 100644
--- a/chromium/v8/tools/v8heapconst.py
+++ b/chromium/v8/tools/v8heapconst.py
@@ -71,53 +71,54 @@ INSTANCE_TYPES = {
144: "EXTERNAL_FLOAT_ARRAY_TYPE",
145: "EXTERNAL_DOUBLE_ARRAY_TYPE",
146: "EXTERNAL_PIXEL_ARRAY_TYPE",
- 148: "FILLER_TYPE",
- 149: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 150: "DECLARED_ACCESSOR_INFO_TYPE",
- 151: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 152: "ACCESSOR_PAIR_TYPE",
- 153: "ACCESS_CHECK_INFO_TYPE",
- 154: "INTERCEPTOR_INFO_TYPE",
- 155: "CALL_HANDLER_INFO_TYPE",
- 156: "FUNCTION_TEMPLATE_INFO_TYPE",
- 157: "OBJECT_TEMPLATE_INFO_TYPE",
- 158: "SIGNATURE_INFO_TYPE",
- 159: "TYPE_SWITCH_INFO_TYPE",
- 161: "ALLOCATION_MEMENTO_TYPE",
- 160: "ALLOCATION_SITE_TYPE",
- 162: "SCRIPT_TYPE",
- 163: "CODE_CACHE_TYPE",
- 164: "POLYMORPHIC_CODE_CACHE_TYPE",
- 165: "TYPE_FEEDBACK_INFO_TYPE",
- 166: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 167: "BOX_TYPE",
- 170: "FIXED_ARRAY_TYPE",
+ 149: "FILLER_TYPE",
+ 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 151: "DECLARED_ACCESSOR_INFO_TYPE",
+ 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 153: "ACCESSOR_PAIR_TYPE",
+ 154: "ACCESS_CHECK_INFO_TYPE",
+ 155: "INTERCEPTOR_INFO_TYPE",
+ 156: "CALL_HANDLER_INFO_TYPE",
+ 157: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 158: "OBJECT_TEMPLATE_INFO_TYPE",
+ 159: "SIGNATURE_INFO_TYPE",
+ 160: "TYPE_SWITCH_INFO_TYPE",
+ 162: "ALLOCATION_MEMENTO_TYPE",
+ 161: "ALLOCATION_SITE_TYPE",
+ 163: "SCRIPT_TYPE",
+ 164: "CODE_CACHE_TYPE",
+ 165: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 166: "TYPE_FEEDBACK_INFO_TYPE",
+ 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 168: "BOX_TYPE",
+ 171: "FIXED_ARRAY_TYPE",
147: "FIXED_DOUBLE_ARRAY_TYPE",
- 171: "SHARED_FUNCTION_INFO_TYPE",
- 172: "JS_MESSAGE_OBJECT_TYPE",
- 175: "JS_VALUE_TYPE",
- 176: "JS_DATE_TYPE",
- 177: "JS_OBJECT_TYPE",
- 178: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 179: "JS_GENERATOR_OBJECT_TYPE",
- 180: "JS_MODULE_TYPE",
- 181: "JS_GLOBAL_OBJECT_TYPE",
- 182: "JS_BUILTINS_OBJECT_TYPE",
- 183: "JS_GLOBAL_PROXY_TYPE",
- 184: "JS_ARRAY_TYPE",
- 185: "JS_ARRAY_BUFFER_TYPE",
- 186: "JS_TYPED_ARRAY_TYPE",
- 187: "JS_DATA_VIEW_TYPE",
- 174: "JS_PROXY_TYPE",
- 188: "JS_SET_TYPE",
- 189: "JS_MAP_TYPE",
- 190: "JS_WEAK_MAP_TYPE",
- 191: "JS_WEAK_SET_TYPE",
- 192: "JS_REGEXP_TYPE",
- 193: "JS_FUNCTION_TYPE",
- 173: "JS_FUNCTION_PROXY_TYPE",
- 168: "DEBUG_INFO_TYPE",
- 169: "BREAK_POINT_INFO_TYPE",
+ 148: "CONSTANT_POOL_ARRAY_TYPE",
+ 172: "SHARED_FUNCTION_INFO_TYPE",
+ 173: "JS_MESSAGE_OBJECT_TYPE",
+ 176: "JS_VALUE_TYPE",
+ 177: "JS_DATE_TYPE",
+ 178: "JS_OBJECT_TYPE",
+ 179: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 180: "JS_GENERATOR_OBJECT_TYPE",
+ 181: "JS_MODULE_TYPE",
+ 182: "JS_GLOBAL_OBJECT_TYPE",
+ 183: "JS_BUILTINS_OBJECT_TYPE",
+ 184: "JS_GLOBAL_PROXY_TYPE",
+ 185: "JS_ARRAY_TYPE",
+ 186: "JS_ARRAY_BUFFER_TYPE",
+ 187: "JS_TYPED_ARRAY_TYPE",
+ 188: "JS_DATA_VIEW_TYPE",
+ 175: "JS_PROXY_TYPE",
+ 189: "JS_SET_TYPE",
+ 190: "JS_MAP_TYPE",
+ 191: "JS_WEAK_MAP_TYPE",
+ 192: "JS_WEAK_SET_TYPE",
+ 193: "JS_REGEXP_TYPE",
+ 194: "JS_FUNCTION_TYPE",
+ 174: "JS_FUNCTION_PROXY_TYPE",
+ 169: "DEBUG_INFO_TYPE",
+ 170: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
@@ -126,85 +127,86 @@ KNOWN_MAPS = {
0x080a9: (129, "MetaMap"),
0x080d1: (131, "OddballMap"),
0x080f9: (4, "AsciiInternalizedStringMap"),
- 0x08121: (170, "FixedArrayMap"),
+ 0x08121: (171, "FixedArrayMap"),
0x08149: (134, "HeapNumberMap"),
0x08171: (137, "FreeSpaceMap"),
- 0x08199: (148, "OnePointerFillerMap"),
- 0x081c1: (148, "TwoPointerFillerMap"),
+ 0x08199: (149, "OnePointerFillerMap"),
+ 0x081c1: (149, "TwoPointerFillerMap"),
0x081e9: (132, "CellMap"),
0x08211: (133, "GlobalPropertyCellMap"),
- 0x08239: (171, "SharedFunctionInfoMap"),
- 0x08261: (170, "NativeContextMap"),
+ 0x08239: (172, "SharedFunctionInfoMap"),
+ 0x08261: (171, "NativeContextMap"),
0x08289: (130, "CodeMap"),
- 0x082b1: (170, "ScopeInfoMap"),
- 0x082d9: (170, "FixedCOWArrayMap"),
+ 0x082b1: (171, "ScopeInfoMap"),
+ 0x082d9: (171, "FixedCOWArrayMap"),
0x08301: (147, "FixedDoubleArrayMap"),
- 0x08329: (170, "HashTableMap"),
- 0x08351: (128, "SymbolMap"),
- 0x08379: (64, "StringMap"),
- 0x083a1: (68, "AsciiStringMap"),
- 0x083c9: (65, "ConsStringMap"),
- 0x083f1: (69, "ConsAsciiStringMap"),
- 0x08419: (67, "SlicedStringMap"),
- 0x08441: (71, "SlicedAsciiStringMap"),
- 0x08469: (66, "ExternalStringMap"),
- 0x08491: (74, "ExternalStringWithOneByteDataMap"),
- 0x084b9: (70, "ExternalAsciiStringMap"),
- 0x084e1: (82, "ShortExternalStringMap"),
- 0x08509: (90, "ShortExternalStringWithOneByteDataMap"),
- 0x08531: (0, "InternalizedStringMap"),
- 0x08559: (1, "ConsInternalizedStringMap"),
- 0x08581: (5, "ConsAsciiInternalizedStringMap"),
- 0x085a9: (2, "ExternalInternalizedStringMap"),
- 0x085d1: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x085f9: (6, "ExternalAsciiInternalizedStringMap"),
- 0x08621: (18, "ShortExternalInternalizedStringMap"),
- 0x08649: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08671: (22, "ShortExternalAsciiInternalizedStringMap"),
- 0x08699: (86, "ShortExternalAsciiStringMap"),
- 0x086c1: (64, "UndetectableStringMap"),
- 0x086e9: (68, "UndetectableAsciiStringMap"),
- 0x08711: (138, "ExternalByteArrayMap"),
- 0x08739: (139, "ExternalUnsignedByteArrayMap"),
- 0x08761: (140, "ExternalShortArrayMap"),
- 0x08789: (141, "ExternalUnsignedShortArrayMap"),
- 0x087b1: (142, "ExternalIntArrayMap"),
- 0x087d9: (143, "ExternalUnsignedIntArrayMap"),
- 0x08801: (144, "ExternalFloatArrayMap"),
- 0x08829: (145, "ExternalDoubleArrayMap"),
- 0x08851: (146, "ExternalPixelArrayMap"),
- 0x08879: (170, "NonStrictArgumentsElementsMap"),
- 0x088a1: (170, "FunctionContextMap"),
- 0x088c9: (170, "CatchContextMap"),
- 0x088f1: (170, "WithContextMap"),
- 0x08919: (170, "BlockContextMap"),
- 0x08941: (170, "ModuleContextMap"),
- 0x08969: (170, "GlobalContextMap"),
- 0x08991: (172, "JSMessageObjectMap"),
- 0x089b9: (135, "ForeignMap"),
- 0x089e1: (177, "NeanderMap"),
- 0x08a09: (161, "AllocationMementoMap"),
- 0x08a31: (160, "AllocationSiteMap"),
- 0x08a59: (164, "PolymorphicCodeCacheMap"),
- 0x08a81: (162, "ScriptMap"),
- 0x08ad1: (177, "ExternalMap"),
- 0x08af9: (167, "BoxMap"),
- 0x08b21: (149, "DeclaredAccessorDescriptorMap"),
- 0x08b49: (150, "DeclaredAccessorInfoMap"),
- 0x08b71: (151, "ExecutableAccessorInfoMap"),
- 0x08b99: (152, "AccessorPairMap"),
- 0x08bc1: (153, "AccessCheckInfoMap"),
- 0x08be9: (154, "InterceptorInfoMap"),
- 0x08c11: (155, "CallHandlerInfoMap"),
- 0x08c39: (156, "FunctionTemplateInfoMap"),
- 0x08c61: (157, "ObjectTemplateInfoMap"),
- 0x08c89: (158, "SignatureInfoMap"),
- 0x08cb1: (159, "TypeSwitchInfoMap"),
- 0x08cd9: (163, "CodeCacheMap"),
- 0x08d01: (165, "TypeFeedbackInfoMap"),
- 0x08d29: (166, "AliasedArgumentsEntryMap"),
- 0x08d51: (168, "DebugInfoMap"),
- 0x08d79: (169, "BreakPointInfoMap"),
+ 0x08329: (148, "ConstantPoolArrayMap"),
+ 0x08351: (171, "HashTableMap"),
+ 0x08379: (128, "SymbolMap"),
+ 0x083a1: (64, "StringMap"),
+ 0x083c9: (68, "AsciiStringMap"),
+ 0x083f1: (65, "ConsStringMap"),
+ 0x08419: (69, "ConsAsciiStringMap"),
+ 0x08441: (67, "SlicedStringMap"),
+ 0x08469: (71, "SlicedAsciiStringMap"),
+ 0x08491: (66, "ExternalStringMap"),
+ 0x084b9: (74, "ExternalStringWithOneByteDataMap"),
+ 0x084e1: (70, "ExternalAsciiStringMap"),
+ 0x08509: (82, "ShortExternalStringMap"),
+ 0x08531: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x08559: (0, "InternalizedStringMap"),
+ 0x08581: (1, "ConsInternalizedStringMap"),
+ 0x085a9: (5, "ConsAsciiInternalizedStringMap"),
+ 0x085d1: (2, "ExternalInternalizedStringMap"),
+ 0x085f9: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x08621: (6, "ExternalAsciiInternalizedStringMap"),
+ 0x08649: (18, "ShortExternalInternalizedStringMap"),
+ 0x08671: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08699: (22, "ShortExternalAsciiInternalizedStringMap"),
+ 0x086c1: (86, "ShortExternalAsciiStringMap"),
+ 0x086e9: (64, "UndetectableStringMap"),
+ 0x08711: (68, "UndetectableAsciiStringMap"),
+ 0x08739: (138, "ExternalByteArrayMap"),
+ 0x08761: (139, "ExternalUnsignedByteArrayMap"),
+ 0x08789: (140, "ExternalShortArrayMap"),
+ 0x087b1: (141, "ExternalUnsignedShortArrayMap"),
+ 0x087d9: (142, "ExternalIntArrayMap"),
+ 0x08801: (143, "ExternalUnsignedIntArrayMap"),
+ 0x08829: (144, "ExternalFloatArrayMap"),
+ 0x08851: (145, "ExternalDoubleArrayMap"),
+ 0x08879: (146, "ExternalPixelArrayMap"),
+ 0x088a1: (171, "NonStrictArgumentsElementsMap"),
+ 0x088c9: (171, "FunctionContextMap"),
+ 0x088f1: (171, "CatchContextMap"),
+ 0x08919: (171, "WithContextMap"),
+ 0x08941: (171, "BlockContextMap"),
+ 0x08969: (171, "ModuleContextMap"),
+ 0x08991: (171, "GlobalContextMap"),
+ 0x089b9: (173, "JSMessageObjectMap"),
+ 0x089e1: (135, "ForeignMap"),
+ 0x08a09: (178, "NeanderMap"),
+ 0x08a31: (162, "AllocationMementoMap"),
+ 0x08a59: (161, "AllocationSiteMap"),
+ 0x08a81: (165, "PolymorphicCodeCacheMap"),
+ 0x08aa9: (163, "ScriptMap"),
+ 0x08af9: (178, "ExternalMap"),
+ 0x08b21: (168, "BoxMap"),
+ 0x08b49: (150, "DeclaredAccessorDescriptorMap"),
+ 0x08b71: (151, "DeclaredAccessorInfoMap"),
+ 0x08b99: (152, "ExecutableAccessorInfoMap"),
+ 0x08bc1: (153, "AccessorPairMap"),
+ 0x08be9: (154, "AccessCheckInfoMap"),
+ 0x08c11: (155, "InterceptorInfoMap"),
+ 0x08c39: (156, "CallHandlerInfoMap"),
+ 0x08c61: (157, "FunctionTemplateInfoMap"),
+ 0x08c89: (158, "ObjectTemplateInfoMap"),
+ 0x08cb1: (159, "SignatureInfoMap"),
+ 0x08cd9: (160, "TypeSwitchInfoMap"),
+ 0x08d01: (164, "CodeCacheMap"),
+ 0x08d29: (166, "TypeFeedbackInfoMap"),
+ 0x08d51: (167, "AliasedArgumentsEntryMap"),
+ 0x08d79: (169, "DebugInfoMap"),
+ 0x08da1: (170, "BreakPointInfoMap"),
}
# List of known V8 objects.
@@ -250,6 +252,6 @@ KNOWN_OBJECTS = {
("OLD_DATA_SPACE", 0x082c9): "EmptyExternalPixelArray",
("OLD_DATA_SPACE", 0x082d5): "InfinityValue",
("OLD_DATA_SPACE", 0x082e1): "MinusZeroValue",
- ("CODE_SPACE", 0x11141): "JsConstructEntryCode",
- ("CODE_SPACE", 0x18da1): "JsEntryCode",
+ ("CODE_SPACE", 0x111a1): "JsConstructEntryCode",
+ ("CODE_SPACE", 0x18bc1): "JsEntryCode",
}
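The wholesale renumbering above follows from inserting the new CONSTANT_POOL_ARRAY_TYPE at 148, which bumps every later instance type (and hence every KNOWN_MAPS tuple) up by one; the JsEntryCode/JsConstructEntryCode addresses presumably moved because the generated code-space layout shifted. A sketch of how a postmortem script might consume these tables, assuming v8heapconst is importable from the tools directory:

    # Sketch: resolving a map address from a heap dump against the
    # generated tables. The address is taken from KNOWN_MAPS above.
    import v8heapconst

    instance_type, map_name = v8heapconst.KNOWN_MAPS[0x08329]
    print "%s -> type %d (%s)" % (
        map_name, instance_type,
        v8heapconst.INSTANCE_TYPES[instance_type])
    # Prints: ConstantPoolArrayMap -> type 148 (CONSTANT_POOL_ARRAY_TYPE)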