path: root/chromium/v8
author: Andras Becsi <andras.becsi@digia.com>  2013-12-11 21:33:03 +0100
committer: Andras Becsi <andras.becsi@digia.com>  2013-12-13 12:34:07 +0100
commit: f2a33ff9cbc6d19943f1c7fbddd1f23d23975577 (patch)
tree: 0586a32aa390ade8557dfd6b4897f43a07449578 /chromium/v8
parent: 5362912cdb5eea702b68ebe23702468d17c3017a (diff)
download: qtwebengine-chromium-f2a33ff9cbc6d19943f1c7fbddd1f23d23975577.tar.gz
Update Chromium to branch 1650 (31.0.1650.63)
Change-Id: I57d8c832eaec1eb2364e0a8e7352a6dd354db99f
Reviewed-by: Jocelyn Turcotte <jocelyn.turcotte@digia.com>
Diffstat (limited to 'chromium/v8')
-rw-r--r--  chromium/v8/ChangeLog | 270
-rw-r--r--  chromium/v8/Makefile | 9
-rw-r--r--  chromium/v8/build/toolchain.gypi | 22
-rwxr-xr-x  chromium/v8/include/v8-debug.h | 82
-rw-r--r--  chromium/v8/include/v8-preparser.h | 44
-rw-r--r--  chromium/v8/include/v8-profiler.h | 115
-rw-r--r--  chromium/v8/include/v8-testing.h | 34
-rw-r--r--  chromium/v8/include/v8.h | 1695
-rw-r--r--  chromium/v8/include/v8config.h | 451
-rw-r--r--  chromium/v8/include/v8stdint.h | 6
-rw-r--r--  chromium/v8/samples/process.cc | 5
-rw-r--r--  chromium/v8/src/accessors.cc | 175
-rw-r--r--  chromium/v8/src/accessors.h | 98
-rw-r--r--  chromium/v8/src/api.cc | 1004
-rw-r--r--  chromium/v8/src/api.h | 12
-rw-r--r--  chromium/v8/src/apinatives.js | 65
-rw-r--r--  chromium/v8/src/arguments.cc | 138
-rw-r--r--  chromium/v8/src/arguments.h | 118
-rw-r--r--  chromium/v8/src/arm/assembler-arm-inl.h | 5
-rw-r--r--  chromium/v8/src/arm/assembler-arm.cc | 162
-rw-r--r--  chromium/v8/src/arm/assembler-arm.h | 9
-rw-r--r--  chromium/v8/src/arm/builtins-arm.cc | 202
-rw-r--r--  chromium/v8/src/arm/code-stubs-arm.cc | 575
-rw-r--r--  chromium/v8/src/arm/code-stubs-arm.h | 38
-rw-r--r--  chromium/v8/src/arm/codegen-arm.cc | 4
-rw-r--r--  chromium/v8/src/arm/codegen-arm.h | 6
-rw-r--r--  chromium/v8/src/arm/constants-arm.h | 2
-rw-r--r--  chromium/v8/src/arm/cpu-arm.cc | 9
-rw-r--r--  chromium/v8/src/arm/debug-arm.cc | 12
-rw-r--r--  chromium/v8/src/arm/deoptimizer-arm.cc | 203
-rw-r--r--  chromium/v8/src/arm/disasm-arm.cc | 3
-rw-r--r--  chromium/v8/src/arm/full-codegen-arm.cc | 81
-rw-r--r--  chromium/v8/src/arm/ic-arm.cc | 8
-rw-r--r--  chromium/v8/src/arm/lithium-arm.cc | 311
-rw-r--r--  chromium/v8/src/arm/lithium-arm.h | 625
-rw-r--r--  chromium/v8/src/arm/lithium-codegen-arm.cc | 735
-rw-r--r--  chromium/v8/src/arm/lithium-codegen-arm.h | 48
-rw-r--r--  chromium/v8/src/arm/lithium-gap-resolver-arm.cc | 2
-rw-r--r--  chromium/v8/src/arm/lithium-gap-resolver-arm.h | 2
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.cc | 324
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.h | 75
-rw-r--r--  chromium/v8/src/arm/regexp-macro-assembler-arm.cc | 119
-rw-r--r--  chromium/v8/src/arm/regexp-macro-assembler-arm.h | 11
-rw-r--r--  chromium/v8/src/arm/simulator-arm.cc | 95
-rw-r--r--  chromium/v8/src/arm/stub-cache-arm.cc | 752
-rw-r--r--  chromium/v8/src/array-iterator.js | 7
-rw-r--r--  chromium/v8/src/assembler.cc | 11
-rw-r--r--  chromium/v8/src/assembler.h | 34
-rw-r--r--  chromium/v8/src/assert-scope.h | 9
-rw-r--r--  chromium/v8/src/ast.cc | 56
-rw-r--r--  chromium/v8/src/ast.h | 536
-rw-r--r--  chromium/v8/src/atomicops_internals_tsan.h | 12
-rw-r--r--  chromium/v8/src/bootstrapper.cc | 168
-rw-r--r--  chromium/v8/src/bootstrapper.h | 4
-rw-r--r--  chromium/v8/src/builtins.cc | 51
-rw-r--r--  chromium/v8/src/builtins.h | 30
-rw-r--r--  chromium/v8/src/checks.cc | 28
-rw-r--r--  chromium/v8/src/checks.h | 17
-rw-r--r--  chromium/v8/src/circular-queue-inl.h | 62
-rw-r--r--  chromium/v8/src/circular-queue.cc | 125
-rw-r--r--  chromium/v8/src/circular-queue.h | 72
-rw-r--r--  chromium/v8/src/code-stubs-hydrogen.cc | 362
-rw-r--r--  chromium/v8/src/code-stubs.cc | 125
-rw-r--r--  chromium/v8/src/code-stubs.h | 204
-rw-r--r--  chromium/v8/src/codegen.cc | 17
-rw-r--r--  chromium/v8/src/compiler.cc | 232
-rw-r--r--  chromium/v8/src/compiler.h | 77
-rw-r--r--  chromium/v8/src/contexts.cc | 43
-rw-r--r--  chromium/v8/src/contexts.h | 22
-rw-r--r--  chromium/v8/src/counters.cc | 10
-rw-r--r--  chromium/v8/src/counters.h | 14
-rw-r--r--  chromium/v8/src/cpu-profiler-inl.h | 21
-rw-r--r--  chromium/v8/src/cpu-profiler.cc | 137
-rw-r--r--  chromium/v8/src/cpu-profiler.h | 39
-rw-r--r--  chromium/v8/src/cpu.cc | 466
-rw-r--r--  chromium/v8/src/cpu.h | 91
-rw-r--r--  chromium/v8/src/d8-debug.cc | 22
-rw-r--r--  chromium/v8/src/d8-debug.h | 7
-rw-r--r--  chromium/v8/src/d8.cc | 116
-rw-r--r--  chromium/v8/src/d8.h | 16
-rw-r--r--  chromium/v8/src/debug-agent.cc | 111
-rw-r--r--  chromium/v8/src/debug-agent.h | 36
-rw-r--r--  chromium/v8/src/debug-debugger.js | 9
-rw-r--r--  chromium/v8/src/debug.cc | 187
-rw-r--r--  chromium/v8/src/debug.h | 38
-rw-r--r--  chromium/v8/src/deoptimizer.cc | 1044
-rw-r--r--  chromium/v8/src/deoptimizer.h | 189
-rw-r--r--  chromium/v8/src/disassembler.cc | 6
-rw-r--r--  chromium/v8/src/effects.h | 361
-rw-r--r--  chromium/v8/src/elements-kind.cc | 30
-rw-r--r--  chromium/v8/src/elements-kind.h | 1
-rw-r--r--  chromium/v8/src/elements.cc | 12
-rw-r--r--  chromium/v8/src/execution.cc | 123
-rw-r--r--  chromium/v8/src/execution.h | 51
-rw-r--r--  chromium/v8/src/extensions/externalize-string-extension.cc | 6
-rw-r--r--  chromium/v8/src/extensions/gc-extension.cc | 5
-rw-r--r--  chromium/v8/src/extensions/i18n/break-iterator.cc | 333
-rw-r--r--  chromium/v8/src/extensions/i18n/break-iterator.h | 85
-rw-r--r--  chromium/v8/src/extensions/i18n/break-iterator.js | 197
-rw-r--r--  chromium/v8/src/extensions/i18n/collator.cc | 366
-rw-r--r--  chromium/v8/src/extensions/i18n/collator.h | 68
-rw-r--r--  chromium/v8/src/extensions/i18n/collator.js | 212
-rw-r--r--  chromium/v8/src/extensions/i18n/date-format.cc | 329
-rw-r--r--  chromium/v8/src/extensions/i18n/date-format.h | 71
-rw-r--r--  chromium/v8/src/extensions/i18n/date-format.js | 478
-rw-r--r--  chromium/v8/src/extensions/i18n/footer.js | 40
-rw-r--r--  chromium/v8/src/extensions/i18n/globals.js | 168
-rw-r--r--  chromium/v8/src/extensions/i18n/header.js | 41
-rw-r--r--  chromium/v8/src/extensions/i18n/i18n-extension.cc | 117
-rw-r--r--  chromium/v8/src/extensions/i18n/i18n-extension.h | 51
-rw-r--r--  chromium/v8/src/extensions/i18n/i18n-utils.cc | 175
-rw-r--r--  chromium/v8/src/extensions/i18n/i18n-utils.h | 91
-rw-r--r--  chromium/v8/src/extensions/i18n/i18n-utils.js | 541
-rw-r--r--  chromium/v8/src/extensions/i18n/locale.cc | 251
-rw-r--r--  chromium/v8/src/extensions/i18n/locale.h | 56
-rw-r--r--  chromium/v8/src/extensions/i18n/locale.js | 192
-rw-r--r--  chromium/v8/src/extensions/i18n/number-format.cc | 418
-rw-r--r--  chromium/v8/src/extensions/i18n/number-format.h | 69
-rw-r--r--  chromium/v8/src/extensions/i18n/number-format.js | 295
-rw-r--r--  chromium/v8/src/extensions/i18n/overrides.js | 220
-rw-r--r--  chromium/v8/src/extensions/statistics-extension.cc | 2
-rw-r--r--  chromium/v8/src/factory.cc | 106
-rw-r--r--  chromium/v8/src/factory.h | 27
-rw-r--r--  chromium/v8/src/flag-definitions.h | 98
-rw-r--r--  chromium/v8/src/flags.cc | 11
-rw-r--r--  chromium/v8/src/flags.h | 3
-rw-r--r--  chromium/v8/src/frames.cc | 7
-rw-r--r--  chromium/v8/src/full-codegen.cc | 38
-rw-r--r--  chromium/v8/src/full-codegen.h | 68
-rw-r--r--  chromium/v8/src/gdb-jit.cc | 8
-rw-r--r--  chromium/v8/src/global-handles.cc | 130
-rw-r--r--  chromium/v8/src/global-handles.h | 83
-rw-r--r--  chromium/v8/src/globals.h | 135
-rw-r--r--  chromium/v8/src/handles-inl.h | 11
-rw-r--r--  chromium/v8/src/handles.cc | 34
-rw-r--r--  chromium/v8/src/handles.h | 5
-rw-r--r--  chromium/v8/src/harmony-array.js | 124
-rw-r--r--  chromium/v8/src/heap-inl.h | 95
-rw-r--r--  chromium/v8/src/heap-snapshot-generator.cc | 75
-rw-r--r--  chromium/v8/src/heap-snapshot-generator.h | 19
-rw-r--r--  chromium/v8/src/heap.cc | 297
-rw-r--r--  chromium/v8/src/heap.h | 57
-rw-r--r--  chromium/v8/src/hydrogen-alias-analysis.h | 105
-rw-r--r--  chromium/v8/src/hydrogen-bce.cc | 55
-rw-r--r--  chromium/v8/src/hydrogen-bce.h | 2
-rw-r--r--  chromium/v8/src/hydrogen-bch.cc | 40
-rw-r--r--  chromium/v8/src/hydrogen-canonicalize.cc | 4
-rw-r--r--  chromium/v8/src/hydrogen-dehoist.cc | 2
-rw-r--r--  chromium/v8/src/hydrogen-environment-liveness.cc | 6
-rw-r--r--  chromium/v8/src/hydrogen-escape-analysis.cc | 285
-rw-r--r--  chromium/v8/src/hydrogen-escape-analysis.h | 46
-rw-r--r--  chromium/v8/src/hydrogen-infer-representation.cc | 34
-rw-r--r--  chromium/v8/src/hydrogen-instructions.cc | 975
-rw-r--r--  chromium/v8/src/hydrogen-instructions.h | 2290
-rw-r--r--  chromium/v8/src/hydrogen-mark-deoptimize.cc | 29
-rw-r--r--  chromium/v8/src/hydrogen-mark-deoptimize.h | 12
-rw-r--r--  chromium/v8/src/hydrogen-osr.cc | 19
-rw-r--r--  chromium/v8/src/hydrogen-osr.h | 9
-rw-r--r--  chromium/v8/src/hydrogen-representation-changes.cc | 78
-rw-r--r--  chromium/v8/src/hydrogen-uint32-analysis.cc | 19
-rw-r--r--  chromium/v8/src/hydrogen.cc | 1892
-rw-r--r--  chromium/v8/src/hydrogen.h | 320
-rw-r--r--  chromium/v8/src/i18n.cc | 1070
-rw-r--r--  chromium/v8/src/i18n.h | 154
-rw-r--r--  chromium/v8/src/i18n.js | 2116
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32-inl.h | 10
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.cc | 122
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.h | 8
-rw-r--r--  chromium/v8/src/ia32/builtins-ia32.cc | 186
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.cc | 575
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.h | 4
-rw-r--r--  chromium/v8/src/ia32/codegen-ia32.cc | 61
-rw-r--r--  chromium/v8/src/ia32/codegen-ia32.h | 2
-rw-r--r--  chromium/v8/src/ia32/cpu-ia32.cc | 14
-rw-r--r--  chromium/v8/src/ia32/debug-ia32.cc | 8
-rw-r--r--  chromium/v8/src/ia32/deoptimizer-ia32.cc | 257
-rw-r--r--  chromium/v8/src/ia32/disasm-ia32.cc | 16
-rw-r--r--  chromium/v8/src/ia32/full-codegen-ia32.cc | 77
-rw-r--r--  chromium/v8/src/ia32/ic-ia32.cc | 8
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.cc | 1395
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.h | 131
-rw-r--r--  chromium/v8/src/ia32/lithium-gap-resolver-ia32.h | 2
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.cc | 324
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.h | 655
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.cc | 466
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.h | 41
-rw-r--r--  chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc | 5
-rw-r--r--  chromium/v8/src/ia32/stub-cache-ia32.cc | 770
-rw-r--r--  chromium/v8/src/ic-inl.h | 1
-rw-r--r--  chromium/v8/src/ic.cc | 233
-rw-r--r--  chromium/v8/src/ic.h | 83
-rw-r--r--  chromium/v8/src/isolate-inl.h | 18
-rw-r--r--  chromium/v8/src/isolate.cc | 131
-rw-r--r--  chromium/v8/src/isolate.h | 129
-rw-r--r--  chromium/v8/src/json-stringifier.h | 23
-rw-r--r--  chromium/v8/src/jsregexp.cc | 128
-rw-r--r--  chromium/v8/src/jsregexp.h | 47
-rw-r--r--  chromium/v8/src/lazy-instance.h | 19
-rw-r--r--  chromium/v8/src/lithium-allocator.cc | 2
-rw-r--r--  chromium/v8/src/lithium.cc | 45
-rw-r--r--  chromium/v8/src/lithium.h | 79
-rw-r--r--  chromium/v8/src/liveedit.cc | 133
-rw-r--r--  chromium/v8/src/log-utils.cc | 12
-rw-r--r--  chromium/v8/src/log-utils.h | 4
-rw-r--r--  chromium/v8/src/log.cc | 130
-rw-r--r--  chromium/v8/src/log.h | 24
-rw-r--r--  chromium/v8/src/macros.py | 8
-rw-r--r--  chromium/v8/src/mark-compact-inl.h | 2
-rw-r--r--  chromium/v8/src/mark-compact.cc | 107
-rw-r--r--  chromium/v8/src/mark-compact.h | 4
-rw-r--r--  chromium/v8/src/marking-thread.cc | 20
-rw-r--r--  chromium/v8/src/marking-thread.h | 13
-rw-r--r--  chromium/v8/src/messages.js | 22
-rw-r--r--  chromium/v8/src/mips/assembler-mips-inl.h | 5
-rw-r--r--  chromium/v8/src/mips/assembler-mips.cc | 11
-rw-r--r--  chromium/v8/src/mips/assembler-mips.h | 5
-rw-r--r--  chromium/v8/src/mips/builtins-mips.cc | 198
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.cc | 1002
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.h | 134
-rw-r--r--  chromium/v8/src/mips/codegen-mips.cc | 6
-rw-r--r--  chromium/v8/src/mips/codegen-mips.h | 6
-rw-r--r--  chromium/v8/src/mips/cpu-mips.cc | 8
-rw-r--r--  chromium/v8/src/mips/debug-mips.cc | 11
-rw-r--r--  chromium/v8/src/mips/deoptimizer-mips.cc | 202
-rw-r--r--  chromium/v8/src/mips/disasm-mips.cc | 3
-rw-r--r--  chromium/v8/src/mips/full-codegen-mips.cc | 83
-rw-r--r--  chromium/v8/src/mips/ic-mips.cc | 9
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.cc | 713
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.h | 52
-rw-r--r--  chromium/v8/src/mips/lithium-gap-resolver-mips.cc | 2
-rw-r--r--  chromium/v8/src/mips/lithium-gap-resolver-mips.h | 2
-rw-r--r--  chromium/v8/src/mips/lithium-mips.cc | 268
-rw-r--r--  chromium/v8/src/mips/lithium-mips.h | 611
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.cc | 528
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.h | 142
-rw-r--r--  chromium/v8/src/mips/regexp-macro-assembler-mips.cc | 3
-rw-r--r--  chromium/v8/src/mips/simulator-mips.cc | 140
-rw-r--r--  chromium/v8/src/mips/stub-cache-mips.cc | 880
-rw-r--r--  chromium/v8/src/mksnapshot.cc | 18
-rw-r--r--  chromium/v8/src/natives.h | 3
-rw-r--r--  chromium/v8/src/object-observe.js | 437
-rw-r--r--  chromium/v8/src/objects-debug.cc | 55
-rw-r--r--  chromium/v8/src/objects-inl.h | 297
-rw-r--r--  chromium/v8/src/objects-printer.cc | 88
-rw-r--r--  chromium/v8/src/objects-visiting-inl.h | 5
-rw-r--r--  chromium/v8/src/objects-visiting.h | 2
-rw-r--r--  chromium/v8/src/objects.cc | 1896
-rw-r--r--  chromium/v8/src/objects.h | 782
-rw-r--r--  chromium/v8/src/optimizing-compiler-thread.cc | 219
-rw-r--r--  chromium/v8/src/optimizing-compiler-thread.h | 71
-rw-r--r--  chromium/v8/src/parser.cc | 87
-rw-r--r--  chromium/v8/src/parser.h | 6
-rw-r--r--  chromium/v8/src/platform-cygwin.cc | 126
-rw-r--r--  chromium/v8/src/platform-freebsd.cc | 111
-rw-r--r--  chromium/v8/src/platform-linux.cc | 307
-rw-r--r--  chromium/v8/src/platform-macos.cc | 96
-rw-r--r--  chromium/v8/src/platform-nullos.cc | 573
-rw-r--r--  chromium/v8/src/platform-openbsd.cc | 117
-rw-r--r--  chromium/v8/src/platform-posix.cc | 273
-rw-r--r--  chromium/v8/src/platform-solaris.cc | 131
-rw-r--r--  chromium/v8/src/platform-win32.cc | 495
-rw-r--r--  chromium/v8/src/platform.h | 251
-rw-r--r--  chromium/v8/src/platform/condition-variable.cc | 345
-rw-r--r--  chromium/v8/src/platform/condition-variable.h | 140
-rw-r--r--  chromium/v8/src/platform/elapsed-timer.h | 120
-rw-r--r--  chromium/v8/src/platform/mutex.cc | 214
-rw-r--r--  chromium/v8/src/platform/mutex.h | 238
-rw-r--r--  chromium/v8/src/platform/semaphore.cc | 214
-rw-r--r--  chromium/v8/src/platform/semaphore.h | 126
-rw-r--r--  chromium/v8/src/platform/socket.cc | 224
-rw-r--r--  chromium/v8/src/platform/socket.h | 101
-rw-r--r--  chromium/v8/src/platform/time.cc | 613
-rw-r--r--  chromium/v8/src/platform/time.h | 413
-rw-r--r--  chromium/v8/src/preparser.h | 5
-rw-r--r--  chromium/v8/src/prettyprinter.cc | 10
-rw-r--r--  chromium/v8/src/prettyprinter.h | 6
-rw-r--r--  chromium/v8/src/profile-generator-inl.h | 10
-rw-r--r--  chromium/v8/src/profile-generator.cc | 103
-rw-r--r--  chromium/v8/src/profile-generator.h | 39
-rw-r--r--  chromium/v8/src/property-details.h | 2
-rw-r--r--  chromium/v8/src/property.h | 9
-rw-r--r--  chromium/v8/src/regexp-macro-assembler.cc | 3
-rw-r--r--  chromium/v8/src/regexp-stack.cc | 1
-rw-r--r--  chromium/v8/src/rewriter.cc | 4
-rw-r--r--  chromium/v8/src/runtime-profiler.cc | 41
-rw-r--r--  chromium/v8/src/runtime.cc | 1705
-rw-r--r--  chromium/v8/src/runtime.h | 64
-rw-r--r--  chromium/v8/src/runtime.js | 14
-rw-r--r--  chromium/v8/src/sampler.cc | 326
-rw-r--r--  chromium/v8/src/sampler.h | 19
-rw-r--r--  chromium/v8/src/scopeinfo.cc | 8
-rw-r--r--  chromium/v8/src/scopes.cc | 34
-rw-r--r--  chromium/v8/src/serialize.cc | 62
-rw-r--r--  chromium/v8/src/serialize.h | 28
-rw-r--r--  chromium/v8/src/snapshot-common.cc | 4
-rw-r--r--  chromium/v8/src/snapshot.h | 2
-rw-r--r--  chromium/v8/src/spaces-inl.h | 8
-rw-r--r--  chromium/v8/src/spaces.cc | 76
-rw-r--r--  chromium/v8/src/spaces.h | 51
-rw-r--r--  chromium/v8/src/splay-tree-inl.h | 9
-rw-r--r--  chromium/v8/src/splay-tree.h | 14
-rw-r--r--  chromium/v8/src/store-buffer-inl.h | 2
-rw-r--r--  chromium/v8/src/store-buffer.cc | 14
-rw-r--r--  chromium/v8/src/string-stream.cc | 112
-rw-r--r--  chromium/v8/src/string-stream.h | 10
-rw-r--r--  chromium/v8/src/stub-cache.cc | 273
-rw-r--r--  chromium/v8/src/stub-cache.h | 108
-rw-r--r--  chromium/v8/src/sweeper-thread.cc | 20
-rw-r--r--  chromium/v8/src/sweeper-thread.h | 13
-rw-r--r--  chromium/v8/src/transitions.cc | 20
-rw-r--r--  chromium/v8/src/transitions.h | 3
-rw-r--r--  chromium/v8/src/type-info.cc | 21
-rw-r--r--  chromium/v8/src/type-info.h | 1
-rw-r--r--  chromium/v8/src/typedarray.js | 3
-rw-r--r--  chromium/v8/src/types.h | 7
-rw-r--r--  chromium/v8/src/typing.cc | 254
-rw-r--r--  chromium/v8/src/typing.h | 17
-rw-r--r--  chromium/v8/src/unicode.h | 2
-rw-r--r--  chromium/v8/src/unique.h | 266
-rw-r--r--  chromium/v8/src/utils/random-number-generator.cc | 136
-rw-r--r--  chromium/v8/src/utils/random-number-generator.h | 106
-rw-r--r--  chromium/v8/src/v8-counters.cc | 14
-rw-r--r--  chromium/v8/src/v8.cc | 106
-rw-r--r--  chromium/v8/src/v8.h | 26
-rw-r--r--  chromium/v8/src/v8dll-main.cc | 6
-rw-r--r--  chromium/v8/src/v8globals.h | 38
-rw-r--r--  chromium/v8/src/v8threads.cc | 19
-rw-r--r--  chromium/v8/src/v8threads.h | 6
-rw-r--r--  chromium/v8/src/version.cc | 6
-rw-r--r--  chromium/v8/src/win32-headers.h | 9
-rw-r--r--  chromium/v8/src/win32-math.cc | 2
-rw-r--r--  chromium/v8/src/x64/assembler-x64-inl.h | 10
-rw-r--r--  chromium/v8/src/x64/assembler-x64.cc | 130
-rw-r--r--  chromium/v8/src/x64/assembler-x64.h | 55
-rw-r--r--  chromium/v8/src/x64/builtins-x64.cc | 233
-rw-r--r--  chromium/v8/src/x64/code-stubs-x64.cc | 703
-rw-r--r--  chromium/v8/src/x64/code-stubs-x64.h | 4
-rw-r--r--  chromium/v8/src/x64/codegen-x64.cc | 26
-rw-r--r--  chromium/v8/src/x64/codegen-x64.h | 73
-rw-r--r--  chromium/v8/src/x64/cpu-x64.cc | 12
-rw-r--r--  chromium/v8/src/x64/debug-x64.cc | 26
-rw-r--r--  chromium/v8/src/x64/deoptimizer-x64.cc | 210
-rw-r--r--  chromium/v8/src/x64/disasm-x64.cc | 74
-rw-r--r--  chromium/v8/src/x64/full-codegen-x64.cc | 94
-rw-r--r--  chromium/v8/src/x64/ic-x64.cc | 81
-rw-r--r--  chromium/v8/src/x64/lithium-codegen-x64.cc | 775
-rw-r--r--  chromium/v8/src/x64/lithium-codegen-x64.h | 44
-rw-r--r--  chromium/v8/src/x64/lithium-gap-resolver-x64.h | 2
-rw-r--r--  chromium/v8/src/x64/lithium-x64.cc | 250
-rw-r--r--  chromium/v8/src/x64/lithium-x64.h | 595
-rw-r--r--  chromium/v8/src/x64/macro-assembler-x64.cc | 327
-rw-r--r--  chromium/v8/src/x64/macro-assembler-x64.h | 49
-rw-r--r--  chromium/v8/src/x64/regexp-macro-assembler-x64.cc | 9
-rw-r--r--  chromium/v8/src/x64/stub-cache-x64.cc | 989
-rw-r--r--  chromium/v8/src/zone-inl.h | 6
-rw-r--r--  chromium/v8/src/zone.h | 11
-rw-r--r--  chromium/v8/test/cctest/cctest.gyp | 14
-rw-r--r--  chromium/v8/tools/SourceMap.js | 371
-rw-r--r--  chromium/v8/tools/gcmole/Makefile | 2
-rwxr-xr-x  chromium/v8/tools/gcmole/bootstrap.sh | 126
-rw-r--r--  chromium/v8/tools/gcmole/gcmole.lua | 3
-rwxr-xr-x  chromium/v8/tools/grokdump.py | 276
-rw-r--r--  chromium/v8/tools/gyp/v8.gyp | 138
-rwxr-xr-x  chromium/v8/tools/linux-tick-processor | 1
-rwxr-xr-x  chromium/v8/tools/plot-timer-events | 11
-rw-r--r--  chromium/v8/tools/profviz/composer.js | 75
-rw-r--r--  chromium/v8/tools/profviz/profviz.html | 7
-rw-r--r--  chromium/v8/tools/profviz/profviz.js | 22
-rw-r--r--  chromium/v8/tools/profviz/stdio.js | 6
-rw-r--r--  chromium/v8/tools/profviz/worker.js | 8
-rwxr-xr-x  chromium/v8/tools/run-deopt-fuzzer.py | 4
-rwxr-xr-x  chromium/v8/tools/run-tests.py | 8
-rw-r--r--  chromium/v8/tools/testrunner/local/old_statusfile.py | 2
-rw-r--r--  chromium/v8/tools/testrunner/local/statusfile.py | 7
-rw-r--r--  chromium/v8/tools/testrunner/local/testsuite.py | 47
-rw-r--r--  chromium/v8/tools/testrunner/local/verbose.py | 2
-rw-r--r--  chromium/v8/tools/tickprocessor-driver.js | 20
-rw-r--r--  chromium/v8/tools/tickprocessor.js | 53
-rw-r--r--  chromium/v8/tools/v8heapconst.py | 255
-rw-r--r--  chromium/v8/tools/v8heapconst.py.tmpl | 30
-rwxr-xr-x  chromium/v8/tools/windows-tick-processor.bat | 2
381 files changed, 30605 insertions, 30794 deletions
diff --git a/chromium/v8/ChangeLog b/chromium/v8/ChangeLog
index b0ab892073e..b3eba3661ad 100644
--- a/chromium/v8/ChangeLog
+++ b/chromium/v8/ChangeLog
@@ -1,3 +1,273 @@
+2013-09-18: Version 3.21.17
+
+ Implemented local load/store elimination on basic blocks.
+
+ Added mutex when accessing concurrent recompilation output queue.
+ (Chromium issue 291236)
+
+        Don't look up the cache for the result of Function::New.
+ (Chromium issue 272579)
+
+ Tweaked HConstant::EmitAtUses() to eliminate useless constant
+ generation.
+ (Chromium issue 2881)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-16: Version 3.21.16
+
+ Every place where AllocationMemento is initialized with an
+ AllocationSite is now checked to be sure a valid Site goes in. This is
+ temporary code to diagnose chromium bug 284577.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-13: Version 3.21.15
+
+ Non-JSObject heap objects are now handled using slow-path IC stub
+ guarded by the map.
+ (Chromium issue 280632)
+
+ i18n Javascript code added to the snapshot.
+ (V8 issue 2745)
+
+ Performance and stability improvements on all platforms.
+
+2013-09-12: Version 3.21.14
+
+ Added access check for observed objects.
+ (V8 issue 2778)
+
+ Cleaned up v8::ArrayBuffer::Allocator interface.
+ (V8 issue 2823)
+
+ Performance and stability improvements on all platforms.
+
+2013-09-11: Version 3.21.13
+
+ Added a ResourceConstraint for the embedder to specify that V8 is
+ running on a memory constrained device.
+ (Chromium issue 280984)
+
+ Removed HandleScope default ctor.
+ (Chromium issue 236173)
+
+ Enabled escape analysis for Hydrogen.
+
+ Correctly stringified mixed encoding indirect strings.
+ (Chromium issue 287476)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-09: Version 3.21.12
+
+ Fixed bitwise negation on x64.
+ (Chromium issue 285355)
+
+ Dropped GetCurrentThreadId() and TerminateExecution(int) from
+ the external API.
+
+ Fixed polymorphic INTERCEPTOR StoreICs on ARM/MIPS.
+ (Chromium issue 284998)
+
+ Added check if timeout has expired after processing each sample.
+ (issue 2814,v8:2871)
+
+ Removed obsolete global V8::has_been_fooed flags.
+ (issue 2744)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-05: Version 3.21.11
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-04: Version 3.21.10
+
+ Fixed Eternal::IsEmpty logic (issue 2870).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-03: Version 3.21.9
+
+ Deprecated Persistent functions which were marked to be deprecated.
+
+ Allowed uncacheable identifiers to go generic (issue 2867).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-09-02: Version 3.21.8
+
+ Added scriptId to StackTrace frames (issue 2865).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-30: Version 3.21.7
+
+ Fixed casts of eternal handles.
+
+ Turned on global handle zapping.
+
+ Always visit branches during HGraph building (Chromium issue 280333).
+
+ Profiler changes: removed deprecated API, support higher sampling
+ rate on Windows.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-29: Version 3.21.6
+
+ Fixed inlined 'throw' statements interfering with live range
+ computation. (issue 2843)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-28: Version 3.21.5
+
+ Fixed compilation with recent MinGW64 versions. (issue 2300)
+
+ Added RemovePrototype to FunctionTemplate. (Chromium issue 272440)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-26: Version 3.21.4
+
+ Lowered kInitialMaxFastElementArray constant to 95K (issue 2790).
+
+ Use signals for cpu profiling on Mac OS X (issue 2814).
+
+ Deprecated CpuProfileNode::GetSelfSamplesCount (Chromium issue 267595).
+
+ Added support for higher CPU profiler sampling rate on posix systems
+ (issue 2814).
+
+ Worked around 'inlining failed' build error with older GCC 4.x releases.
+
+ Added source map support to tick processor.
+
+ Stability improvements on all platforms.
+
+
+2013-08-23: Version 3.21.3
+
+ Temporarily disabled optimization for StringWrappers to use native
+ valueOf. (issue 2855)
+
+ Fixed crash on function declarations in eval inside non-trivial local
+ scope. (issue 2594)
+
+ Rewrote SamplingCircularQueue. (issue 2814)
+
+ Fixed hidden properties on object with frozen prototype. (issue 2829)
+
+ Fix deoptimization bug. (Chromium issue 274164)
+
+ Stability improvements on all platforms.
+
+
+2013-08-22: Version 3.21.2
+
+ Stability improvements on all platforms.
+
+
+2013-08-21: Version 3.21.1
+
+ Promoted ArrayBuffer, DataView and typed arrays to non-experimental.
+ (Chromium issue 270527)
+
+ Replaced OS::MemCopy with memcpy in typed array initialization.
+ (Chromium issue 270642)
+
+ Moved i18n break iterator C++ code to runtime (issue 2745)
+
+ Fixed invalid out-of-bounds store in MacroAssembler::Allocate.
+ (Chromium issue 263515)
+
+ Fixed register misuse in Allocate() on ARM. (issue 2851)
+
+ Fixed empty handle dereference in Runtime_InternalNumberFormat.
+ (Chromium issue 275467)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-19: Version 3.21.0
+
+ Fixed GC-related crasher (Chromium issue 274438)
+
+ Reverted making Intl non-enumerable.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-14: Version 3.20.17
+
+ Fixed Math.round/floor that had bogus Smi representation
+ (Chromium issue 272564)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-13: Version 3.20.16
+
+ Fixed bug in HPhi::SimplifyConstantInput (Chromium issue 269679)
+
+ Fixed gcmole bugs in i18n code (issue 2745)
+
+ ia32: Calls to the TranscendentalCacheStub must ensure that esi is
+ set (issue 2827)
+
+ Made sure polymorphic element access creates non-replaying
+ phis. (issue 2815)
+
+ Allowed HPhis to have an invalid merge index. (issue 2815)
+
+ Fixed smi-based math floor. (Chromium issue 270268)
+
+ Deprecated self and total time getters and total sample count
+ getter on CpuProfileNode. (Chromium issue 267595)
+
+ Fixed Object.freeze, Object.observe wrt CountOperation and
+ CompoundAssignment. (issue 2774,2779)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-07: Version 3.20.15
+
+ Exposed eternal handle api.
+
+ Bugfix to solve issues with enabling V8 typed arrays in Blink.
+
+ Fixed Array index dehoisting. (Chromium issue 264203)
+
+ Updated Array Iterator to use numeric indexes (issue 2818)
+
+ Return start/end profiling time in microseconds instead of milliseconds
+ (issue 2824)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-08-06: Version 3.20.14
+
+ Added new Harmony methods to Array.prototype object.
+ (issue 2776,v8:2777)
+
+ Performance and stability improvements on all platforms.
+
+
2013-08-01: Version 3.20.12
Removed buggy ToNumber truncation (partial fix for issue 2813)
diff --git a/chromium/v8/Makefile b/chromium/v8/Makefile
index 499f6cb08e0..288c257396d 100644
--- a/chromium/v8/Makefile
+++ b/chromium/v8/Makefile
@@ -192,6 +192,7 @@ endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
+# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
# - every combination <arch>.<mode>, e.g. "ia32.release"
@@ -392,7 +393,7 @@ endif
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new
- @if test -r $(ENVFILE) && cmp $(ENVFILE).new $(ENVFILE) >/dev/null; \
+ @if test -r $(ENVFILE) && cmp $(ENVFILE).new $(ENVFILE) > /dev/null; \
then rm $(ENVFILE).new; \
else mv $(ENVFILE).new $(ENVFILE); fi
@@ -401,6 +402,12 @@ $(ENVFILE).new:
@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new; \
echo "CXX=$(CXX)" >> $(ENVFILE).new
+# Heap constants for grokdump.
+DUMP_FILE = tools/v8heapconst.py
+grokdump: ia32.release
+ @cat $(DUMP_FILE).tmpl > $(DUMP_FILE)
+ @$(OUTDIR)/ia32.release/d8 --dump-heap-constants >> $(DUMP_FILE)
+
# Dependencies.
# Remember to keep these in sync with the DEPS file.
dependencies:
diff --git a/chromium/v8/build/toolchain.gypi b/chromium/v8/build/toolchain.gypi
index ddb8aafad0c..c1066ebe94b 100644
--- a/chromium/v8/build/toolchain.gypi
+++ b/chromium/v8/build/toolchain.gypi
@@ -561,13 +561,21 @@
'cflags!': [
'-O0',
'-O1',
- '-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
- '-O3',
+ ],
+ 'conditions': [
+ # TODO(crbug.com/272548): Avoid -O3 in NaCl
+ ['nacl_target_arch=="none"', {
+ 'cflags': ['-O3'],
+ 'cflags!': ['-O2'],
+ }, {
+ 'cflags': ['-O2'],
+ 'cflags!': ['-O3'],
+ }],
],
}],
['v8_optimized_debug!=0 and gcc_version==44 and clang==0', {
@@ -614,13 +622,11 @@
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags!': [
- '-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
- '-O3',
'<(wno_array_bounds)',
],
'conditions': [
@@ -630,6 +636,14 @@
'-fno-tree-vrp',
],
}],
+ # TODO(crbug.com/272548): Avoid -O3 in NaCl
+ ['nacl_target_arch=="none"', {
+ 'cflags': ['-O3'],
+ 'cflags!': ['-O2'],
+ }, {
+ 'cflags': ['-O2'],
+ 'cflags!': ['-O3'],
+ }],
],
}],
['OS=="android"', {
diff --git a/chromium/v8/include/v8-debug.h b/chromium/v8/include/v8-debug.h
index e488aaa8891..053b81d2c58 100755
--- a/chromium/v8/include/v8-debug.h
+++ b/chromium/v8/include/v8-debug.h
@@ -30,40 +30,6 @@
#include "v8.h"
-#ifdef _WIN32
-typedef int int32_t;
-typedef unsigned int uint32_t;
-typedef unsigned short uint16_t; // NOLINT
-typedef long long int64_t; // NOLINT
-
-// Setup for Windows DLL export/import. See v8.h in this directory for
-// information on how to build/use V8 as a DLL.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define EXPORT __declspec(dllimport)
-#else
-#define EXPORT
-#endif
-
-#else // _WIN32
-
-// Setup for Linux shared library export. See v8.h in this directory for
-// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#define EXPORT __attribute__ ((visibility("default")))
-#else // defined(__GNUC__) && (__GNUC__ >= 4)
-#define EXPORT
-#endif // defined(__GNUC__) && (__GNUC__ >= 4)
-
-#endif // _WIN32
-
-
/**
* Debugger support for the V8 JavaScript engine.
*/
@@ -81,7 +47,7 @@ enum DebugEvent {
};
-class EXPORT Debug {
+class V8_EXPORT Debug {
public:
/**
* A client object passed to the v8 debugger whose ownership will be taken by
@@ -140,6 +106,8 @@ class EXPORT Debug {
*/
virtual ClientData* GetClientData() const = 0;
+ virtual Isolate* GetIsolate() const = 0;
+
virtual ~Message() {}
};
@@ -184,21 +152,6 @@ class EXPORT Debug {
virtual ~EventDetails() {}
};
-
- /**
- * Debug event callback function.
- *
- * \param event the type of the debug event that triggered the callback
- * (enum DebugEvent)
- * \param exec_state execution state (JavaScript object)
- * \param event_data event specific data (JavaScript object)
- * \param data value passed by the user to SetDebugEventListener
- */
- typedef void (*EventCallback)(DebugEvent event,
- Handle<Object> exec_state,
- Handle<Object> event_data,
- Handle<Value> data);
-
/**
* Debug event callback function.
*
@@ -213,23 +166,8 @@ class EXPORT Debug {
* Debug message callback function.
*
* \param message the debug message handler message object
- * \param length length of the message
- * \param client_data the data value passed when registering the message handler
-
- * A MessageHandler does not take possession of the message string,
- * and must not rely on the data persisting after the handler returns.
- *
- * This message handler is deprecated. Use MessageHandler2 instead.
- */
- typedef void (*MessageHandler)(const uint16_t* message, int length,
- ClientData* client_data);
-
- /**
- * Debug message callback function.
- *
- * \param message the debug message handler message object
*
- * A MessageHandler does not take possession of the message data,
+ * A MessageHandler2 does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*MessageHandler2)(const Message& message);
@@ -244,10 +182,6 @@ class EXPORT Debug {
*/
typedef void (*DebugMessageDispatchHandler)();
- // Set a C debug event listener.
- V8_DEPRECATED(static bool SetDebugEventListener(
- EventCallback that,
- Handle<Value> data = Handle<Value>()));
static bool SetDebugEventListener2(EventCallback2 that,
Handle<Value> data = Handle<Value>());
@@ -268,16 +202,12 @@ class EXPORT Debug {
// Break execution of JavaScript in the given isolate (this method
// can be invoked from a non-VM thread) for further client command
// execution on a VM thread. Client data is then passed in
- // EventDetails to EventCallback at the moment when the VM actually
+ // EventDetails to EventCallback2 at the moment when the VM actually
// stops. If no isolate is provided the default isolate is used.
static void DebugBreakForCommand(ClientData* data = NULL,
Isolate* isolate = NULL);
- // Message based interface. The message protocol is JSON. NOTE the message
- // handler thread is not supported any more parameter must be false.
- V8_DEPRECATED(static void SetMessageHandler(
- MessageHandler handler,
- bool message_handler_thread = false));
+ // Message based interface. The message protocol is JSON.
static void SetMessageHandler2(MessageHandler2 handler);
// If no isolate is provided the default isolate is
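The net effect of the v8-debug.h changes above: the old wide-character MessageHandler and the four-argument EventCallback are removed outright, the "2" variants become the only entry points, and Debug::Message gains a GetIsolate() accessor. A minimal sketch of an embedder handler against the new surface, using only the signatures visible in this hunk (retrieving the JSON payload from the Message is elided, since that accessor is not shown here):

    // Matches the MessageHandler2 typedef above; per its doc comment, the
    // handler must not rely on the message data persisting after it returns.
    static void OnDebugMessage(const v8::Debug::Message& message) {
      v8::Isolate* isolate = message.GetIsolate();  // accessor added by this patch
      (void)isolate;
      // ... decode the JSON protocol payload and dispatch a response ...
    }

    // Registration goes through the surviving entry point:
    //   v8::Debug::SetMessageHandler2(OnDebugMessage);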
diff --git a/chromium/v8/include/v8-preparser.h b/chromium/v8/include/v8-preparser.h
index 3e39823d65c..1da77185af8 100644
--- a/chromium/v8/include/v8-preparser.h
+++ b/chromium/v8/include/v8-preparser.h
@@ -28,48 +28,14 @@
#ifndef PREPARSER_H
#define PREPARSER_H
+#include "v8.h"
#include "v8stdint.h"
-#ifdef _WIN32
-
-// Setup for Windows DLL export/import. When building the V8 DLL the
-// BUILDING_V8_SHARED needs to be defined. When building a program which uses
-// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
-// static library or building a program which uses the V8 static library neither
-// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif // BUILDING_V8_SHARED
-
-#else // _WIN32
-
-// Setup for Linux shared library export. There is no need to distinguish
-// between building or using the V8 shared library, but we should not
-// export symbols when we are building a static library.
-#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
- (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else
-#define V8EXPORT
-#endif
-
-#endif // _WIN32
-
-
namespace v8 {
// The result of preparsing is either a stack overflow error, or an opaque
// blob of data that can be passed back into the parser.
-class V8EXPORT PreParserData {
+class V8_EXPORT PreParserData {
public:
PreParserData(size_t size, const uint8_t* data)
: data_(data), size_(size) { }
@@ -94,7 +60,7 @@ class V8EXPORT PreParserData {
// Interface for a stream of Unicode characters.
-class V8EXPORT UnicodeInputStream { // NOLINT - Thinks V8EXPORT is class name.
+class V8_EXPORT UnicodeInputStream { // NOLINT - V8_EXPORT is not a class name.
public:
virtual ~UnicodeInputStream();
@@ -110,11 +76,9 @@ class V8EXPORT UnicodeInputStream { // NOLINT - Thinks V8EXPORT is class name.
// more stack space than the limit provided, the result's stack_overflow()
// method will return true. Otherwise the result contains preparser
// data that can be used by the V8 parser to speed up parsing.
-PreParserData V8EXPORT Preparse(UnicodeInputStream* input,
+PreParserData V8_EXPORT Preparse(UnicodeInputStream* input,
size_t max_stack_size);
} // namespace v8.
-#undef V8EXPORT
-
#endif // PREPARSER_H
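With the export boilerplate gone, v8-preparser.h reduces to the PreParserData/UnicodeInputStream/Preparse surface. A hedged sketch of driving it, assuming UnicodeInputStream's pure virtual Next() method (returning the next code point, or a negative value at end of input) from the portion of the class this hunk elides, plus the stack_overflow() accessor named in the comment above:

    // Toy code-point stream over ASCII source; Next() is assumed, see above.
    class AsciiStream : public v8::UnicodeInputStream {
     public:
      explicit AsciiStream(const char* src) : src_(src) {}
      virtual int32_t Next() { return *src_ ? *src_++ : -1; }
     private:
      const char* src_;
    };

    // AsciiStream input("function f() { return 42; }");
    // v8::PreParserData data = v8::Preparse(&input, 64 * 1024);  // 64 KB stack budget
    // if (!data.stack_overflow()) { /* hand the blob back to the V8 parser */ }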
diff --git a/chromium/v8/include/v8-profiler.h b/chromium/v8/include/v8-profiler.h
index 7898fef1967..217a938329e 100644
--- a/chromium/v8/include/v8-profiler.h
+++ b/chromium/v8/include/v8-profiler.h
@@ -30,36 +30,6 @@
#include "v8.h"
-#ifdef _WIN32
-// Setup for Windows DLL export/import. See v8.h in this directory for
-// information on how to build/use V8 as a DLL.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif
-
-#else // _WIN32
-
-// Setup for Linux shared library export. See v8.h in this directory for
-// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
- (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else
-#define V8EXPORT
-#endif
-
-#endif // _WIN32
-
-
/**
* Profiler support for the V8 JavaScript engine.
*/
@@ -70,7 +40,7 @@ typedef uint32_t SnapshotObjectId;
/**
* CpuProfileNode represents a node in a call graph.
*/
-class V8EXPORT CpuProfileNode {
+class V8_EXPORT CpuProfileNode {
public:
/** Returns function name (empty string for anonymous functions.) */
Handle<String> GetFunctionName() const;
@@ -87,23 +57,20 @@ class V8EXPORT CpuProfileNode {
*/
int GetLineNumber() const;
- /**
- * Returns total (self + children) execution time of the function,
- * in milliseconds, estimated by samples count.
- */
- double GetTotalTime() const;
-
- /**
- * Returns self execution time of the function, in milliseconds,
- * estimated by samples count.
- */
- double GetSelfTime() const;
+ /** Returns bailout reason for the function
+ * if the optimization was disabled for it.
+ */
+ const char* GetBailoutReason() const;
- /** Returns the count of samples where function exists. */
- double GetTotalSamplesCount() const;
+ /** DEPRECATED. Please use GetHitCount instead.
+ * Returns the count of samples where function was currently executing.
+ */
+ V8_DEPRECATED(double GetSelfSamplesCount() const);
- /** Returns the count of samples where function was currently executing. */
- double GetSelfSamplesCount() const;
+ /**
+ * Returns the count of samples where the function was currently executing.
+ */
+ unsigned GetHitCount() const;
/** Returns function entry UID. */
unsigned GetCallUid() const;
@@ -125,7 +92,7 @@ class V8EXPORT CpuProfileNode {
* CpuProfile contains a CPU profile in a form of top-down call tree
* (from main() down to functions that do all the work).
*/
-class V8EXPORT CpuProfile {
+class V8_EXPORT CpuProfile {
public:
/** Returns CPU profile UID (assigned by the profiler.) */
unsigned GetUid() const;
@@ -176,16 +143,14 @@ class V8EXPORT CpuProfile {
* Interface for controlling CPU profiling. Instance of the
* profiler can be retrieved using v8::Isolate::GetCpuProfiler.
*/
-class V8EXPORT CpuProfiler {
+class V8_EXPORT CpuProfiler {
public:
/**
- * A note on security tokens usage. As scripts from different
- * origins can run inside a single V8 instance, it is possible to
- * have functions from different security contexts intermixed in a
- * single CPU profile. To avoid exposing function names belonging to
- * other contexts, filtering by security token is performed while
- * obtaining profiling results.
+ * Changes default CPU profiler sampling interval to the specified number
+ * of microseconds. Default interval is 1000us. This method must be called
+ * when there are no profiles being recorded.
*/
+ void SetSamplingInterval(int us);
/**
* Returns the number of profiles collected (doesn't include
@@ -222,6 +187,11 @@ class V8EXPORT CpuProfiler {
*/
void DeleteAllCpuProfiles();
+ /**
+ * Tells the profiler whether the embedder is idle.
+ */
+ void SetIdle(bool is_idle);
+
private:
CpuProfiler();
~CpuProfiler();
@@ -237,7 +207,7 @@ class HeapGraphNode;
* HeapSnapshotEdge represents a directed connection between heap
* graph nodes: from retainers to retained nodes.
*/
-class V8EXPORT HeapGraphEdge {
+class V8_EXPORT HeapGraphEdge {
public:
enum Type {
kContextVariable = 0, // A variable from a function context.
@@ -273,20 +243,22 @@ class V8EXPORT HeapGraphEdge {
/**
* HeapGraphNode represents a node in a heap graph.
*/
-class V8EXPORT HeapGraphNode {
+class V8_EXPORT HeapGraphNode {
public:
enum Type {
- kHidden = 0, // Hidden node, may be filtered when shown to user.
- kArray = 1, // An array of elements.
- kString = 2, // A string.
- kObject = 3, // A JS object (except for arrays and strings).
- kCode = 4, // Compiled code.
- kClosure = 5, // Function closure.
- kRegExp = 6, // RegExp.
- kHeapNumber = 7, // Number stored in the heap.
- kNative = 8, // Native object (not from V8 heap).
-    kSynthetic = 9   // Synthetic object, usually used for grouping
- // snapshot items together.
+ kHidden = 0, // Hidden node, may be filtered when shown to user.
+ kArray = 1, // An array of elements.
+ kString = 2, // A string.
+ kObject = 3, // A JS object (except for arrays and strings).
+ kCode = 4, // Compiled code.
+ kClosure = 5, // Function closure.
+ kRegExp = 6, // RegExp.
+ kHeapNumber = 7, // Number stored in the heap.
+ kNative = 8, // Native object (not from V8 heap).
+    kSynthetic = 9,      // Synthetic object, usually used for grouping
+ // snapshot items together.
+ kConsString = 10, // Concatenated string. A pair of pointers to strings.
+ kSlicedString = 11 // Sliced string. A fragment of another string.
};
/** Returns node type (see HeapGraphNode::Type). */
@@ -325,7 +297,7 @@ class V8EXPORT HeapGraphNode {
/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
-class V8EXPORT HeapSnapshot {
+class V8_EXPORT HeapSnapshot {
public:
enum SerializationFormat {
kJSON = 0 // See format description near 'Serialize' method.
@@ -395,7 +367,7 @@ class RetainedObjectInfo;
* Interface for controlling heap profiling. Instance of the
* profiler can be retrieved using v8::Isolate::GetHeapProfiler.
*/
-class V8EXPORT HeapProfiler {
+class V8_EXPORT HeapProfiler {
public:
/**
* Callback function invoked for obtaining RetainedObjectInfo for
@@ -533,7 +505,7 @@ class V8EXPORT HeapProfiler {
* keeps them alive only during snapshot collection. Afterwards, they
* are freed by calling the Dispose class function.
*/
-class V8EXPORT RetainedObjectInfo { // NOLINT
+class V8_EXPORT RetainedObjectInfo { // NOLINT
public:
/** Called by V8 when it no longer needs an instance. */
virtual void Dispose() = 0;
@@ -599,7 +571,4 @@ struct HeapStatsUpdate {
} // namespace v8
-#undef V8EXPORT
-
-
#endif // V8_V8_PROFILER_H_
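Summing up the profiler diff: the per-node time and sample-count getters give way to GetHitCount() and GetBailoutReason(), and CpuProfiler grows interval and idle-notification control. A sketch of the new knobs, using only members shown in this hunk plus Isolate::GetCpuProfiler() from the class comment above:

    void ConfigureProfiler(v8::Isolate* isolate) {
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      // Per the doc comment, only legal while no profiles are being recorded.
      profiler->SetSamplingInterval(500);  // sample every 500us; default is 1000us
      profiler->SetIdle(true);             // embedder's message loop is idle
    }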
diff --git a/chromium/v8/include/v8-testing.h b/chromium/v8/include/v8-testing.h
index 59eebf9db47..97b467a91b1 100644
--- a/chromium/v8/include/v8-testing.h
+++ b/chromium/v8/include/v8-testing.h
@@ -30,42 +30,12 @@
#include "v8.h"
-#ifdef _WIN32
-// Setup for Windows DLL export/import. See v8.h in this directory for
-// information on how to build/use V8 as a DLL.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
-#else
-#define V8EXPORT
-#endif
-
-#else // _WIN32
-
-// Setup for Linux shared library export. See v8.h in this directory for
-// information on how to build/use V8 as shared library.
-#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
- (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else
-#define V8EXPORT
-#endif
-
-#endif // _WIN32
-
-
/**
* Testing support for the V8 JavaScript engine.
*/
namespace v8 {
-class V8EXPORT Testing {
+class V8_EXPORT Testing {
public:
enum StressType {
kStressTypeOpt,
@@ -99,7 +69,7 @@ class V8EXPORT Testing {
} // namespace v8
-#undef V8EXPORT
+#undef V8_EXPORT
#endif // V8_V8_TEST_H_
diff --git a/chromium/v8/include/v8.h b/chromium/v8/include/v8.h
index 3eb4794f5c6..de2733838ff 100644
--- a/chromium/v8/include/v8.h
+++ b/chromium/v8/include/v8.h
@@ -40,7 +40,10 @@
#include "v8stdint.h"
-#ifdef _WIN32
+// We reserve the V8_* prefix for macros defined in V8 public API and
+// assume there are no name conflicts with the embedder's code.
+
+#ifdef V8_OS_WIN
// Setup for Windows DLL export/import. When building the V8 DLL the
// BUILDING_V8_SHARED needs to be defined. When building a program which uses
@@ -53,59 +56,33 @@
#endif
#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __declspec(dllexport)
+# define V8_EXPORT __declspec(dllexport)
#elif USING_V8_SHARED
-#define V8EXPORT __declspec(dllimport)
+# define V8_EXPORT __declspec(dllimport)
#else
-#define V8EXPORT
+# define V8_EXPORT
#endif // BUILDING_V8_SHARED
-#else // _WIN32
+#else // V8_OS_WIN
// Setup for Linux shared library export.
-#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
- (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
-#ifdef BUILDING_V8_SHARED
-#define V8EXPORT __attribute__ ((visibility("default")))
-#else
-#define V8EXPORT
-#endif
-#else
-#define V8EXPORT
-#endif
-
-#endif // _WIN32
-
-#if defined(__GNUC__) && !defined(DEBUG)
-#define V8_INLINE(declarator) inline __attribute__((always_inline)) declarator
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define V8_INLINE(declarator) __forceinline declarator
+#if V8_HAS_ATTRIBUTE_VISIBILITY && defined(V8_SHARED)
+# ifdef BUILDING_V8_SHARED
+# define V8_EXPORT __attribute__ ((visibility("default")))
+# else
+# define V8_EXPORT
+# endif
#else
-#define V8_INLINE(declarator) inline declarator
+# define V8_EXPORT
#endif
-#if defined(__GNUC__) && !V8_DISABLE_DEPRECATIONS
-#define V8_DEPRECATED(declarator) declarator __attribute__ ((deprecated))
-#elif defined(_MSC_VER) && !V8_DISABLE_DEPRECATIONS
-#define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
-#else
-#define V8_DEPRECATED(declarator) declarator
-#endif
-
-#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
- #define V8_UNLIKELY(condition) __builtin_expect((condition), 0)
- #define V8_LIKELY(condition) __builtin_expect((condition), 1)
-#else
- #define V8_UNLIKELY(condition) (condition)
- #define V8_LIKELY(condition) (condition)
-#endif
+#endif // V8_OS_WIN
/**
* The v8 JavaScript engine.
*/
namespace v8 {
-class AccessorInfo;
class AccessorSignature;
class Array;
class Boolean;
@@ -142,11 +119,14 @@ class Utils;
class Value;
template <class T> class Handle;
template <class T> class Local;
-template <class T> class Persistent;
+template <class T> class Eternal;
+template<class T> class NonCopyablePersistentTraits;
+template<class T,
+ class M = NonCopyablePersistentTraits<T> > class Persistent;
+template<class T, class P> class WeakCallbackObject;
class FunctionTemplate;
class ObjectTemplate;
class Data;
-class AccessorInfo;
template<typename T> class PropertyCallbackInfo;
class StackTrace;
class StackFrame;
@@ -165,6 +145,7 @@ class Object;
template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
+class GlobalHandles;
}
@@ -192,27 +173,6 @@ class UniqueId {
intptr_t data_;
};
-
-// --- Weak Handles ---
-
-
-/**
- * A weak reference callback function.
- *
- * This callback should either explicitly invoke Dispose on |object| if
- * V8 wrapper is not needed anymore, or 'revive' it by invocation of MakeWeak.
- *
- * \param object the weak global object to be reclaimed by the garbage collector
- * \param parameter the value passed in when making the weak global object
- */
-template<typename T, typename P>
-class WeakReferenceCallbacks {
- public:
- typedef void (*Revivable)(Isolate* isolate,
- Persistent<T>* object,
- P* parameter);
-};
-
// --- Handles ---
#define TYPE_CHECK(T, S) \
@@ -251,14 +211,7 @@ template <class T> class Handle {
/**
* Creates an empty handle.
*/
- V8_INLINE(Handle()) : val_(0) {}
-
-#ifdef V8_USE_UNSAFE_HANDLES
- /**
- * Creates a new handle for the specified value.
- */
- V8_INLINE(explicit Handle(T* val)) : val_(val) {}
-#endif
+ V8_INLINE Handle() : val_(0) {}
/**
* Creates a handle for the contents of the specified handle. This
@@ -270,7 +223,7 @@ template <class T> class Handle {
* Handle<String> to a variable declared as Handle<Value>, is legal
* because String is a subclass of Value.
*/
- template <class S> V8_INLINE(Handle(Handle<S> that))
+ template <class S> V8_INLINE Handle(Handle<S> that)
: val_(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -283,16 +236,16 @@ template <class T> class Handle {
/**
* Returns true if the handle is empty.
*/
- V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
+ V8_INLINE bool IsEmpty() const { return val_ == 0; }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
- V8_INLINE(void Clear()) { val_ = 0; }
+ V8_INLINE void Clear() { val_ = 0; }
- V8_INLINE(T* operator->() const) { return val_; }
+ V8_INLINE T* operator->() const { return val_; }
- V8_INLINE(T* operator*() const) { return val_; }
+ V8_INLINE T* operator*() const { return val_; }
/**
* Checks whether two handles are the same.
@@ -300,7 +253,7 @@ template <class T> class Handle {
* to which they refer are identical.
* The handles' references are not checked.
*/
- template <class S> V8_INLINE(bool operator==(const Handle<S> that) const) {
+ template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
@@ -308,16 +261,14 @@ template <class T> class Handle {
return *a == *b;
}
-#ifndef V8_USE_UNSAFE_HANDLES
- template <class S> V8_INLINE(
- bool operator==(const Persistent<S>& that) const) {
+ template <class S> V8_INLINE bool operator==(
+ const Persistent<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
}
-#endif
/**
* Checks whether two handles are different.
@@ -325,11 +276,16 @@ template <class T> class Handle {
* the objects to which they refer are different.
* The handles' references are not checked.
*/
- template <class S> V8_INLINE(bool operator!=(Handle<S> that) const) {
+ template <class S> V8_INLINE bool operator!=(const Handle<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S> V8_INLINE bool operator!=(
+ const Persistent<S>& that) const {
return !operator==(that);
}
- template <class S> V8_INLINE(static Handle<T> Cast(Handle<S> that)) {
+ template <class S> V8_INLINE static Handle<T> Cast(Handle<S> that) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -338,16 +294,14 @@ template <class T> class Handle {
return Handle<T>(T::Cast(*that));
}
- template <class S> V8_INLINE(Handle<S> As()) {
+ template <class S> V8_INLINE Handle<S> As() {
return Handle<S>::Cast(*this);
}
-#ifndef V8_USE_UNSAFE_HANDLES
- V8_INLINE(static Handle<T> New(Isolate* isolate, Handle<T> that)) {
+ V8_INLINE static Handle<T> New(Isolate* isolate, Handle<T> that) {
return New(isolate, that.val_);
}
- // TODO(dcarney): remove before cutover
- V8_INLINE(static Handle<T> New(Isolate* isolate, const Persistent<T>& that)) {
+ V8_INLINE static Handle<T> New(Isolate* isolate, const Persistent<T>& that) {
return New(isolate, that.val_);
}
@@ -358,18 +312,15 @@ template <class T> class Handle {
/**
* Creates a new handle for the specified value.
*/
- V8_INLINE(explicit Handle(T* val)) : val_(val) {}
-#endif
+ V8_INLINE explicit Handle(T* val) : val_(val) {}
private:
friend class Utils;
- template<class F> friend class Persistent;
+ template<class F, class M> friend class Persistent;
template<class F> friend class Local;
- friend class Arguments;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
template<class F> friend class internal::CustomArguments;
- friend class AccessorInfo;
friend Handle<Primitive> Undefined(Isolate* isolate);
friend Handle<Primitive> Null(Isolate* isolate);
friend Handle<Boolean> True(Isolate* isolate);
@@ -377,9 +328,7 @@ template <class T> class Handle {
friend class Context;
friend class HandleScope;
-#ifndef V8_USE_UNSAFE_HANDLES
- V8_INLINE(static Handle<T> New(Isolate* isolate, T* that));
-#endif
+ V8_INLINE static Handle<T> New(Isolate* isolate, T* that);
T* val_;
};
@@ -392,11 +341,10 @@ template <class T> class Handle {
* handle scope are destroyed when the handle scope is destroyed. Hence it
* is not necessary to explicitly deallocate local handles.
*/
-// TODO(dcarney): deprecate entire class
template <class T> class Local : public Handle<T> {
public:
- V8_INLINE(Local());
- template <class S> V8_INLINE(Local(Local<S> that))
+ V8_INLINE Local();
+ template <class S> V8_INLINE Local(Local<S> that)
: Handle<T>(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -407,11 +355,7 @@ template <class T> class Local : public Handle<T> {
}
-#ifdef V8_USE_UNSAFE_HANDLES
- template <class S> V8_INLINE(Local(S* that) : Handle<T>(that)) { }
-#endif
-
- template <class S> V8_INLINE(static Local<T> Cast(Local<S> that)) {
+ template <class S> V8_INLINE static Local<T> Cast(Local<S> that) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -419,14 +363,12 @@ template <class T> class Local : public Handle<T> {
#endif
return Local<T>(T::Cast(*that));
}
-#ifndef V8_USE_UNSAFE_HANDLES
- template <class S> V8_INLINE(Local(Handle<S> that))
+ template <class S> V8_INLINE Local(Handle<S> that)
: Handle<T>(reinterpret_cast<T*>(*that)) {
TYPE_CHECK(T, S);
}
-#endif
- template <class S> V8_INLINE(Local<S> As()) {
+ template <class S> V8_INLINE Local<S> As() {
return Local<S>::Cast(*this);
}
@@ -435,36 +377,108 @@ template <class T> class Local : public Handle<T> {
* The referee is kept alive by the local handle even when
* the original handle is destroyed/disposed.
*/
- V8_INLINE(static Local<T> New(Handle<T> that));
- V8_INLINE(static Local<T> New(Isolate* isolate, Handle<T> that));
-#ifndef V8_USE_UNSAFE_HANDLES
- // TODO(dcarney): remove before cutover
- V8_INLINE(static Local<T> New(Isolate* isolate, const Persistent<T>& that));
+ V8_INLINE static Local<T> New(Handle<T> that);
+ V8_INLINE static Local<T> New(Isolate* isolate, Handle<T> that);
+ template<class M>
+ V8_INLINE static Local<T> New(Isolate* isolate,
+ const Persistent<T, M>& that);
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
private:
#endif
- template <class S> V8_INLINE(Local(S* that) : Handle<T>(that)) { }
-#endif
+ template <class S> V8_INLINE Local(S* that) : Handle<T>(that) { }
private:
friend class Utils;
- template<class F> friend class Persistent;
+ template<class F> friend class Eternal;
+ template<class F, class M> friend class Persistent;
template<class F> friend class Handle;
- friend class Arguments;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
friend class String;
friend class Object;
- friend class AccessorInfo;
friend class Context;
template<class F> friend class internal::CustomArguments;
friend class HandleScope;
- V8_INLINE(static Local<T> New(Isolate* isolate, T* that));
+ V8_INLINE static Local<T> New(Isolate* isolate, T* that);
+};
+
+
+// Eternal handles are set-once handles that live for the life of the isolate.
+template <class T> class Eternal {
+ public:
+ V8_INLINE Eternal() : index_(kInitialValue) { }
+ template<class S>
+ V8_INLINE Eternal(Isolate* isolate, Local<S> handle) : index_(kInitialValue) {
+ Set(isolate, handle);
+ }
+ // Can only be safely called if already set.
+ V8_INLINE Local<T> Get(Isolate* isolate);
+ V8_INLINE bool IsEmpty() { return index_ == kInitialValue; }
+ template<class S> V8_INLINE void Set(Isolate* isolate, Local<S> handle);
+
+ private:
+ static const int kInitialValue = -1;
+ int index_;
+};
+
+
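A quick illustration (not part of the patch) of Eternal's set-once contract, limited to the Set/Get/IsEmpty members defined above: once set, the handle lives for the rest of the isolate's lifetime, so there is nothing to dispose.

    void CacheConstant(v8::Isolate* isolate, v8::Local<v8::String> s) {
      static v8::Eternal<v8::String> cached;              // starts empty
      if (cached.IsEmpty()) cached.Set(isolate, s);       // set exactly once
      v8::Local<v8::String> value = cached.Get(isolate);  // only safe after Set
      (void)value;
    }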
+template<class T, class P>
+class WeakCallbackData {
+ public:
+ typedef void (*Callback)(const WeakCallbackData<T, P>& data);
+
+ V8_INLINE Isolate* GetIsolate() const { return isolate_; }
+ V8_INLINE Local<T> GetValue() const { return handle_; }
+ V8_INLINE P* GetParameter() const { return parameter_; }
+
+ private:
+ friend class internal::GlobalHandles;
+ WeakCallbackData(Isolate* isolate, Local<T> handle, P* parameter)
+ : isolate_(isolate), handle_(handle), parameter_(parameter) { }
+ Isolate* isolate_;
+ Local<T> handle_;
+ P* parameter_;
+};
+
+
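This is the shape a weak callback takes under the new WeakCallbackData API; how the callback gets registered (a SetWeak-style overload on Persistent) falls outside this hunk, so treat that part as assumed context rather than something the patch shows:

    struct Wrapper { void* native_resource; };  // illustrative embedder type

    static void OnWeak(const v8::WeakCallbackData<v8::Object, Wrapper>& data) {
      Wrapper* wrapper = data.GetParameter();          // passed at registration
      v8::Local<v8::Object> holder = data.GetValue();  // the wrapped JS object
      (void)holder;
      // ... free wrapper->native_resource, then reset the owning Persistent ...
    }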
+// TODO(dcarney): Remove this class.
+template<typename T,
+ typename P,
+ typename M = NonCopyablePersistentTraits<T> >
+class WeakReferenceCallbacks {
+ public:
+ typedef void (*Revivable)(Isolate* isolate,
+ Persistent<T, M>* object,
+ P* parameter);
+};
+
+
+/**
+ * Default traits for Persistent. This class does not allow
+ * use of the copy constructor or assignment operator.
+ * At present kResetInDestructor is not set, but that will change in a future
+ * version.
+ */
+template<class T>
+class NonCopyablePersistentTraits {
+ public:
+ typedef Persistent<T, NonCopyablePersistentTraits<T> > NonCopyablePersistent;
+ static const bool kResetInDestructor = false;
+ template<class S, class M>
+ V8_INLINE static void Copy(const Persistent<S, M>& source,
+ NonCopyablePersistent* dest) {
+ Uncompilable<Object>();
+ }
+ // TODO(dcarney): come up with a good compile error here.
+ template<class O> V8_INLINE static void Uncompilable() {
+ TYPE_CHECK(O, Primitive);
+ }
};
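// Editorial sketch, not part of this diff: a permissive traits class in the
// shape this header expects. The name CopyableTraits is an illustration; it
// opts in to copying and to Reset() in the destructor, so copies behave like
// ordinary values. The cell duplication itself is handled by Persistent; the
// Copy hook exists only to adjust flags on the destination.
#include <v8.h>

template <class T>
struct CopyableTraits {
  typedef v8::Persistent<T, CopyableTraits<T> > CopyablePersistent;
  static const bool kResetInDestructor = true;
  template <class S, class M>
  static void Copy(const v8::Persistent<S, M>& source,
                   CopyablePersistent* dest) {
    // Intentionally empty: permitting the copy is all this traits class does.
  }
};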
+
/**
* An object reference that is independent of any handle scope. Where
* a Local handle only lives as long as the HandleScope in which it was
@@ -474,108 +488,94 @@ template <class T> class Local : public Handle<T> {
* A persistent handle contains a reference to a storage cell within
* the v8 engine which holds an object value and which is updated by
* the garbage collector whenever the object is moved. A new storage
- * cell can be created using Persistent::New and existing handles can
- * be disposed using Persistent::Dispose. Since persistent handles
- * are passed by value you may have many persistent handle objects
- * that point to the same storage cell. For instance, if you pass a
- * persistent handle as an argument to a function you will not get two
- * different storage cells but rather two references to the same
- * storage cell.
+ * cell can be created using the constructor or Persistent::Reset and
+ * existing handles can be disposed using Persistent::Reset.
+ *
+ * Copy, assignment and destructor behavior is controlled by the traits
+ * class M.
*/
-template <class T> class Persistent // NOLINT
-#ifdef V8_USE_UNSAFE_HANDLES
- : public Handle<T> {
-#else
- { // NOLINT
-#endif
+template <class T, class M> class Persistent {
public:
-#ifndef V8_USE_UNSAFE_HANDLES
- V8_INLINE(Persistent()) : val_(0) { }
- // TODO(dcarney): add this back before cutover.
-// V8_INLINE(~Persistent()) {
-// Dispose();
-// }
- V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
- // TODO(dcarney): remove somehow before cutover
- // The handle should either be 0, or a pointer to a live cell.
- V8_INLINE(void Clear()) { val_ = 0; }
-
/**
- * A constructor that creates a new global cell pointing to that. In contrast
- * to the copy constructor, this creates a new persistent handle which needs
- * to be separately disposed.
+ * A Persistent with no storage cell.
*/
- template <class S> V8_INLINE(Persistent(Isolate* isolate, Handle<S> that))
- : val_(New(isolate, *that)) { }
-
- template <class S> V8_INLINE(Persistent(Isolate* isolate,
- const Persistent<S>& that)) // NOLINT
- : val_(New(isolate, *that)) { }
-
-#else
+ V8_INLINE Persistent() : val_(0) { }
/**
- * Creates an empty persistent handle that doesn't point to any
- * storage cell.
+ * Construct a Persistent from a Handle.
+ * When the Handle is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
*/
- V8_INLINE(Persistent()) : Handle<T>() { }
-
+ template <class S> V8_INLINE Persistent(Isolate* isolate, Handle<S> that)
+ : val_(New(isolate, *that)) {
+ TYPE_CHECK(T, S);
+ }
/**
- * Creates a persistent handle for the same storage cell as the
- * specified handle. This constructor allows you to pass persistent
- * handles as arguments by value and to assign between persistent
- * handles. However, attempting to assign between incompatible
- * persistent handles, for instance from a Persistent<String> to a
- * Persistent<Number> will cause a compile-time error. Assigning
- * between compatible persistent handles, for instance assigning a
- * Persistent<String> to a variable declared as Persistent<Value>,
- * is allowed as String is a subclass of Value.
+ * Construct a Persistent from a Persistent.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
*/
- template <class S> V8_INLINE(Persistent(Persistent<S> that))
- : Handle<T>(reinterpret_cast<T*>(*that)) {
- /**
- * This check fails when trying to convert between incompatible
- * handles. For example, converting from a Handle<String> to a
- * Handle<Number>.
- */
+ template <class S, class M2>
+ V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
+ : val_(New(isolate, *that)) {
TYPE_CHECK(T, S);
}
-
- template <class S> V8_INLINE(Persistent(S* that)) : Handle<T>(that) { }
-
/**
- * A constructor that creates a new global cell pointing to that. In contrast
- * to the copy constructor, this creates a new persistent handle which needs
- * to be separately disposed.
+ * The copy constructors and assignment operator create a Persistent
+   * exactly as the Persistent constructor does, but the Copy function from
+   * the traits class is called, allowing the setting of flags based on the
+ * copied Persistent.
*/
- template <class S> V8_INLINE(Persistent(Isolate* isolate, Handle<S> that))
- : Handle<T>(New(isolate, that)) { }
-
+ V8_INLINE Persistent(const Persistent& that) : val_(0) {
+ Copy(that);
+ }
+ template <class S, class M2>
+ V8_INLINE Persistent(const Persistent<S, M2>& that) : val_(0) {
+ Copy(that);
+ }
+ V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT
+ Copy(that);
+ return *this;
+ }
+ template <class S, class M2>
+ V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) { // NOLINT
+ Copy(that);
+ return *this;
+ }
/**
- * "Casts" a plain handle which is known to be a persistent handle
- * to a persistent handle.
+ * The destructor will dispose the Persistent based on the
+   * kResetInDestructor flag in the traits class. Since not disposing the
+   * handle can result in a memory leak, it is recommended to always set
+   * this flag.
*/
- template <class S> explicit V8_INLINE(Persistent(Handle<S> that))
- : Handle<T>(*that) { }
-
-#endif
-
-#ifdef V8_USE_UNSAFE_HANDLES
- template <class S> V8_INLINE(static Persistent<T> Cast(Persistent<S> that)) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (that.IsEmpty()) return Persistent<T>();
-#endif
- return Persistent<T>(T::Cast(*that));
+ V8_INLINE ~Persistent() {
+ if (M::kResetInDestructor) Reset();
}
- template <class S> V8_INLINE(Persistent<S> As()) {
- return Persistent<S>::Cast(*this);
- }
+ /**
+   * If non-empty, destroy the underlying storage cell.
+ * IsEmpty() will return true after this call.
+ */
+ V8_INLINE void Reset();
+ /**
+ * If non-empty, destroy the underlying storage cell
+   * and create a new one with the contents of other, if other is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Handle<S>& other);
+ /**
+ * If non-empty, destroy the underlying storage cell
+   * and create a new one with the contents of other, if other is non-empty.
+ */
+ template <class S, class M2>
+ V8_INLINE void Reset(Isolate* isolate, const Persistent<S, M2>& other);
+ // TODO(dcarney): deprecate
+ V8_INLINE void Dispose() { Reset(); }
+ V8_DEPRECATED(V8_INLINE void Dispose(Isolate* isolate)) { Reset(); }
-#else
+ V8_INLINE bool IsEmpty() const { return val_ == 0; }
+
+ // TODO(dcarney): this is pretty useless, fix or remove
template <class S>
- V8_INLINE(static Persistent<T>& Cast(Persistent<S>& that)) { // NOLINT
+ V8_INLINE static Persistent<T>& Cast(Persistent<S>& that) { // NOLINT
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -584,20 +584,13 @@ template <class T> class Persistent // NOLINT
return reinterpret_cast<Persistent<T>&>(that);
}
- template <class S> V8_INLINE(Persistent<S>& As()) { // NOLINT
+ // TODO(dcarney): this is pretty useless, fix or remove
+ template <class S> V8_INLINE Persistent<S>& As() { // NOLINT
return Persistent<S>::Cast(*this);
}
-#endif
-
-#ifdef V8_USE_UNSAFE_HANDLES
- V8_DEPRECATED(static Persistent<T> New(Handle<T> that));
- V8_INLINE(static Persistent<T> New(Isolate* isolate, Handle<T> that));
- V8_INLINE(static Persistent<T> New(Isolate* isolate, Persistent<T> that));
-#endif
-#ifndef V8_USE_UNSAFE_HANDLES
- template <class S> V8_INLINE(
- bool operator==(const Persistent<S>& that) const) {
+ template <class S, class M2>
+ V8_INLINE bool operator==(const Persistent<S, M2>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
@@ -605,58 +598,48 @@ template <class T> class Persistent // NOLINT
return *a == *b;
}
- template <class S> V8_INLINE(bool operator==(const Handle<S> that) const) {
+ template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
}
-#endif
- V8_INLINE(void Dispose());
-
- /**
- * Releases the storage cell referenced by this persistent handle.
- * Does not remove the reference to the cell from any handles.
- * This handle's reference, and any other references to the storage
- * cell remain and IsEmpty will still return false.
- */
- // TODO(dcarney): deprecate
- V8_INLINE(void Dispose(Isolate* isolate)) { Dispose(); }
+ template <class S, class M2>
+ V8_INLINE bool operator!=(const Persistent<S, M2>& that) const {
+ return !operator==(that);
+ }
- /**
- * Make the reference to this object weak. When only weak handles
- * refer to the object, the garbage collector will perform a
- * callback to the given V8::NearDeathCallback function, passing
- * it the object reference and the given parameters.
- */
- template<typename S, typename P>
- V8_INLINE(void MakeWeak(
- P* parameters,
- typename WeakReferenceCallbacks<S, P>::Revivable callback));
+ template <class S> V8_INLINE bool operator!=(const Handle<S>& that) const {
+ return !operator==(that);
+ }
template<typename P>
- V8_INLINE(void MakeWeak(
- P* parameters,
- typename WeakReferenceCallbacks<T, P>::Revivable callback));
+ V8_INLINE void SetWeak(
+ P* parameter,
+ typename WeakCallbackData<T, P>::Callback callback);
template<typename S, typename P>
- V8_DEPRECATED(void MakeWeak(
- Isolate* isolate,
- P* parameters,
- typename WeakReferenceCallbacks<S, P>::Revivable callback));
+ V8_INLINE void SetWeak(
+ P* parameter,
+ typename WeakCallbackData<S, P>::Callback callback);
+
+ // TODO(dcarney): deprecate
+ template<typename S, typename P>
+ V8_INLINE void MakeWeak(
+ P* parameter,
+ typename WeakReferenceCallbacks<S, P>::Revivable callback);
+ // TODO(dcarney): deprecate
template<typename P>
- V8_DEPRECATED(void MakeWeak(
- Isolate* isolate,
- P* parameters,
- typename WeakReferenceCallbacks<T, P>::Revivable callback));
+ V8_INLINE void MakeWeak(
+ P* parameter,
+ typename WeakReferenceCallbacks<T, P>::Revivable callback);
- V8_INLINE(void ClearWeak());
+ V8_INLINE void ClearWeak();
- // TODO(dcarney): deprecate
- V8_INLINE(void ClearWeak(Isolate* isolate)) { ClearWeak(); }
+ V8_DEPRECATED(V8_INLINE void ClearWeak(Isolate* isolate)) { ClearWeak(); }
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -664,10 +647,11 @@ template <class T> class Persistent // NOLINT
* independent handle should not assume that it will be preceded by a global
* GC prologue callback or followed by a global GC epilogue callback.
*/
- V8_INLINE(void MarkIndependent());
+ V8_INLINE void MarkIndependent();
- // TODO(dcarney): deprecate
- V8_INLINE(void MarkIndependent(Isolate* isolate)) { MarkIndependent(); }
+ V8_DEPRECATED(V8_INLINE void MarkIndependent(Isolate* isolate)) {
+ MarkIndependent();
+ }
/**
* Marks the reference to this object partially dependent. Partially dependent
@@ -677,40 +661,40 @@ template <class T> class Persistent // NOLINT
* external dependencies. This mark is automatically cleared after each
* garbage collection.
*/
- V8_INLINE(void MarkPartiallyDependent());
+ V8_INLINE void MarkPartiallyDependent();
- // TODO(dcarney): deprecate
- V8_INLINE(void MarkPartiallyDependent(Isolate* isolate)) {
+ V8_DEPRECATED(V8_INLINE void MarkPartiallyDependent(Isolate* isolate)) {
MarkPartiallyDependent();
}
- V8_INLINE(bool IsIndependent() const);
+ V8_INLINE bool IsIndependent() const;
- // TODO(dcarney): deprecate
- V8_INLINE(bool IsIndependent(Isolate* isolate) const) {
+ V8_DEPRECATED(V8_INLINE bool IsIndependent(Isolate* isolate) const) {
return IsIndependent();
}
/** Checks if the handle holds the only reference to an object. */
- V8_INLINE(bool IsNearDeath() const);
+ V8_INLINE bool IsNearDeath() const;
- // TODO(dcarney): deprecate
- V8_INLINE(bool IsNearDeath(Isolate* isolate) const) { return IsNearDeath(); }
+ V8_DEPRECATED(V8_INLINE bool IsNearDeath(Isolate* isolate) const) {
+ return IsNearDeath();
+ }
/** Returns true if the handle's reference is weak. */
- V8_INLINE(bool IsWeak() const);
+ V8_INLINE bool IsWeak() const;
- // TODO(dcarney): deprecate
- V8_INLINE(bool IsWeak(Isolate* isolate) const) { return IsWeak(); }
+ V8_DEPRECATED(V8_INLINE bool IsWeak(Isolate* isolate) const) {
+ return IsWeak();
+ }
/**
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
* description in v8-profiler.h for details.
*/
- V8_INLINE(void SetWrapperClassId(uint16_t class_id));
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
- // TODO(dcarney): deprecate
- V8_INLINE(void SetWrapperClassId(Isolate* isolate, uint16_t class_id)) {
+ V8_DEPRECATED(
+      V8_INLINE void SetWrapperClassId(Isolate* isolate, uint16_t class_id)) {
SetWrapperClassId(class_id);
}
@@ -718,75 +702,41 @@ template <class T> class Persistent // NOLINT
* Returns the class ID previously assigned to this handle or 0 if no class ID
* was previously assigned.
*/
- V8_INLINE(uint16_t WrapperClassId() const);
+ V8_INLINE uint16_t WrapperClassId() const;
- // TODO(dcarney): deprecate
- V8_INLINE(uint16_t WrapperClassId(Isolate* isolate) const) {
+ V8_DEPRECATED(V8_INLINE uint16_t WrapperClassId(Isolate* isolate) const) {
return WrapperClassId();
}
- /**
- * Disposes the current contents of the handle and replaces it.
- */
- V8_INLINE(void Reset(Isolate* isolate, const Handle<T>& other));
+ // TODO(dcarney): remove
+ V8_INLINE T* ClearAndLeak();
-#ifndef V8_USE_UNSAFE_HANDLES
- V8_INLINE(void Reset(Isolate* isolate, const Persistent<T>& other));
-#endif
-
- /**
- * Returns the underlying raw pointer and clears the handle. The caller is
- * responsible of eventually destroying the underlying object (by creating a
- * Persistent handle which points to it and Disposing it). In the future,
- * destructing a Persistent will also Dispose it. With this function, the
- * embedder can let the Persistent go out of scope without it getting
- * disposed.
- */
- V8_INLINE(T* ClearAndLeak());
+ // TODO(dcarney): remove
+ V8_INLINE void Clear() { val_ = 0; }
-#ifndef V8_USE_UNSAFE_HANDLES
-
- private:
- // TODO(dcarney): make unlinkable before cutover
- V8_INLINE(Persistent(const Persistent& that)) : val_(that.val_) {}
- // TODO(dcarney): make unlinkable before cutover
- V8_INLINE(Persistent& operator=(const Persistent& that)) { // NOLINT
- this->val_ = that.val_;
- return *this;
- }
-
- public:
+ // TODO(dcarney): remove
#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
private:
#endif
- // TODO(dcarney): remove before cutover
- template <class S> V8_INLINE(Persistent(S* that)) : val_(that) { }
+ template <class S> V8_INLINE Persistent(S* that) : val_(that) { }
- // TODO(dcarney): remove before cutover
- V8_INLINE(T* operator*() const) { return val_; }
-
- private:
- // TODO(dcarney): remove before cutover
- V8_INLINE(T* operator->() const) { return val_; }
- public:
-#endif
+ V8_INLINE T* operator*() const { return val_; }
private:
friend class Utils;
template<class F> friend class Handle;
template<class F> friend class Local;
- template<class F> friend class Persistent;
+ template<class F1, class F2> friend class Persistent;
template<class F> friend class ReturnValue;
- V8_INLINE(static T* New(Isolate* isolate, T* that));
+ V8_INLINE static T* New(Isolate* isolate, T* that);
+ template<class S, class M2>
+ V8_INLINE void Copy(const Persistent<S, M2>& that);
-#ifndef V8_USE_UNSAFE_HANDLES
T* val_;
-#endif
};
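// Editorial sketch, not part of this diff, of the basic life cycle under the
// new API: construct (or Reset with a handle) to create a storage cell, and
// call Reset() to dispose of it.
#include <v8.h>

void Lifecycle(v8::Isolate* isolate, v8::Local<v8::Object> value) {
  v8::Persistent<v8::Object> strong(isolate, value);  // creates a cell
  // ... later, derive Locals with v8::Local<v8::Object>::New(isolate, strong)
  strong.Reset();  // destroys the cell; strong.IsEmpty() is now true
}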
-
/**
* A stack-allocated class that governs a number of local handles.
* After a handle scope has been created, all local handles will be
@@ -801,11 +751,8 @@ template <class T> class Persistent // NOLINT
* handle and may deallocate it. The behavior of accessing a handle
* for which the handle scope has been deleted is undefined.
*/
-class V8EXPORT HandleScope {
+class V8_EXPORT HandleScope {
public:
- // TODO(svenpanne) Deprecate me when Chrome is fixed!
- HandleScope();
-
HandleScope(Isolate* isolate);
~HandleScope();
@@ -840,12 +787,12 @@ class V8EXPORT HandleScope {
// This Data class is accessible internally as HandleScopeData through a
// typedef in the ImplementationUtilities class.
- class V8EXPORT Data {
+ class V8_EXPORT Data {
public:
internal::Object** next;
internal::Object** limit;
int level;
- V8_INLINE(void Initialize()) {
+ V8_INLINE void Initialize() {
next = limit = NULL;
level = 0;
}
@@ -867,13 +814,28 @@ class V8EXPORT HandleScope {
};
+/**
+ * A simple Maybe type, representing an object which may or may not have a
+ * value.
+ */
+template<class T>
+struct Maybe {
+ Maybe() : has_value(false) {}
+ explicit Maybe(T t) : has_value(true), value(t) {}
+ Maybe(bool has, T t) : has_value(has), value(t) {}
+
+ bool has_value;
+ T value;
+};
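// Editorial sketch, not part of this diff: a lookup that may fail, expressed
// with Maybe. The function name and values are illustrative.
#include <v8.h>

v8::Maybe<int> LookupPort(bool configured) {
  if (!configured) return v8::Maybe<int>();  // has_value == false
  return v8::Maybe<int>(8080);               // has_value == true
}
// Callers check `result.has_value` before reading `result.value`.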
+
+
// --- Special objects ---
/**
* The superclass of values and API object templates.
*/
-class V8EXPORT Data {
+class V8_EXPORT Data {
private:
Data();
};
@@ -885,7 +847,7 @@ class V8EXPORT Data {
* compiling it, and can be stored between compilations. When script
* data is given to the compile method compilation will be faster.
*/
-class V8EXPORT ScriptData { // NOLINT
+class V8_EXPORT ScriptData { // NOLINT
public:
virtual ~ScriptData() { }
@@ -939,19 +901,19 @@ class V8EXPORT ScriptData { // NOLINT
*/
class ScriptOrigin {
public:
- V8_INLINE(ScriptOrigin(
+ V8_INLINE ScriptOrigin(
Handle<Value> resource_name,
Handle<Integer> resource_line_offset = Handle<Integer>(),
Handle<Integer> resource_column_offset = Handle<Integer>(),
- Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>()))
+ Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>())
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset),
resource_is_shared_cross_origin_(resource_is_shared_cross_origin) { }
- V8_INLINE(Handle<Value> ResourceName() const);
- V8_INLINE(Handle<Integer> ResourceLineOffset() const);
- V8_INLINE(Handle<Integer> ResourceColumnOffset() const);
- V8_INLINE(Handle<Boolean> ResourceIsSharedCrossOrigin() const);
+ V8_INLINE Handle<Value> ResourceName() const;
+ V8_INLINE Handle<Integer> ResourceLineOffset() const;
+ V8_INLINE Handle<Integer> ResourceColumnOffset() const;
+ V8_INLINE Handle<Boolean> ResourceIsSharedCrossOrigin() const;
private:
Handle<Value> resource_name_;
Handle<Integer> resource_line_offset_;
@@ -963,7 +925,7 @@ class ScriptOrigin {
/**
* A compiled JavaScript script.
*/
-class V8EXPORT Script {
+class V8_EXPORT Script {
public:
/**
* Compiles the specified script (context-independent).
@@ -1081,7 +1043,7 @@ class V8EXPORT Script {
/**
* An error message.
*/
-class V8EXPORT Message {
+class V8_EXPORT Message {
public:
Local<String> Get() const;
Local<String> GetSourceLine() const;
@@ -1145,6 +1107,7 @@ class V8EXPORT Message {
static const int kNoLineNumberInfo = 0;
static const int kNoColumnInfo = 0;
+ static const int kNoScriptIdInfo = 0;
};
@@ -1153,7 +1116,7 @@ class V8EXPORT Message {
* snapshot of the execution stack and the information remains valid after
* execution continues.
*/
-class V8EXPORT StackTrace {
+class V8_EXPORT StackTrace {
public:
/**
    * Flags that determine what information is captured for each
@@ -1167,6 +1130,7 @@ class V8EXPORT StackTrace {
kIsEval = 1 << 4,
kIsConstructor = 1 << 5,
kScriptNameOrSourceURL = 1 << 6,
+ kScriptId = 1 << 7,
kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
};
@@ -1202,7 +1166,7 @@ class V8EXPORT StackTrace {
/**
* A single JavaScript stack frame.
*/
-class V8EXPORT StackFrame {
+class V8_EXPORT StackFrame {
public:
/**
    * Returns the number, 1-based, of the line for the associated function call.
@@ -1222,6 +1186,14 @@ class V8EXPORT StackFrame {
int GetColumn() const;
/**
+ * Returns the id of the script for the function for this StackFrame.
+ * This method will return Message::kNoScriptIdInfo if it is unable to
+ * retrieve the script id, or if kScriptId was not passed as an option when
+ * capturing the StackTrace.
+ */
+ int GetScriptId() const;
+
+ /**
* Returns the name of the resource that contains the script for the
* function for this StackFrame.
*/
@@ -1254,25 +1226,41 @@ class V8EXPORT StackFrame {
};
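// Editorial sketch, not part of this diff: requesting the new kScriptId flag
// and reading it back from the top frame. Assumes an active HandleScope; the
// frame limit of 10 is arbitrary.
#include <v8.h>

int TopFrameScriptId() {
  v8::Local<v8::StackTrace> trace = v8::StackTrace::CurrentStackTrace(
      10, static_cast<v8::StackTrace::StackTraceOptions>(
              v8::StackTrace::kOverview | v8::StackTrace::kScriptId));
  if (trace->GetFrameCount() == 0) return v8::Message::kNoScriptIdInfo;
  return trace->GetFrame(0)->GetScriptId();
}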
+/**
+ * A JSON Parser.
+ */
+class V8_EXPORT JSON {
+ public:
+ /**
+   * Tries to parse the string |json_string| and returns it as a value if
+ * successful.
+ *
+ * \param json_string The string to parse.
+ * \return The corresponding value if successfully parsed.
+ */
+ static Local<Value> Parse(Local<String> json_string);
+};
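// Editorial sketch, not part of this diff, of the new JSON::Parse entry
// point. Assumes an entered context and an active HandleScope; if the text
// is not valid JSON the returned handle is empty.
#include <v8.h>

v8::Local<v8::Value> ParseAnswer() {
  v8::Local<v8::String> text = v8::String::New("{\"answer\": 42}");
  return v8::JSON::Parse(text);
}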
+
+
// --- Value ---
/**
* The superclass of all JavaScript values and objects.
*/
-class V8EXPORT Value : public Data {
+class V8_EXPORT Value : public Data {
public:
/**
* Returns true if this value is the undefined value. See ECMA-262
* 4.3.10.
*/
- V8_INLINE(bool IsUndefined() const);
+ V8_INLINE bool IsUndefined() const;
/**
* Returns true if this value is the null value. See ECMA-262
* 4.3.11.
*/
- V8_INLINE(bool IsNull() const);
+ V8_INLINE bool IsNull() const;
/**
* Returns true if this value is true.
@@ -1288,7 +1276,7 @@ class V8EXPORT Value : public Data {
* Returns true if this value is an instance of the String type.
* See ECMA-262 8.4.
*/
- V8_INLINE(bool IsString() const);
+ V8_INLINE bool IsString() const;
/**
* Returns true if this value is a symbol.
@@ -1476,12 +1464,12 @@ class V8EXPORT Value : public Data {
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
- template <class T> V8_INLINE(static Value* Cast(T* value));
+ template <class T> V8_INLINE static Value* Cast(T* value);
private:
- V8_INLINE(bool QuickIsUndefined() const);
- V8_INLINE(bool QuickIsNull() const);
- V8_INLINE(bool QuickIsString() const);
+ V8_INLINE bool QuickIsUndefined() const;
+ V8_INLINE bool QuickIsNull() const;
+ V8_INLINE bool QuickIsString() const;
bool FullIsUndefined() const;
bool FullIsNull() const;
bool FullIsString() const;
@@ -1491,24 +1479,24 @@ class V8EXPORT Value : public Data {
/**
* The superclass of primitive values. See ECMA-262 4.3.2.
*/
-class V8EXPORT Primitive : public Value { };
+class V8_EXPORT Primitive : public Value { };
/**
* A primitive boolean value (ECMA-262, 4.3.14). Either the true
* or false value.
*/
-class V8EXPORT Boolean : public Primitive {
+class V8_EXPORT Boolean : public Primitive {
public:
bool Value() const;
- V8_INLINE(static Handle<Boolean> New(bool value));
+ V8_INLINE static Handle<Boolean> New(bool value);
};
/**
* A JavaScript string value (ECMA-262, 4.3.17).
*/
-class V8EXPORT String : public Primitive {
+class V8_EXPORT String : public Primitive {
public:
enum Encoding {
UNKNOWN_ENCODING = 0x1,
@@ -1530,7 +1518,7 @@ class V8EXPORT String : public Primitive {
/**
* This function is no longer useful.
*/
- V8_DEPRECATED(V8_INLINE(bool MayContainNonAscii()) const) { return true; }
+ V8_DEPRECATED(V8_INLINE bool MayContainNonAscii() const) { return true; }
/**
* Returns whether this string is known to contain only one byte data.
@@ -1602,7 +1590,7 @@ class V8EXPORT String : public Primitive {
* A zero length string.
*/
static v8::Local<v8::String> Empty();
- V8_INLINE(static v8::Local<v8::String> Empty(Isolate* isolate));
+ V8_INLINE static v8::Local<v8::String> Empty(Isolate* isolate);
/**
* Returns true if the string is external
@@ -1614,7 +1602,7 @@ class V8EXPORT String : public Primitive {
*/
bool IsExternalAscii() const;
- class V8EXPORT ExternalStringResourceBase { // NOLINT
+ class V8_EXPORT ExternalStringResourceBase { // NOLINT
public:
virtual ~ExternalStringResourceBase() {}
@@ -1643,7 +1631,7 @@ class V8EXPORT String : public Primitive {
* ExternalStringResource to manage the life cycle of the underlying
* buffer. Note that the string data must be immutable.
*/
- class V8EXPORT ExternalStringResource
+ class V8_EXPORT ExternalStringResource
: public ExternalStringResourceBase {
public:
/**
@@ -1677,7 +1665,7 @@ class V8EXPORT String : public Primitive {
* Use String::New or convert to 16 bit data for non-ASCII.
*/
- class V8EXPORT ExternalAsciiStringResource
+ class V8_EXPORT ExternalAsciiStringResource
: public ExternalStringResourceBase {
public:
/**
@@ -1700,14 +1688,14 @@ class V8EXPORT String : public Primitive {
* regardless of the encoding, otherwise return NULL. The encoding of the
* string is returned in encoding_out.
*/
- V8_INLINE(ExternalStringResourceBase* GetExternalStringResourceBase(
- Encoding* encoding_out) const);
+ V8_INLINE ExternalStringResourceBase* GetExternalStringResourceBase(
+ Encoding* encoding_out) const;
/**
* Get the ExternalStringResource for an external string. Returns
* NULL if IsExternal() doesn't return true.
*/
- V8_INLINE(ExternalStringResource* GetExternalStringResource() const);
+ V8_INLINE ExternalStringResource* GetExternalStringResource() const;
/**
* Get the ExternalAsciiStringResource for an external ASCII string.
@@ -1715,7 +1703,7 @@ class V8EXPORT String : public Primitive {
*/
const ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
- V8_INLINE(static String* Cast(v8::Value* obj));
+ V8_INLINE static String* Cast(v8::Value* obj);
// TODO(dcarney): deprecate
/**
@@ -1723,18 +1711,18 @@ class V8EXPORT String : public Primitive {
* The second parameter 'length' gives the buffer length. If omitted,
* the function calls 'strlen' to determine the buffer length.
*/
- V8_INLINE(static Local<String> New(const char* data, int length = -1));
+ V8_INLINE static Local<String> New(const char* data, int length = -1);
// TODO(dcarney): deprecate
/** Allocates a new string from 16-bit character codes.*/
- V8_INLINE(static Local<String> New(const uint16_t* data, int length = -1));
+ V8_INLINE static Local<String> New(const uint16_t* data, int length = -1);
// TODO(dcarney): deprecate
/**
* Creates an internalized string (historically called a "symbol",
* not to be confused with ES6 symbols). Returns one if it exists already.
*/
- V8_INLINE(static Local<String> NewSymbol(const char* data, int length = -1));
+ V8_INLINE static Local<String> NewSymbol(const char* data, int length = -1);
enum NewStringType {
kNormalString, kInternalizedString, kUndetectableString
@@ -1815,13 +1803,13 @@ class V8EXPORT String : public Primitive {
// TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
- V8_INLINE(
- static Local<String> NewUndetectable(const char* data, int length = -1));
+ V8_INLINE static Local<String> NewUndetectable(const char* data,
+ int length = -1);
// TODO(dcarney): deprecate
/** Creates an undetectable string from the supplied 16-bit character codes.*/
- V8_INLINE(static Local<String> NewUndetectable(
- const uint16_t* data, int length = -1));
+ V8_INLINE static Local<String> NewUndetectable(const uint16_t* data,
+ int length = -1);
/**
* Converts an object to a UTF-8-encoded character array. Useful if
@@ -1830,7 +1818,7 @@ class V8EXPORT String : public Primitive {
* then the length() method returns 0 and the * operator returns
* NULL.
*/
- class V8EXPORT Utf8Value {
+ class V8_EXPORT Utf8Value {
public:
explicit Utf8Value(Handle<v8::Value> obj);
~Utf8Value();
@@ -1853,7 +1841,7 @@ class V8EXPORT String : public Primitive {
* method of the object) then the length() method returns 0 and the * operator
* returns NULL.
*/
- class V8EXPORT AsciiValue {
+ class V8_EXPORT AsciiValue {
public:
// TODO(dcarney): deprecate
explicit AsciiValue(Handle<v8::Value> obj);
@@ -1876,7 +1864,7 @@ class V8EXPORT String : public Primitive {
* method of the object) then the length() method returns 0 and the * operator
* returns NULL.
*/
- class V8EXPORT Value {
+ class V8_EXPORT Value {
public:
explicit Value(Handle<v8::Value> obj);
~Value();
@@ -1905,7 +1893,7 @@ class V8EXPORT String : public Primitive {
*
* This is an experimental feature. Use at your own risk.
*/
-class V8EXPORT Symbol : public Primitive {
+class V8_EXPORT Symbol : public Primitive {
public:
// Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const;
@@ -1916,7 +1904,7 @@ class V8EXPORT Symbol : public Primitive {
// Create a symbol with a print name.
static Local<Symbol> New(Isolate *isolate, const char* data, int length = -1);
- V8_INLINE(static Symbol* Cast(v8::Value* obj));
+ V8_INLINE static Symbol* Cast(v8::Value* obj);
private:
Symbol();
static void CheckCast(v8::Value* obj);
@@ -1926,12 +1914,12 @@ class V8EXPORT Symbol : public Primitive {
/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
-class V8EXPORT Number : public Primitive {
+class V8_EXPORT Number : public Primitive {
public:
double Value() const;
static Local<Number> New(double value);
static Local<Number> New(Isolate* isolate, double value);
- V8_INLINE(static Number* Cast(v8::Value* obj));
+ V8_INLINE static Number* Cast(v8::Value* obj);
private:
Number();
static void CheckCast(v8::Value* obj);
@@ -1941,14 +1929,14 @@ class V8EXPORT Number : public Primitive {
/**
* A JavaScript value representing a signed integer.
*/
-class V8EXPORT Integer : public Number {
+class V8_EXPORT Integer : public Number {
public:
static Local<Integer> New(int32_t value);
static Local<Integer> NewFromUnsigned(uint32_t value);
static Local<Integer> New(int32_t value, Isolate*);
static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
int64_t Value() const;
- V8_INLINE(static Integer* Cast(v8::Value* obj));
+ V8_INLINE static Integer* Cast(v8::Value* obj);
private:
Integer();
static void CheckCast(v8::Value* obj);
@@ -1958,7 +1946,7 @@ class V8EXPORT Integer : public Number {
/**
* A JavaScript value representing a 32-bit signed integer.
*/
-class V8EXPORT Int32 : public Integer {
+class V8_EXPORT Int32 : public Integer {
public:
int32_t Value() const;
private:
@@ -1969,7 +1957,7 @@ class V8EXPORT Int32 : public Integer {
/**
* A JavaScript value representing a 32-bit unsigned integer.
*/
-class V8EXPORT Uint32 : public Integer {
+class V8_EXPORT Uint32 : public Integer {
public:
uint32_t Value() const;
private:
@@ -2001,16 +1989,11 @@ enum ExternalArrayType {
* setting|getting a particular property. See Object and ObjectTemplate's
* method SetAccessor.
*/
-typedef Handle<Value> (*AccessorGetter)(Local<String> property,
- const AccessorInfo& info);
typedef void (*AccessorGetterCallback)(
Local<String> property,
const PropertyCallbackInfo<Value>& info);
-typedef void (*AccessorSetter)(Local<String> property,
- Local<Value> value,
- const AccessorInfo& info);
typedef void (*AccessorSetterCallback)(
Local<String> property,
Local<Value> value,
@@ -2041,7 +2024,7 @@ enum AccessControl {
/**
* A JavaScript object (ECMA-262, 4.3.3)
*/
-class V8EXPORT Object : public Value {
+class V8_EXPORT Object : public Value {
public:
bool Set(Handle<Value> key,
Handle<Value> value,
@@ -2084,12 +2067,6 @@ class V8EXPORT Object : public Value {
bool Delete(uint32_t index);
- V8_DEPRECATED(bool SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter = 0,
- Handle<Value> data = Handle<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None));
bool SetAccessor(Handle<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter = 0,
@@ -2098,10 +2075,10 @@ class V8EXPORT Object : public Value {
PropertyAttribute attribute = None);
// This function is not yet stable and should not be used at this time.
- bool SetAccessor(Handle<String> name,
- Handle<DeclaredAccessorDescriptor> descriptor,
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None);
+ bool SetDeclaredAccessor(Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attribute = None,
+ AccessControl settings = DEFAULT);
/**
* Returns an array containing the names of the enumerable properties
@@ -2160,7 +2137,7 @@ class V8EXPORT Object : public Value {
int InternalFieldCount();
/** Gets the value from an internal field. */
- V8_INLINE(Local<Value> GetInternalField(int index));
+ V8_INLINE Local<Value> GetInternalField(int index);
/** Sets the value in an internal field. */
void SetInternalField(int index, Handle<Value> value);
@@ -2170,7 +2147,7 @@ class V8EXPORT Object : public Value {
* must have been set by SetAlignedPointerInInternalField, everything else
* leads to undefined behavior.
*/
- V8_INLINE(void* GetAlignedPointerFromInternalField(int index));
+ V8_INLINE void* GetAlignedPointerFromInternalField(int index);
/**
* Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
@@ -2300,7 +2277,7 @@ class V8EXPORT Object : public Value {
Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
static Local<Object> New();
- V8_INLINE(static Object* Cast(Value* obj));
+ V8_INLINE static Object* Cast(Value* obj);
private:
Object();
@@ -2313,7 +2290,7 @@ class V8EXPORT Object : public Value {
/**
* An instance of the built-in array constructor (ECMA-262, 15.4.2).
*/
-class V8EXPORT Array : public Object {
+class V8_EXPORT Array : public Object {
public:
uint32_t Length() const;
@@ -2329,18 +2306,135 @@ class V8EXPORT Array : public Object {
*/
static Local<Array> New(int length = 0);
- V8_INLINE(static Array* Cast(Value* obj));
+ V8_INLINE static Array* Cast(Value* obj);
private:
Array();
static void CheckCast(Value* obj);
};
+template<typename T>
+class ReturnValue {
+ public:
+ template <class S> V8_INLINE ReturnValue(const ReturnValue<S>& that)
+ : value_(that.value_) {
+ TYPE_CHECK(T, S);
+ }
+ // Handle setters
+ template <typename S> V8_INLINE void Set(const Persistent<S>& handle);
+ template <typename S> V8_INLINE void Set(const Handle<S> handle);
+ // Fast primitive setters
+ V8_INLINE void Set(bool value);
+ V8_INLINE void Set(double i);
+ V8_INLINE void Set(int32_t i);
+ V8_INLINE void Set(uint32_t i);
+ // Fast JS primitive setters
+ V8_INLINE void SetNull();
+ V8_INLINE void SetUndefined();
+ V8_INLINE void SetEmptyString();
+ // Convenience getter for Isolate
+ V8_INLINE Isolate* GetIsolate();
+
+ private:
+ template<class F> friend class ReturnValue;
+ template<class F> friend class FunctionCallbackInfo;
+ template<class F> friend class PropertyCallbackInfo;
+ V8_INLINE internal::Object* GetDefaultValue();
+ V8_INLINE explicit ReturnValue(internal::Object** slot);
+ internal::Object** value_;
+};
+
+
+/**
+ * The argument information given to function call callbacks. This
+ * class provides access to information about the context of the call,
+ * including the receiver, the number and values of arguments, and
+ * the holder of the function.
+ */
+template<typename T>
+class FunctionCallbackInfo {
+ public:
+ V8_INLINE int Length() const;
+ V8_INLINE Local<Value> operator[](int i) const;
+ V8_INLINE Local<Function> Callee() const;
+ V8_INLINE Local<Object> This() const;
+ V8_INLINE Local<Object> Holder() const;
+ V8_INLINE bool IsConstructCall() const;
+ V8_INLINE Local<Value> Data() const;
+ V8_INLINE Isolate* GetIsolate() const;
+ V8_INLINE ReturnValue<T> GetReturnValue() const;
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 6;
+
+ protected:
+ friend class internal::FunctionCallbackArguments;
+ friend class internal::CustomArguments<FunctionCallbackInfo>;
+ static const int kReturnValueIndex = 0;
+ static const int kReturnValueDefaultValueIndex = -1;
+ static const int kIsolateIndex = -2;
+ static const int kDataIndex = -3;
+ static const int kCalleeIndex = -4;
+ static const int kHolderIndex = -5;
+
+ V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
+ internal::Object** values,
+ int length,
+ bool is_construct_call);
+ internal::Object** implicit_args_;
+ internal::Object** values_;
+ int length_;
+ bool is_construct_call_;
+};
+
+
+/**
+ * The information passed to a property callback about the context
+ * of the property access.
+ */
+template<typename T>
+class PropertyCallbackInfo {
+ public:
+ V8_INLINE Isolate* GetIsolate() const;
+ V8_INLINE Local<Value> Data() const;
+ V8_INLINE Local<Object> This() const;
+ V8_INLINE Local<Object> Holder() const;
+ V8_INLINE ReturnValue<T> GetReturnValue() const;
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 6;
+
+ protected:
+ friend class MacroAssembler;
+ friend class internal::PropertyCallbackArguments;
+ friend class internal::CustomArguments<PropertyCallbackInfo>;
+ static const int kThisIndex = 0;
+ static const int kDataIndex = -1;
+ static const int kReturnValueIndex = -2;
+ static const int kReturnValueDefaultValueIndex = -3;
+ static const int kIsolateIndex = -4;
+ static const int kHolderIndex = -5;
+
+ V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {}
+ internal::Object** args_;
+};
+
+
+typedef void (*FunctionCallback)(const FunctionCallbackInfo<Value>& info);
+
+
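// Editorial sketch, not part of this diff: a FunctionCallback paired with the
// Function::New factory declared just below. Assumes an entered context.
#include <v8.h>

void Add(const v8::FunctionCallbackInfo<v8::Value>& info) {
  double sum = 0;
  for (int i = 0; i < info.Length(); i++) sum += info[i]->NumberValue();
  info.GetReturnValue().Set(sum);
}

v8::Local<v8::Function> MakeAdd(v8::Isolate* isolate) {
  return v8::Function::New(isolate, Add);
}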
/**
* A JavaScript function object (ECMA-262, 15.3).
*/
-class V8EXPORT Function : public Object {
+class V8_EXPORT Function : public Object {
public:
+ /**
+ * Create a function in the current execution context
+ * for a given FunctionCallback.
+ */
+ static Local<Function> New(Isolate* isolate,
+ FunctionCallback callback,
+ Local<Value> data = Local<Value>(),
+ int length = 0);
+
Local<Object> NewInstance() const;
Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
@@ -2378,7 +2472,7 @@ class V8EXPORT Function : public Object {
int ScriptId() const;
ScriptOrigin GetScriptOrigin() const;
- V8_INLINE(static Function* Cast(Value* obj));
+ V8_INLINE static Function* Cast(Value* obj);
static const int kLineOffsetNotFound;
private:
@@ -2395,7 +2489,7 @@ class V8EXPORT Function : public Object {
* An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
* This API is experimental and may change significantly.
*/
-class V8EXPORT ArrayBuffer : public Object {
+class V8_EXPORT ArrayBuffer : public Object {
public:
/**
* Allocator that V8 uses to allocate |ArrayBuffer|'s memory.
@@ -2404,7 +2498,7 @@ class V8EXPORT ArrayBuffer : public Object {
*
* This API is experimental and may change significantly.
*/
- class V8EXPORT Allocator { // NOLINT
+ class V8_EXPORT Allocator { // NOLINT
public:
virtual ~Allocator() {}
@@ -2418,27 +2512,12 @@ class V8EXPORT ArrayBuffer : public Object {
* Allocate |length| bytes. Return NULL if allocation is not successful.
* Memory does not have to be initialized.
*/
- virtual void* AllocateUninitialized(size_t length) {
- // Override with call to |Allocate| for compatibility
- // with legacy version.
- return Allocate(length);
- }
-
+ virtual void* AllocateUninitialized(size_t length) = 0;
/**
* Free the memory block of size |length|, pointed to by |data|.
* That memory is guaranteed to be previously allocated by |Allocate|.
*/
- virtual void Free(void* data, size_t length) {
- // Override with call to |Free(void*)| for compatibility
- // with legacy version.
- Free(data);
- }
-
- /**
- * Deprecated. Never called directly by V8.
- * For compatibility with legacy version of this interface.
- */
- virtual void Free(void* data);
+ virtual void Free(void* data, size_t length) = 0;
};
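// Editorial sketch, not part of this diff: with AllocateUninitialized and
// Free(data, length) now pure virtual, embedders must supply all three
// methods. A minimal malloc-based allocator, assuming Allocate() is expected
// to return zero-initialized memory:
#include <v8.h>
#include <stdlib.h>
#include <string.h>

class MallocAllocator : public v8::ArrayBuffer::Allocator {
 public:
  virtual void* Allocate(size_t length) {
    void* data = malloc(length);
    if (data != NULL) memset(data, 0, length);
    return data;
  }
  virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
  virtual void Free(void* data, size_t length) { free(data); }
};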
/**
@@ -2451,7 +2530,7 @@ class V8EXPORT ArrayBuffer : public Object {
*
* This API is experimental and may change significantly.
*/
- class V8EXPORT Contents { // NOLINT
+ class V8_EXPORT Contents { // NOLINT
public:
Contents() : data_(NULL), byte_length_(0) {}
@@ -2512,7 +2591,7 @@ class V8EXPORT ArrayBuffer : public Object {
*/
Contents Externalize();
- V8_INLINE(static ArrayBuffer* Cast(Value* obj));
+ V8_INLINE static ArrayBuffer* Cast(Value* obj);
static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
@@ -2534,7 +2613,7 @@ class V8EXPORT ArrayBuffer : public Object {
*
* This API is experimental and may change significantly.
*/
-class V8EXPORT ArrayBufferView : public Object {
+class V8_EXPORT ArrayBufferView : public Object {
public:
/**
* Returns underlying ArrayBuffer.
@@ -2553,7 +2632,7 @@ class V8EXPORT ArrayBufferView : public Object {
*/
void* BaseAddress();
- V8_INLINE(static ArrayBufferView* Cast(Value* obj));
+ V8_INLINE static ArrayBufferView* Cast(Value* obj);
static const int kInternalFieldCount =
V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
@@ -2569,7 +2648,7 @@ class V8EXPORT ArrayBufferView : public Object {
* (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT TypedArray : public ArrayBufferView {
+class V8_EXPORT TypedArray : public ArrayBufferView {
public:
/**
* Number of elements in this typed array
@@ -2577,7 +2656,7 @@ class V8EXPORT TypedArray : public ArrayBufferView {
*/
size_t Length();
- V8_INLINE(static TypedArray* Cast(Value* obj));
+ V8_INLINE static TypedArray* Cast(Value* obj);
private:
TypedArray();
@@ -2589,11 +2668,11 @@ class V8EXPORT TypedArray : public ArrayBufferView {
* An instance of Uint8Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Uint8Array : public TypedArray {
+class V8_EXPORT Uint8Array : public TypedArray {
public:
static Local<Uint8Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Uint8Array* Cast(Value* obj));
+ V8_INLINE static Uint8Array* Cast(Value* obj);
private:
Uint8Array();
@@ -2605,11 +2684,11 @@ class V8EXPORT Uint8Array : public TypedArray {
* An instance of Uint8ClampedArray constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Uint8ClampedArray : public TypedArray {
+class V8_EXPORT Uint8ClampedArray : public TypedArray {
public:
static Local<Uint8ClampedArray> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Uint8ClampedArray* Cast(Value* obj));
+ V8_INLINE static Uint8ClampedArray* Cast(Value* obj);
private:
Uint8ClampedArray();
@@ -2620,11 +2699,11 @@ class V8EXPORT Uint8ClampedArray : public TypedArray {
* An instance of Int8Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Int8Array : public TypedArray {
+class V8_EXPORT Int8Array : public TypedArray {
public:
static Local<Int8Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Int8Array* Cast(Value* obj));
+ V8_INLINE static Int8Array* Cast(Value* obj);
private:
Int8Array();
@@ -2636,11 +2715,11 @@ class V8EXPORT Int8Array : public TypedArray {
* An instance of Uint16Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Uint16Array : public TypedArray {
+class V8_EXPORT Uint16Array : public TypedArray {
public:
static Local<Uint16Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Uint16Array* Cast(Value* obj));
+ V8_INLINE static Uint16Array* Cast(Value* obj);
private:
Uint16Array();
@@ -2652,11 +2731,11 @@ class V8EXPORT Uint16Array : public TypedArray {
* An instance of Int16Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Int16Array : public TypedArray {
+class V8_EXPORT Int16Array : public TypedArray {
public:
static Local<Int16Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Int16Array* Cast(Value* obj));
+ V8_INLINE static Int16Array* Cast(Value* obj);
private:
Int16Array();
@@ -2668,11 +2747,11 @@ class V8EXPORT Int16Array : public TypedArray {
* An instance of Uint32Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Uint32Array : public TypedArray {
+class V8_EXPORT Uint32Array : public TypedArray {
public:
static Local<Uint32Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Uint32Array* Cast(Value* obj));
+ V8_INLINE static Uint32Array* Cast(Value* obj);
private:
Uint32Array();
@@ -2684,11 +2763,11 @@ class V8EXPORT Uint32Array : public TypedArray {
* An instance of Int32Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Int32Array : public TypedArray {
+class V8_EXPORT Int32Array : public TypedArray {
public:
static Local<Int32Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Int32Array* Cast(Value* obj));
+ V8_INLINE static Int32Array* Cast(Value* obj);
private:
Int32Array();
@@ -2700,11 +2779,11 @@ class V8EXPORT Int32Array : public TypedArray {
* An instance of Float32Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Float32Array : public TypedArray {
+class V8_EXPORT Float32Array : public TypedArray {
public:
static Local<Float32Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Float32Array* Cast(Value* obj));
+ V8_INLINE static Float32Array* Cast(Value* obj);
private:
Float32Array();
@@ -2716,11 +2795,11 @@ class V8EXPORT Float32Array : public TypedArray {
* An instance of Float64Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
-class V8EXPORT Float64Array : public TypedArray {
+class V8_EXPORT Float64Array : public TypedArray {
public:
static Local<Float64Array> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static Float64Array* Cast(Value* obj));
+ V8_INLINE static Float64Array* Cast(Value* obj);
private:
Float64Array();
@@ -2732,11 +2811,11 @@ class V8EXPORT Float64Array : public TypedArray {
* An instance of DataView constructor (ES6 draft 15.13.7).
* This API is experimental and may change significantly.
*/
-class V8EXPORT DataView : public ArrayBufferView {
+class V8_EXPORT DataView : public ArrayBufferView {
public:
static Local<DataView> New(Handle<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
- V8_INLINE(static DataView* Cast(Value* obj));
+ V8_INLINE static DataView* Cast(Value* obj);
private:
DataView();
@@ -2747,7 +2826,7 @@ class V8EXPORT DataView : public ArrayBufferView {
/**
* An instance of the built-in Date constructor (ECMA-262, 15.9).
*/
-class V8EXPORT Date : public Object {
+class V8_EXPORT Date : public Object {
public:
static Local<Value> New(double time);
@@ -2761,7 +2840,7 @@ class V8EXPORT Date : public Object {
*/
double ValueOf() const;
- V8_INLINE(static Date* Cast(v8::Value* obj));
+ V8_INLINE static Date* Cast(v8::Value* obj);
/**
* Notification that the embedder has changed the time zone,
@@ -2785,7 +2864,7 @@ class V8EXPORT Date : public Object {
/**
* A Number object (ECMA-262, 4.3.21).
*/
-class V8EXPORT NumberObject : public Object {
+class V8_EXPORT NumberObject : public Object {
public:
static Local<Value> New(double value);
@@ -2798,7 +2877,7 @@ class V8EXPORT NumberObject : public Object {
*/
double ValueOf() const;
- V8_INLINE(static NumberObject* Cast(v8::Value* obj));
+ V8_INLINE static NumberObject* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
@@ -2808,7 +2887,7 @@ class V8EXPORT NumberObject : public Object {
/**
* A Boolean object (ECMA-262, 4.3.15).
*/
-class V8EXPORT BooleanObject : public Object {
+class V8_EXPORT BooleanObject : public Object {
public:
static Local<Value> New(bool value);
@@ -2821,7 +2900,7 @@ class V8EXPORT BooleanObject : public Object {
*/
bool ValueOf() const;
- V8_INLINE(static BooleanObject* Cast(v8::Value* obj));
+ V8_INLINE static BooleanObject* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
@@ -2831,7 +2910,7 @@ class V8EXPORT BooleanObject : public Object {
/**
* A String object (ECMA-262, 4.3.18).
*/
-class V8EXPORT StringObject : public Object {
+class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Handle<String> value);
@@ -2844,7 +2923,7 @@ class V8EXPORT StringObject : public Object {
*/
Local<String> ValueOf() const;
- V8_INLINE(static StringObject* Cast(v8::Value* obj));
+ V8_INLINE static StringObject* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
@@ -2856,7 +2935,7 @@ class V8EXPORT StringObject : public Object {
*
* This is an experimental feature. Use at your own risk.
*/
-class V8EXPORT SymbolObject : public Object {
+class V8_EXPORT SymbolObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Handle<Symbol> value);
@@ -2869,7 +2948,7 @@ class V8EXPORT SymbolObject : public Object {
*/
Local<Symbol> ValueOf() const;
- V8_INLINE(static SymbolObject* Cast(v8::Value* obj));
+ V8_INLINE static SymbolObject* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
@@ -2879,7 +2958,7 @@ class V8EXPORT SymbolObject : public Object {
/**
* An instance of the built-in RegExp constructor (ECMA-262, 15.10).
*/
-class V8EXPORT RegExp : public Object {
+class V8_EXPORT RegExp : public Object {
public:
/**
* Regular expression flag bits. They can be or'ed to enable a set
@@ -2915,7 +2994,7 @@ class V8EXPORT RegExp : public Object {
*/
Flags GetFlags() const;
- V8_INLINE(static RegExp* Cast(v8::Value* obj));
+ V8_INLINE static RegExp* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
@@ -2926,10 +3005,10 @@ class V8EXPORT RegExp : public Object {
* A JavaScript value that wraps a C++ void*. This type of value is mainly used
* to associate C++ data structures with JavaScript objects.
*/
-class V8EXPORT External : public Value {
+class V8_EXPORT External : public Value {
public:
static Local<External> New(void* value);
- V8_INLINE(static External* Cast(Value* obj));
+ V8_INLINE static External* Cast(Value* obj);
void* Value() const;
private:
static void CheckCast(v8::Value* obj);
@@ -2942,152 +3021,77 @@ class V8EXPORT External : public Value {
/**
* The superclass of object and function templates.
*/
-class V8EXPORT Template : public Data {
+class V8_EXPORT Template : public Data {
public:
/** Adds a property to each instance created by this template.*/
void Set(Handle<String> name, Handle<Data> value,
PropertyAttribute attributes = None);
- V8_INLINE(void Set(const char* name, Handle<Data> value));
- private:
- Template();
-
- friend class ObjectTemplate;
- friend class FunctionTemplate;
-};
-
-
-template<typename T>
-class ReturnValue {
- public:
- template <class S> V8_INLINE(ReturnValue(const ReturnValue<S>& that))
- : value_(that.value_) {
- TYPE_CHECK(T, S);
- }
- // Handle setters
- template <typename S> V8_INLINE(void Set(const Persistent<S>& handle));
- template <typename S> V8_INLINE(void Set(const Handle<S> handle));
- // Fast primitive setters
- V8_INLINE(void Set(bool value));
- V8_INLINE(void Set(double i));
- V8_INLINE(void Set(int32_t i));
- V8_INLINE(void Set(uint32_t i));
- // Fast JS primitive setters
- V8_INLINE(void SetNull());
- V8_INLINE(void SetUndefined());
- V8_INLINE(void SetEmptyString());
- // Convenience getter for Isolate
- V8_INLINE(Isolate* GetIsolate());
-
- private:
- template<class F> friend class ReturnValue;
- template<class F> friend class FunctionCallbackInfo;
- template<class F> friend class PropertyCallbackInfo;
- V8_INLINE(internal::Object* GetDefaultValue());
- V8_INLINE(explicit ReturnValue(internal::Object** slot));
- internal::Object** value_;
-};
-
-
-/**
- * The argument information given to function call callbacks. This
- * class provides access to information about the context of the call,
- * including the receiver, the number and values of arguments, and
- * the holder of the function.
- */
-template<typename T>
-class FunctionCallbackInfo {
- public:
- V8_INLINE(int Length() const);
- V8_INLINE(Local<Value> operator[](int i) const);
- V8_INLINE(Local<Function> Callee() const);
- V8_INLINE(Local<Object> This() const);
- V8_INLINE(Local<Object> Holder() const);
- V8_INLINE(bool IsConstructCall() const);
- V8_INLINE(Local<Value> Data() const);
- V8_INLINE(Isolate* GetIsolate() const);
- V8_INLINE(ReturnValue<T> GetReturnValue() const);
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
+ V8_INLINE void Set(const char* name, Handle<Data> value);
- protected:
- friend class internal::FunctionCallbackArguments;
- friend class internal::CustomArguments<FunctionCallbackInfo>;
- static const int kReturnValueIndex = 0;
- static const int kReturnValueDefaultValueIndex = -1;
- static const int kIsolateIndex = -2;
- static const int kDataIndex = -3;
- static const int kCalleeIndex = -4;
- static const int kHolderIndex = -5;
+ void SetAccessorProperty(
+ Local<String> name,
+ Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
+ Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
+ PropertyAttribute attribute = None,
+ AccessControl settings = DEFAULT);
- V8_INLINE(FunctionCallbackInfo(internal::Object** implicit_args,
- internal::Object** values,
- int length,
- bool is_construct_call));
- internal::Object** implicit_args_;
- internal::Object** values_;
- int length_;
- bool is_construct_call_;
-};
+ /**
+ * Whenever the property with the given name is accessed on objects
+   * created from this Template, the getter and setter callbacks
+ * are called instead of getting and setting the property directly
+ * on the JavaScript object.
+ *
+ * \param name The name of the property for which an accessor is added.
+ * \param getter The callback to invoke when getting the property.
+ * \param setter The callback to invoke when setting the property.
+ * \param data A piece of data that will be passed to the getter and setter
+ * callbacks whenever they are invoked.
+ * \param settings Access control settings for the accessor. This is a bit
+   *     field consisting of one or more of
+ * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
+ * The default is to not allow cross-context access.
+ * ALL_CAN_READ means that all cross-context reads are allowed.
+ * ALL_CAN_WRITE means that all cross-context writes are allowed.
+ * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
+ * cross-context access.
+ * \param attribute The attributes of the property for which an accessor
+ * is added.
+ * \param signature The signature describes valid receivers for the accessor
+ * and is used to perform implicit instance checks against them. If the
+ * receiver is incompatible (i.e. is not an instance of the constructor as
+ * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+ * thrown and no callback is invoked.
+ */
+ void SetNativeDataProperty(Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter = 0,
+ // TODO(dcarney): gcc can't handle Local below
+ Handle<Value> data = Handle<Value>(),
+ PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature =
+ Local<AccessorSignature>(),
+ AccessControl settings = DEFAULT);
+ // This function is not yet stable and should not be used at this time.
+ bool SetDeclaredAccessor(Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature =
+ Local<AccessorSignature>(),
+ AccessControl settings = DEFAULT);
-class V8EXPORT Arguments : public FunctionCallbackInfo<Value> {
private:
- friend class internal::FunctionCallbackArguments;
- V8_INLINE(Arguments(internal::Object** implicit_args,
- internal::Object** values,
- int length,
- bool is_construct_call));
-};
-
-/**
- * The information passed to a property callback about the context
- * of the property access.
- */
-template<typename T>
-class PropertyCallbackInfo {
- public:
- V8_INLINE(Isolate* GetIsolate() const);
- V8_INLINE(Local<Value> Data() const);
- V8_INLINE(Local<Object> This() const);
- V8_INLINE(Local<Object> Holder() const);
- V8_INLINE(ReturnValue<T> GetReturnValue() const);
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
-
- protected:
- friend class MacroAssembler;
- friend class internal::PropertyCallbackArguments;
- friend class internal::CustomArguments<PropertyCallbackInfo>;
- static const int kThisIndex = 0;
- static const int kHolderIndex = -1;
- static const int kDataIndex = -2;
- static const int kReturnValueIndex = -3;
- static const int kReturnValueDefaultValueIndex = -4;
- static const int kIsolateIndex = -5;
-
- V8_INLINE(PropertyCallbackInfo(internal::Object** args))
- : args_(args) { }
- internal::Object** args_;
-};
-
+ Template();
-class V8EXPORT AccessorInfo : public PropertyCallbackInfo<Value> {
- private:
- friend class internal::PropertyCallbackArguments;
- V8_INLINE(AccessorInfo(internal::Object** args))
- : PropertyCallbackInfo<Value>(args) { }
+ friend class ObjectTemplate;
+ friend class FunctionTemplate;
};
-typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
-typedef void (*FunctionCallback)(const FunctionCallbackInfo<Value>& info);
-
/**
 * NamedProperty[Getter|Setter] are used as interceptors on objects.
* See ObjectTemplate::SetNamedPropertyHandler.
*/
-typedef Handle<Value> (*NamedPropertyGetter)(Local<String> property,
- const AccessorInfo& info);
typedef void (*NamedPropertyGetterCallback)(
Local<String> property,
const PropertyCallbackInfo<Value>& info);
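// Illustrative sketch: with the callback style above, an interceptor reports
// its result through |info| and returns nothing. TraceGetter is hypothetical.
void TraceGetter(Local<String> property,
                 const PropertyCallbackInfo<Value>& info) {
  if (property->Equals(String::New("answer")))
    info.GetReturnValue().Set(Integer::New(42));
  // Falling through without setting a return value lets the normal
  // property lookup proceed.
}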
@@ -3097,9 +3101,6 @@ typedef void (*NamedPropertyGetterCallback)(
* Returns the value if the setter intercepts the request.
* Otherwise, returns an empty handle.
*/
-typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
- Local<Value> value,
- const AccessorInfo& info);
typedef void (*NamedPropertySetterCallback)(
Local<String> property,
Local<Value> value,
@@ -3111,8 +3112,6 @@ typedef void (*NamedPropertySetterCallback)(
* The result is an integer encoding property attributes (like v8::None,
* v8::DontEnum, etc.)
*/
-typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
- const AccessorInfo& info);
typedef void (*NamedPropertyQueryCallback)(
Local<String> property,
const PropertyCallbackInfo<Integer>& info);
@@ -3123,8 +3122,6 @@ typedef void (*NamedPropertyQueryCallback)(
* The return value is true if the property could be deleted and false
* otherwise.
*/
-typedef Handle<Boolean> (*NamedPropertyDeleter)(Local<String> property,
- const AccessorInfo& info);
typedef void (*NamedPropertyDeleterCallback)(
Local<String> property,
const PropertyCallbackInfo<Boolean>& info);
@@ -3134,7 +3131,6 @@ typedef void (*NamedPropertyDeleterCallback)(
* Returns an array containing the names of the properties the named
* property getter intercepts.
*/
-typedef Handle<Array> (*NamedPropertyEnumerator)(const AccessorInfo& info);
typedef void (*NamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
@@ -3143,8 +3139,6 @@ typedef void (*NamedPropertyEnumeratorCallback)(
* Returns the value of the property if the getter intercepts the
* request. Otherwise, returns an empty handle.
*/
-typedef Handle<Value> (*IndexedPropertyGetter)(uint32_t index,
- const AccessorInfo& info);
typedef void (*IndexedPropertyGetterCallback)(
uint32_t index,
const PropertyCallbackInfo<Value>& info);
@@ -3154,9 +3148,6 @@ typedef void (*IndexedPropertyGetterCallback)(
* Returns the value if the setter intercepts the request.
* Otherwise, returns an empty handle.
*/
-typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
- Local<Value> value,
- const AccessorInfo& info);
typedef void (*IndexedPropertySetterCallback)(
uint32_t index,
Local<Value> value,
@@ -3167,8 +3158,6 @@ typedef void (*IndexedPropertySetterCallback)(
* Returns a non-empty handle if the interceptor intercepts the request.
* The result is an integer encoding property attributes.
*/
-typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
- const AccessorInfo& info);
typedef void (*IndexedPropertyQueryCallback)(
uint32_t index,
const PropertyCallbackInfo<Integer>& info);
@@ -3179,8 +3168,6 @@ typedef void (*IndexedPropertyQueryCallback)(
* The return value is true if the property could be deleted and false
* otherwise.
*/
-typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
- const AccessorInfo& info);
typedef void (*IndexedPropertyDeleterCallback)(
uint32_t index,
const PropertyCallbackInfo<Boolean>& info);
@@ -3190,7 +3177,6 @@ typedef void (*IndexedPropertyDeleterCallback)(
* Returns an array containing the indices of the properties the
* indexed property getter intercepts.
*/
-typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);
typedef void (*IndexedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
@@ -3319,14 +3305,9 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host,
* child_instance.instance_property == 3;
* \endcode
*/
-class V8EXPORT FunctionTemplate : public Template {
+class V8_EXPORT FunctionTemplate : public Template {
public:
/** Creates a function template.*/
- V8_DEPRECATED(static Local<FunctionTemplate> New(
- InvocationCallback callback,
- Handle<Value> data = Handle<Value>(),
- Handle<Signature> signature = Handle<Signature>(),
- int length = 0));
static Local<FunctionTemplate> New(
FunctionCallback callback = 0,
Handle<Value> data = Handle<Value>(),
@@ -3341,8 +3322,6 @@ class V8EXPORT FunctionTemplate : public Template {
* callback is called whenever the function created from this
* FunctionTemplate is called.
*/
- V8_DEPRECATED(void SetCallHandler(InvocationCallback callback,
- Handle<Value> data = Handle<Value>()));
void SetCallHandler(FunctionCallback callback,
Handle<Value> data = Handle<Value>());
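// Illustrative sketch of a FunctionCallback suitable for New() or
// SetCallHandler() above; Add is hypothetical and argument checking is
// omitted.
void Add(const FunctionCallbackInfo<Value>& info) {
  double sum = info[0]->NumberValue() + info[1]->NumberValue();
  info.GetReturnValue().Set(Number::New(sum));
}
// Registered via: Local<FunctionTemplate> t = FunctionTemplate::New(Add);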
@@ -3389,6 +3368,12 @@ class V8EXPORT FunctionTemplate : public Template {
void ReadOnlyPrototype();
/**
+ * Removes the prototype property from functions created from this
+ * FunctionTemplate.
+ */
+ void RemovePrototype();
+
+ /**
* Returns true if the given object is an instance of this function
* template.
*/
@@ -3396,9 +3381,6 @@ class V8EXPORT FunctionTemplate : public Template {
private:
FunctionTemplate();
- // TODO(dcarney): Remove with SetCallHandler.
- friend class v8::CallHandlerHelper;
- void SetCallHandlerInternal(InvocationCallback callback, Handle<Value> data);
friend class Context;
friend class ObjectTemplate;
};
@@ -3410,7 +3392,7 @@ class V8EXPORT FunctionTemplate : public Template {
* Properties added to an ObjectTemplate are added to each object
* created from the ObjectTemplate.
*/
-class V8EXPORT ObjectTemplate : public Template {
+class V8_EXPORT ObjectTemplate : public Template {
public:
/** Creates an ObjectTemplate. */
static Local<ObjectTemplate> New();
@@ -3447,14 +3429,6 @@ class V8EXPORT ObjectTemplate : public Template {
* defined by FunctionTemplate::HasInstance()), an implicit TypeError is
* thrown and no callback is invoked.
*/
- V8_DEPRECATED(void SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter = 0,
- Handle<Value> data = Handle<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None,
- Handle<AccessorSignature> signature =
- Handle<AccessorSignature>()));
void SetAccessor(Handle<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter = 0,
@@ -3464,14 +3438,6 @@ class V8EXPORT ObjectTemplate : public Template {
Handle<AccessorSignature> signature =
Handle<AccessorSignature>());
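// Illustrative sketch of the replacement signature, with callbacks first,
// then data, settings, attribute and signature; AddLengthAccessor is
// hypothetical and |getter| is supplied by the embedder.
void AddLengthAccessor(Handle<ObjectTemplate> templ,
                       AccessorGetterCallback getter) {
  templ->SetAccessor(String::New("length"), getter, 0,
                     Handle<Value>(), DEFAULT, ReadOnly);
}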
- // This function is not yet stable and should not be used at this time.
- bool SetAccessor(Handle<String> name,
- Handle<DeclaredAccessorDescriptor> descriptor,
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None,
- Handle<AccessorSignature> signature =
- Handle<AccessorSignature>());
-
/**
* Sets a named property handler on the object template.
*
@@ -3489,13 +3455,6 @@ class V8EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
- V8_DEPRECATED(void SetNamedPropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter = 0,
- NamedPropertyQuery query = 0,
- NamedPropertyDeleter deleter = 0,
- NamedPropertyEnumerator enumerator = 0,
- Handle<Value> data = Handle<Value>()));
void SetNamedPropertyHandler(
NamedPropertyGetterCallback getter,
NamedPropertySetterCallback setter = 0,
@@ -3520,13 +3479,6 @@ class V8EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
- V8_DEPRECATED(void SetIndexedPropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter = 0,
- IndexedPropertyQuery query = 0,
- IndexedPropertyDeleter deleter = 0,
- IndexedPropertyEnumerator enumerator = 0,
- Handle<Value> data = Handle<Value>()));
void SetIndexedPropertyHandler(
IndexedPropertyGetterCallback getter,
IndexedPropertySetterCallback setter = 0,
@@ -3541,9 +3493,6 @@ class V8EXPORT ObjectTemplate : public Template {
* behave like normal JavaScript objects that cannot be called as a
* function.
*/
- V8_DEPRECATED(void SetCallAsFunctionHandler(
- InvocationCallback callback,
- Handle<Value> data = Handle<Value>()));
void SetCallAsFunctionHandler(FunctionCallback callback,
Handle<Value> data = Handle<Value>());
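// Illustrative sketch: instances created from a template with this handler
// become callable; CallAsFunction is hypothetical.
void CallAsFunction(const FunctionCallbackInfo<Value>& info) {
  info.GetReturnValue().Set(info.This());  // calling the object yields itself
}
// Registered via: templ->SetCallAsFunctionHandler(CallAsFunction);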
@@ -3596,7 +3545,7 @@ class V8EXPORT ObjectTemplate : public Template {
* A Signature specifies which receivers and arguments are valid
* parameters to a function.
*/
-class V8EXPORT Signature : public Data {
+class V8_EXPORT Signature : public Data {
public:
static Local<Signature> New(Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>(),
@@ -3611,7 +3560,7 @@ class V8EXPORT Signature : public Data {
* An AccessorSignature specifies which receivers are valid parameters
* to an accessor callback.
*/
-class V8EXPORT AccessorSignature : public Data {
+class V8_EXPORT AccessorSignature : public Data {
public:
static Local<AccessorSignature> New(Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>());
@@ -3620,13 +3569,13 @@ class V8EXPORT AccessorSignature : public Data {
};
-class V8EXPORT DeclaredAccessorDescriptor : public Data {
+class V8_EXPORT DeclaredAccessorDescriptor : public Data {
private:
DeclaredAccessorDescriptor();
};
-class V8EXPORT ObjectOperationDescriptor : public Data {
+class V8_EXPORT ObjectOperationDescriptor : public Data {
public:
// This function is not yet stable and should not be used at this time.
static Local<RawOperationDescriptor> NewInternalFieldDereference(
@@ -3646,7 +3595,7 @@ enum DeclaredAccessorDescriptorDataType {
};
-class V8EXPORT RawOperationDescriptor : public Data {
+class V8_EXPORT RawOperationDescriptor : public Data {
public:
Local<DeclaredAccessorDescriptor> NewHandleDereference(Isolate* isolate);
Local<RawOperationDescriptor> NewRawDereference(Isolate* isolate);
@@ -3679,7 +3628,7 @@ class V8EXPORT RawOperationDescriptor : public Data {
* A utility for determining the type of objects based on the template
* they were constructed from.
*/
-class V8EXPORT TypeSwitch : public Data {
+class V8_EXPORT TypeSwitch : public Data {
public:
static Local<TypeSwitch> New(Handle<FunctionTemplate> type);
static Local<TypeSwitch> New(int argc, Handle<FunctionTemplate> types[]);
@@ -3691,7 +3640,7 @@ class V8EXPORT TypeSwitch : public Data {
// --- Extensions ---
-class V8EXPORT ExternalAsciiStringResourceImpl
+class V8_EXPORT ExternalAsciiStringResourceImpl
: public String::ExternalAsciiStringResource {
public:
ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
@@ -3708,7 +3657,7 @@ class V8EXPORT ExternalAsciiStringResourceImpl
/**
* Ignore
*/
-class V8EXPORT Extension { // NOLINT
+class V8_EXPORT Extension { // NOLINT
public:
// Note that the strings passed into this constructor must live as long
// as the Extension itself.
@@ -3746,15 +3695,15 @@ class V8EXPORT Extension { // NOLINT
};
-void V8EXPORT RegisterExtension(Extension* extension);
+void V8_EXPORT RegisterExtension(Extension* extension);
/**
* Ignore
*/
-class V8EXPORT DeclareExtension {
+class V8_EXPORT DeclareExtension {
public:
- V8_INLINE(DeclareExtension(Extension* extension)) {
+ V8_INLINE DeclareExtension(Extension* extension) {
RegisterExtension(extension);
}
};
@@ -3763,15 +3712,15 @@ class V8EXPORT DeclareExtension {
// --- Statics ---
-Handle<Primitive> V8EXPORT Undefined();
-Handle<Primitive> V8EXPORT Null();
-Handle<Boolean> V8EXPORT True();
-Handle<Boolean> V8EXPORT False();
+Handle<Primitive> V8_EXPORT Undefined();
+Handle<Primitive> V8_EXPORT Null();
+Handle<Boolean> V8_EXPORT True();
+Handle<Boolean> V8_EXPORT False();
-V8_INLINE(Handle<Primitive> Undefined(Isolate* isolate));
-V8_INLINE(Handle<Primitive> Null(Isolate* isolate));
-V8_INLINE(Handle<Boolean> True(Isolate* isolate));
-V8_INLINE(Handle<Boolean> False(Isolate* isolate));
+V8_INLINE Handle<Primitive> Undefined(Isolate* isolate);
+V8_INLINE Handle<Primitive> Null(Isolate* isolate);
+V8_INLINE Handle<Boolean> True(Isolate* isolate);
+V8_INLINE Handle<Boolean> False(Isolate* isolate);
/**
@@ -3783,7 +3732,7 @@ V8_INLINE(Handle<Boolean> False(Isolate* isolate));
* setting the stack limit and you must set a non-default stack limit separately
* for each thread.
*/
-class V8EXPORT ResourceConstraints {
+class V8_EXPORT ResourceConstraints {
public:
ResourceConstraints();
int max_young_space_size() const { return max_young_space_size_; }
@@ -3795,15 +3744,24 @@ class V8EXPORT ResourceConstraints {
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+ Maybe<bool> is_memory_constrained() const { return is_memory_constrained_; }
+ // If set to true, V8 will limit its memory usage, at the potential cost of
+ // lower performance. Note, this option is a tentative addition to the API
+ // and may be removed or modified without warning.
+ void set_memory_constrained(bool value) {
+ is_memory_constrained_ = Maybe<bool>(value);
+ }
+
private:
int max_young_space_size_;
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
+ Maybe<bool> is_memory_constrained_;
};
-bool V8EXPORT SetResourceConstraints(ResourceConstraints* constraints);
+bool V8_EXPORT SetResourceConstraints(ResourceConstraints* constraints);
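// Illustrative sketch of applying the new flag, typically before V8 is
// initialized; set_memory_constrained() is tentative, as noted above.
void ConfigureHeap() {
  ResourceConstraints constraints;
  constraints.set_memory_constrained(true);  // trade speed for footprint
  SetResourceConstraints(&constraints);
}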
// --- Exceptions ---
@@ -3821,13 +3779,13 @@ typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
* operation; the caller must return immediately and only after the exception
* has been handled does it become legal to invoke JavaScript operations.
*/
-Handle<Value> V8EXPORT ThrowException(Handle<Value> exception);
+Handle<Value> V8_EXPORT ThrowException(Handle<Value> exception);
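// Illustrative sketch of scheduling an exception from a callback; Fail is
// hypothetical, and per the note above the callback returns promptly.
void Fail(const FunctionCallbackInfo<Value>& info) {
  ThrowException(String::New("boom"));
  // No further JavaScript may be invoked until the exception is handled.
}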
/**
* Create new error objects by calling the corresponding error object
* constructor with the message.
*/
-class V8EXPORT Exception {
+class V8_EXPORT Exception {
public:
static Local<Value> RangeError(Handle<String> message);
static Local<Value> ReferenceError(Handle<String> message);
@@ -3921,7 +3879,7 @@ typedef void (*GCCallback)();
* Instances of this class can be passed to v8::V8::HeapStatistics to
* get heap statistics from V8.
*/
-class V8EXPORT HeapStatistics {
+class V8_EXPORT HeapStatistics {
public:
HeapStatistics();
size_t total_heap_size() { return total_heap_size_; }
@@ -3953,13 +3911,13 @@ class RetainedObjectInfo;
* threads. An isolate can be entered by at most one thread at any
* given time. The Locker/Unlocker API must be used to synchronize.
*/
-class V8EXPORT Isolate {
+class V8_EXPORT Isolate {
public:
/**
* Stack-allocated class which sets the isolate for all operations
* executed within a local scope.
*/
- class V8EXPORT Scope {
+ class V8_EXPORT Scope {
public:
explicit Scope(Isolate* isolate) : isolate_(isolate) {
isolate->Enter();
@@ -4020,13 +3978,13 @@ class V8EXPORT Isolate {
/**
* Associate embedder-specific data with the isolate
*/
- V8_INLINE(void SetData(void* data));
+ V8_INLINE void SetData(void* data);
/**
* Retrieve embedder-specific data from the isolate.
* Returns NULL if SetData has never been called.
*/
- V8_INLINE(void* GetData());
+ V8_INLINE void* GetData();
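// Illustrative sketch of round-tripping embedder state through the isolate;
// EmbedderState is hypothetical.
struct EmbedderState { int request_count; };
void Attach(Isolate* isolate, EmbedderState* state) {
  isolate->SetData(state);
}
EmbedderState* Current(Isolate* isolate) {
  // NULL until SetData() has been called.
  return static_cast<EmbedderState*>(isolate->GetData());
}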
/**
* Get statistics about the heap memory usage.
@@ -4107,7 +4065,7 @@ class V8EXPORT Isolate {
};
-class V8EXPORT StartupData {
+class V8_EXPORT StartupData {
public:
enum CompressionAlgorithm {
kUncompressed,
@@ -4128,7 +4086,7 @@ class V8EXPORT StartupData {
*
* For an example of the class usage, see the "shell.cc" sample application.
*/
-class V8EXPORT StartupDataDecompressor { // NOLINT
+class V8_EXPORT StartupDataDecompressor { // NOLINT
public:
StartupDataDecompressor();
virtual ~StartupDataDecompressor();
@@ -4272,7 +4230,7 @@ typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
/**
* Interface for iterating through all external resources in the heap.
*/
-class V8EXPORT ExternalResourceVisitor { // NOLINT
+class V8_EXPORT ExternalResourceVisitor { // NOLINT
public:
virtual ~ExternalResourceVisitor() {}
virtual void VisitExternalString(Handle<String> string) {}
@@ -4282,7 +4240,7 @@ class V8EXPORT ExternalResourceVisitor { // NOLINT
/**
* Interface for iterating through all the persistent handles in the heap.
*/
-class V8EXPORT PersistentHandleVisitor { // NOLINT
+class V8_EXPORT PersistentHandleVisitor { // NOLINT
public:
virtual ~PersistentHandleVisitor() {}
virtual void VisitPersistentHandle(Persistent<Value>* value,
@@ -4295,10 +4253,10 @@ class V8EXPORT PersistentHandleVisitor { // NOLINT
* to be modified. Useful when otherwise unsafe handle operations need to
* be performed.
*/
-class V8EXPORT AssertNoGCScope {
+class V8_EXPORT AssertNoGCScope {
#ifndef DEBUG
// TODO(yangguo): remove isolate argument.
- V8_INLINE(AssertNoGCScope(Isolate* isolate)) { }
+ V8_INLINE AssertNoGCScope(Isolate* isolate) {}
#else
AssertNoGCScope(Isolate* isolate);
~AssertNoGCScope();
@@ -4311,7 +4269,7 @@ class V8EXPORT AssertNoGCScope {
/**
* Container class for static utility functions.
*/
-class V8EXPORT V8 {
+class V8_EXPORT V8 {
public:
/** Set the callback to invoke in case of fatal errors. */
static void SetFatalErrorHandler(FatalErrorCallback that);
@@ -4589,62 +4547,6 @@ class V8EXPORT V8 {
intptr_t change_in_bytes);
/**
- * Suspends recording of tick samples in the profiler.
- * When the V8 profiling mode is enabled (usually via command line
- * switches) this function suspends recording of tick samples.
- * Profiling ticks are discarded until ResumeProfiler() is called.
- *
- * See also the --prof and --prof_auto command line switches to
- * enable V8 profiling.
- */
- V8_DEPRECATED(static void PauseProfiler());
-
- /**
- * Resumes recording of tick samples in the profiler.
- * See also PauseProfiler().
- */
- V8_DEPRECATED(static void ResumeProfiler());
-
- /**
- * Return whether profiler is currently paused.
- */
- V8_DEPRECATED(static bool IsProfilerPaused());
-
- /**
- * Retrieve the V8 thread id of the calling thread.
- *
- * The thread id for a thread should only be retrieved after the V8
- * lock has been acquired with a Locker object with that thread.
- */
- static int GetCurrentThreadId();
-
- /**
- * Forcefully terminate execution of a JavaScript thread. This can
- * be used to terminate long-running scripts.
- *
- * TerminateExecution should only be called when the V8 lock has
- * been acquired with a Locker object. Therefore, in order to be
- * able to terminate long-running threads, preemption must be
- * enabled to allow the user of TerminateExecution to acquire the
- * lock.
- *
- * The termination is achieved by throwing an exception that is
- * uncatchable by JavaScript exception handlers. Termination
- * exceptions act as if they were caught by a C++ TryCatch exception
- * handler. If forceful termination is used, any C++ TryCatch
- * exception handler that catches an exception should check if that
- * exception is a termination exception and immediately return if
- * that is the case. Returning immediately in that case will
- * continue the propagation of the termination exception if needed.
- *
- * The thread id passed to TerminateExecution must have been
- * obtained by calling GetCurrentThreadId on the thread in question.
- *
- * \param thread_id The thread id of the thread to terminate.
- */
- static void TerminateExecution(int thread_id);
-
- /**
* Forcefully terminate the current thread of JavaScript execution
* in the given isolate. If no isolate is provided, the default
* isolate is used.
@@ -4762,16 +4664,24 @@ class V8EXPORT V8 {
static internal::Object** GlobalizeReference(internal::Isolate* isolate,
internal::Object** handle);
+ static internal::Object** CopyPersistent(internal::Object** handle);
static void DisposeGlobal(internal::Object** global_handle);
typedef WeakReferenceCallbacks<Value, void>::Revivable RevivableCallback;
+ typedef WeakCallbackData<Value, void>::Callback WeakCallback;
static void MakeWeak(internal::Object** global_handle,
void* data,
+ WeakCallback weak_callback,
RevivableCallback weak_reference_callback);
static void ClearWeak(internal::Object** global_handle);
+ static void Eternalize(Isolate* isolate,
+ Value* handle,
+ int* index);
+ static Local<Value> GetEternal(Isolate* isolate, int index);
template <class T> friend class Handle;
template <class T> friend class Local;
- template <class T> friend class Persistent;
+ template <class T> friend class Eternal;
+ template <class T, class M> friend class Persistent;
friend class Context;
};
@@ -4779,7 +4689,7 @@ class V8EXPORT V8 {
/**
* An external exception handler.
*/
-class V8EXPORT TryCatch {
+class V8_EXPORT TryCatch {
public:
/**
* Creates a new try/catch block and registers it with v8. Note that
@@ -4912,7 +4822,7 @@ class V8EXPORT TryCatch {
/**
* Ignore
*/
-class V8EXPORT ExtensionConfiguration {
+class V8_EXPORT ExtensionConfiguration {
public:
ExtensionConfiguration(int name_count, const char* names[])
: name_count_(name_count), names_(names) { }
@@ -4927,7 +4837,7 @@ class V8EXPORT ExtensionConfiguration {
* A sandboxed execution context with its own set of built-in objects
* and functions.
*/
-class V8EXPORT Context {
+class V8_EXPORT Context {
public:
/**
* Returns the global proxy object or global object itself for
@@ -5049,7 +4959,7 @@ class V8EXPORT Context {
* previous call to SetEmbedderData with the same index. Note that index 0
* currently has a special meaning for Chrome's debugger.
*/
- V8_INLINE(Local<Value> GetEmbedderData(int index));
+ V8_INLINE Local<Value> GetEmbedderData(int index);
/**
* Sets the embedder data with the given index, growing the data as
@@ -5064,7 +4974,7 @@ class V8EXPORT Context {
* SetAlignedPointerInEmbedderData with the same index. Note that index 0
* currently has a special meaning for Chrome's debugger.
*/
- V8_INLINE(void* GetAlignedPointerFromEmbedderData(int index));
+ V8_INLINE void* GetAlignedPointerFromEmbedderData(int index);
/**
* Sets a 2-byte-aligned native pointer in the embedder data with the given
@@ -5107,19 +5017,15 @@ class V8EXPORT Context {
*/
class Scope {
public:
- explicit V8_INLINE(Scope(Handle<Context> context)) : context_(context) {
+ explicit V8_INLINE Scope(Handle<Context> context) : context_(context) {
context_->Enter();
}
// TODO(dcarney): deprecate
- V8_INLINE(Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
-#ifndef V8_USE_UNSAFE_HANDLES
+ V8_INLINE Scope(Isolate* isolate, Persistent<Context>& context) // NOLINT
: context_(Handle<Context>::New(isolate, context)) {
-#else
- : context_(Local<Context>::New(isolate, context)) {
-#endif
context_->Enter();
}
- V8_INLINE(~Scope()) { context_->Exit(); }
+ V8_INLINE ~Scope() { context_->Exit(); }
private:
Handle<Context> context_;
@@ -5212,12 +5118,12 @@ class V8EXPORT Context {
* // V8 Now no longer locked.
* \endcode
*/
-class V8EXPORT Unlocker {
+class V8_EXPORT Unlocker {
public:
/**
* Initialize Unlocker for a given Isolate.
*/
- V8_INLINE(explicit Unlocker(Isolate* isolate)) { Initialize(isolate); }
+ V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
/** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(Unlocker());
@@ -5230,12 +5136,12 @@ class V8EXPORT Unlocker {
};
-class V8EXPORT Locker {
+class V8_EXPORT Locker {
public:
/**
* Initialize Locker for a given Isolate.
*/
- V8_INLINE(explicit Locker(Isolate* isolate)) { Initialize(isolate); }
+ V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
/** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(Locker());
@@ -5291,7 +5197,7 @@ struct HeapStatsUpdate;
/**
* An interface for exporting data from V8, using "push" model.
*/
-class V8EXPORT OutputStream { // NOLINT
+class V8_EXPORT OutputStream { // NOLINT
public:
enum OutputEncoding {
kAscii = 0 // 7-bit ASCII.
@@ -5328,7 +5234,7 @@ class V8EXPORT OutputStream { // NOLINT
* An interface for reporting progress and controlling long-running
* activities.
*/
-class V8EXPORT ActivityControl { // NOLINT
+class V8_EXPORT ActivityControl { // NOLINT
public:
enum ControlOption {
kContinue = 0,
@@ -5364,7 +5270,7 @@ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t ptr_size> struct SmiTagging;
template<int kSmiShiftSize>
-V8_INLINE(internal::Object* IntToSmi(int value)) {
+V8_INLINE internal::Object* IntToSmi(int value) {
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
intptr_t tagged_value =
(static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
@@ -5375,15 +5281,15 @@ V8_INLINE(internal::Object* IntToSmi(int value)) {
template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
static const int kSmiValueSize = 31;
- V8_INLINE(static int SmiToInt(internal::Object* value)) {
+ V8_INLINE static int SmiToInt(internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
- V8_INLINE(static internal::Object* IntToSmi(int value)) {
+ V8_INLINE static internal::Object* IntToSmi(int value) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
- V8_INLINE(static bool IsValidSmi(intptr_t value)) {
+ V8_INLINE static bool IsValidSmi(intptr_t value) {
 // To be representable as a tagged small integer, the two
// most-significant bits of 'value' must be either 00 or 11 due to
// sign-extension. To check this we add 01 to the two
@@ -5403,15 +5309,15 @@ template <> struct SmiTagging<4> {
template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31;
static const int kSmiValueSize = 32;
- V8_INLINE(static int SmiToInt(internal::Object* value)) {
+ V8_INLINE static int SmiToInt(internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
- V8_INLINE(static internal::Object* IntToSmi(int value)) {
+ V8_INLINE static internal::Object* IntToSmi(int value) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
- V8_INLINE(static bool IsValidSmi(intptr_t value)) {
+ V8_INLINE static bool IsValidSmi(intptr_t value) {
// To be representable as a long smi, the value must be a 32-bit integer.
return (value == static_cast<int32_t>(value));
}
@@ -5420,6 +5326,8 @@ template <> struct SmiTagging<8> {
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
+V8_INLINE static bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
+V8_INLINE static bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
/**
* This class exports constants and functionality from within v8 that
@@ -5451,7 +5359,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 135;
+ static const int kEmptyStringRootIndex = 131;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -5471,94 +5379,93 @@ class Internals {
static const int kNullOddballKind = 3;
static void CheckInitializedImpl(v8::Isolate* isolate);
- V8_INLINE(static void CheckInitialized(v8::Isolate* isolate)) {
+ V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
CheckInitializedImpl(isolate);
#endif
}
- V8_INLINE(static bool HasHeapObjectTag(internal::Object* value)) {
+ V8_INLINE static bool HasHeapObjectTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
- V8_INLINE(static int SmiValue(internal::Object* value)) {
+ V8_INLINE static int SmiValue(internal::Object* value) {
return PlatformSmiTagging::SmiToInt(value);
}
- V8_INLINE(static internal::Object* IntToSmi(int value)) {
+ V8_INLINE static internal::Object* IntToSmi(int value) {
return PlatformSmiTagging::IntToSmi(value);
}
- V8_INLINE(static bool IsValidSmi(intptr_t value)) {
+ V8_INLINE static bool IsValidSmi(intptr_t value) {
return PlatformSmiTagging::IsValidSmi(value);
}
- V8_INLINE(static int GetInstanceType(internal::Object* obj)) {
+ V8_INLINE static int GetInstanceType(internal::Object* obj) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
}
- V8_INLINE(static int GetOddballKind(internal::Object* obj)) {
+ V8_INLINE static int GetOddballKind(internal::Object* obj) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
- V8_INLINE(static bool IsExternalTwoByteString(int instance_type)) {
+ V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
}
- V8_INLINE(static uint8_t GetNodeFlag(internal::Object** obj, int shift)) {
+ V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- return *addr & (1 << shift);
+ return *addr & static_cast<uint8_t>(1U << shift);
}
- V8_INLINE(static void UpdateNodeFlag(internal::Object** obj,
- bool value, int shift)) {
+ V8_INLINE static void UpdateNodeFlag(internal::Object** obj,
+ bool value, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- uint8_t mask = 1 << shift;
- *addr = (*addr & ~mask) | (value << shift);
+ uint8_t mask = static_cast<uint8_t>(1 << shift);
+ *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
- V8_INLINE(static uint8_t GetNodeState(internal::Object** obj)) {
+ V8_INLINE static uint8_t GetNodeState(internal::Object** obj) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & kNodeStateMask;
}
- V8_INLINE(static void UpdateNodeState(internal::Object** obj,
- uint8_t value)) {
+ V8_INLINE static void UpdateNodeState(internal::Object** obj,
+ uint8_t value) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
- *addr = (*addr & ~kNodeStateMask) | value;
+ *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
- V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) {
+ V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, void* data) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
*reinterpret_cast<void**>(addr) = data;
}
- V8_INLINE(static void* GetEmbedderData(v8::Isolate* isolate)) {
+ V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
return *reinterpret_cast<void**>(addr);
}
- V8_INLINE(static internal::Object** GetRoot(v8::Isolate* isolate,
- int index)) {
+ V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate,
+ int index) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
- template <typename T>
- V8_INLINE(static T ReadField(Object* ptr, int offset)) {
+ template <typename T> V8_INLINE static T ReadField(Object* ptr, int offset) {
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<T*>(addr);
}
template <typename T>
- V8_INLINE(static T ReadEmbedderData(Context* context, int index)) {
+ V8_INLINE static T ReadEmbedderData(Context* context, int index) {
typedef internal::Object O;
typedef internal::Internals I;
O* ctx = *reinterpret_cast<O**>(context);
@@ -5570,13 +5477,13 @@ class Internals {
return I::ReadField<T>(embedder_data, value_offset);
}
- V8_INLINE(static bool CanCastToHeapObject(void* o)) { return false; }
- V8_INLINE(static bool CanCastToHeapObject(Context* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(String* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(Object* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(Message* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(StackTrace* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(StackFrame* o)) { return true; }
+ V8_INLINE static bool CanCastToHeapObject(void* o) { return false; }
+ V8_INLINE static bool CanCastToHeapObject(Context* o) { return true; }
+ V8_INLINE static bool CanCastToHeapObject(String* o) { return true; }
+ V8_INLINE static bool CanCastToHeapObject(Object* o) { return true; }
+ V8_INLINE static bool CanCastToHeapObject(Message* o) { return true; }
+ V8_INLINE static bool CanCastToHeapObject(StackTrace* o) { return true; }
+ V8_INLINE static bool CanCastToHeapObject(StackFrame* o) { return true; }
};
} // namespace internal
@@ -5604,9 +5511,9 @@ Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
return New(isolate, that.val_);
}
-#ifndef V8_USE_UNSAFE_HANDLES
template <class T>
-Local<T> Local<T>::New(Isolate* isolate, const Persistent<T>& that) {
+template <class M>
+Local<T> Local<T>::New(Isolate* isolate, const Persistent<T, M>& that) {
return New(isolate, that.val_);
}
@@ -5618,7 +5525,6 @@ Handle<T> Handle<T>::New(Isolate* isolate, T* that) {
return Handle<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
reinterpret_cast<internal::Isolate*>(isolate), *p)));
}
-#endif
template <class T>
@@ -5631,27 +5537,22 @@ Local<T> Local<T>::New(Isolate* isolate, T* that) {
}
-#ifdef V8_USE_UNSAFE_HANDLES
-template <class T>
-Persistent<T> Persistent<T>::New(Handle<T> that) {
- return New(Isolate::GetCurrent(), that.val_);
+template<class T>
+template<class S>
+void Eternal<T>::Set(Isolate* isolate, Local<S> handle) {
+ TYPE_CHECK(T, S);
+ V8::Eternalize(isolate, reinterpret_cast<Value*>(*handle), &this->index_);
}
-template <class T>
-Persistent<T> Persistent<T>::New(Isolate* isolate, Handle<T> that) {
- return New(Isolate::GetCurrent(), that.val_);
+template<class T>
+Local<T> Eternal<T>::Get(Isolate* isolate) {
+ return Local<T>(reinterpret_cast<T*>(*V8::GetEternal(isolate, index_)));
}
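// Illustrative sketch of the Eternal round trip defined above; the handle is
// never reclaimed, so it suits process-lifetime caches. Names are
// hypothetical.
Eternal<String> cached_key;
void InitKey(Isolate* isolate) {
  cached_key.Set(isolate, String::New("key"));
}
Local<String> Key(Isolate* isolate) {
  return cached_key.Get(isolate);
}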
-template <class T>
-Persistent<T> Persistent<T>::New(Isolate* isolate, Persistent<T> that) {
- return New(Isolate::GetCurrent(), that.val_);
-}
-#endif
-
-template <class T>
-T* Persistent<T>::New(Isolate* isolate, T* that) {
+template <class T, class M>
+T* Persistent<T, M>::New(Isolate* isolate, T* that) {
if (that == NULL) return NULL;
internal::Object** p = reinterpret_cast<internal::Object**>(that);
return reinterpret_cast<T*>(
@@ -5660,8 +5561,20 @@ T* Persistent<T>::New(Isolate* isolate, T* that) {
}
-template <class T>
-bool Persistent<T>::IsIndependent() const {
+template <class T, class M>
+template <class S, class M2>
+void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
+ TYPE_CHECK(T, S);
+ Reset();
+ if (that.IsEmpty()) return;
+ internal::Object** p = reinterpret_cast<internal::Object**>(that.val_);
+ this->val_ = reinterpret_cast<T*>(V8::CopyPersistent(p));
+ M::Copy(that, this);
+}
+
+
+template <class T, class M>
+bool Persistent<T, M>::IsIndependent() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5669,8 +5582,8 @@ bool Persistent<T>::IsIndependent() const {
}
-template <class T>
-bool Persistent<T>::IsNearDeath() const {
+template <class T, class M>
+bool Persistent<T, M>::IsNearDeath() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
uint8_t node_state =
@@ -5680,8 +5593,8 @@ bool Persistent<T>::IsNearDeath() const {
}
-template <class T>
-bool Persistent<T>::IsWeak() const {
+template <class T, class M>
+bool Persistent<T, M>::IsWeak() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
@@ -5689,66 +5602,89 @@ bool Persistent<T>::IsWeak() const {
}
-template <class T>
-void Persistent<T>::Dispose() {
+template <class T, class M>
+void Persistent<T, M>::Reset() {
if (this->IsEmpty()) return;
V8::DisposeGlobal(reinterpret_cast<internal::Object**>(this->val_));
-#ifndef V8_USE_UNSAFE_HANDLES
val_ = 0;
-#endif
}
-template <class T>
+template <class T, class M>
+template <class S>
+void Persistent<T, M>::Reset(Isolate* isolate, const Handle<S>& other) {
+ TYPE_CHECK(T, S);
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = New(isolate, other.val_);
+}
+
+
+template <class T, class M>
+template <class S, class M2>
+void Persistent<T, M>::Reset(Isolate* isolate,
+ const Persistent<S, M2>& other) {
+ TYPE_CHECK(T, S);
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = New(isolate, other.val_);
+}
+
+
+template <class T, class M>
template <typename S, typename P>
-void Persistent<T>::MakeWeak(
- P* parameters,
- typename WeakReferenceCallbacks<S, P>::Revivable callback) {
+void Persistent<T, M>::SetWeak(
+ P* parameter,
+ typename WeakCallbackData<S, P>::Callback callback) {
TYPE_CHECK(S, T);
- typedef typename WeakReferenceCallbacks<Value, void>::Revivable Revivable;
+ typedef typename WeakCallbackData<Value, void>::Callback Callback;
V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_),
- parameters,
- reinterpret_cast<Revivable>(callback));
+ parameter,
+ reinterpret_cast<Callback>(callback),
+ NULL);
}
-template <class T>
+template <class T, class M>
template <typename P>
-void Persistent<T>::MakeWeak(
- P* parameters,
- typename WeakReferenceCallbacks<T, P>::Revivable callback) {
- MakeWeak<T, P>(parameters, callback);
+void Persistent<T, M>::SetWeak(
+ P* parameter,
+ typename WeakCallbackData<T, P>::Callback callback) {
+ SetWeak<T, P>(parameter, callback);
}
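// Illustrative sketch of the new SetWeak() path; Wrapper is hypothetical
// native state whose lifetime follows the wrapped JavaScript object.
struct Wrapper {
  Persistent<Object> handle;
};
void OnGone(const WeakCallbackData<Object, Wrapper>& data) {
  Wrapper* wrapper = data.GetParameter();
  wrapper->handle.Reset();  // release the storage cell
  delete wrapper;
}
void Watch(Wrapper* wrapper) {
  wrapper->handle.SetWeak(wrapper, OnGone);
}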
-template <class T>
+template <class T, class M>
template <typename S, typename P>
-void Persistent<T>::MakeWeak(
- Isolate* isolate,
+void Persistent<T, M>::MakeWeak(
P* parameters,
typename WeakReferenceCallbacks<S, P>::Revivable callback) {
- MakeWeak<S, P>(parameters, callback);
+ TYPE_CHECK(S, T);
+ typedef typename WeakReferenceCallbacks<Value, void>::Revivable Revivable;
+ V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_),
+ parameters,
+ NULL,
+ reinterpret_cast<Revivable>(callback));
}
-template <class T>
-template<typename P>
-void Persistent<T>::MakeWeak(
- Isolate* isolate,
+template <class T, class M>
+template <typename P>
+void Persistent<T, M>::MakeWeak(
P* parameters,
typename WeakReferenceCallbacks<T, P>::Revivable callback) {
- MakeWeak<P>(parameters, callback);
+ MakeWeak<T, P>(parameters, callback);
}
-template <class T>
-void Persistent<T>::ClearWeak() {
+template <class T, class M>
+void Persistent<T, M>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_));
}
-template <class T>
-void Persistent<T>::MarkIndependent() {
+template <class T, class M>
+void Persistent<T, M>::MarkIndependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5757,8 +5693,8 @@ void Persistent<T>::MarkIndependent() {
}
-template <class T>
-void Persistent<T>::MarkPartiallyDependent() {
+template <class T, class M>
+void Persistent<T, M>::MarkPartiallyDependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
@@ -5767,54 +5703,17 @@ void Persistent<T>::MarkPartiallyDependent() {
}
-template <class T>
-void Persistent<T>::Reset(Isolate* isolate, const Handle<T>& other) {
- Dispose(isolate);
-#ifdef V8_USE_UNSAFE_HANDLES
- *this = *New(isolate, other);
-#else
- if (other.IsEmpty()) {
- this->val_ = NULL;
- return;
- }
- internal::Object** p = reinterpret_cast<internal::Object**>(other.val_);
- this->val_ = reinterpret_cast<T*>(
- V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate), p));
-#endif
-}
-
-
-#ifndef V8_USE_UNSAFE_HANDLES
-template <class T>
-void Persistent<T>::Reset(Isolate* isolate, const Persistent<T>& other) {
- Dispose(isolate);
- if (other.IsEmpty()) {
- this->val_ = NULL;
- return;
- }
- internal::Object** p = reinterpret_cast<internal::Object**>(other.val_);
- this->val_ = reinterpret_cast<T*>(
- V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate), p));
-}
-#endif
-
-
-template <class T>
-T* Persistent<T>::ClearAndLeak() {
+template <class T, class M>
+T* Persistent<T, M>::ClearAndLeak() {
T* old;
-#ifdef V8_USE_UNSAFE_HANDLES
- old = **this;
- *this = Persistent<T>();
-#else
old = val_;
val_ = NULL;
-#endif
return old;
}
-template <class T>
-void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
+template <class T, class M>
+void Persistent<T, M>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
@@ -5823,8 +5722,8 @@ void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
}
-template <class T>
-uint16_t Persistent<T>::WrapperClassId() const {
+template <class T, class M>
+uint16_t Persistent<T, M>::WrapperClassId() const {
typedef internal::Internals I;
if (this->IsEmpty()) return 0;
internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
@@ -5880,7 +5779,7 @@ void ReturnValue<T>::Set(uint32_t i) {
TYPE_CHECK(T, Integer);
typedef internal::Internals I;
// Can't simply use INT32_MAX here for whatever reason.
- bool fits_into_int32_t = (i & (1 << 31)) == 0;
+ bool fits_into_int32_t = (i & (1U << 31)) == 0;
if (V8_LIKELY(fits_into_int32_t)) {
Set(static_cast<int32_t>(i));
return;
@@ -5946,13 +5845,6 @@ FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Object** implicit_args,
is_construct_call_(is_construct_call) { }
-Arguments::Arguments(internal::Object** args,
- internal::Object** values,
- int length,
- bool is_construct_call)
- : FunctionCallbackInfo<Value>(args, values, length, is_construct_call) { }
-
-
template<typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
@@ -6542,7 +6434,6 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
} // namespace v8
-#undef V8EXPORT
#undef TYPE_CHECK
diff --git a/chromium/v8/include/v8config.h b/chromium/v8/include/v8config.h
new file mode 100644
index 00000000000..6fe5c5aabc7
--- /dev/null
+++ b/chromium/v8/include/v8config.h
@@ -0,0 +1,451 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8CONFIG_H_
+#define V8CONFIG_H_
+
+// Platform headers for feature detection below.
+#if defined(__ANDROID__)
+# include <sys/cdefs.h>
+#elif defined(__APPLE__)
+# include <TargetConditionals.h>
+#elif defined(__linux__)
+# include <features.h>
+#endif
+
+
+// This macro allows testing for the version of the GNU C library (or
+// a compatible C library that masquerades as glibc). It evaluates to
+// 0 if libc is not GNU libc or compatible.
+// Use like:
+// #if V8_GLIBC_PREREQ(2, 3)
+// ...
+// #endif
+#if defined(__GLIBC__) && defined(__GLIBC_MINOR__)
+# define V8_GLIBC_PREREQ(major, minor) \
+ ((__GLIBC__ * 100 + __GLIBC_MINOR__) >= ((major) * 100 + (minor)))
+#else
+# define V8_GLIBC_PREREQ(major, minor) 0
+#endif
+
+
+// This macro allows testing for the version of the GNU C++ compiler.
+// Note that this also applies to compilers that masquerade as GCC,
+// for example clang and the Intel C++ compiler for Linux.
+// Use like:
+// #if V8_GNUC_PREREQ(4, 3, 1)
+// ...
+// #endif
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+# define V8_GNUC_PREREQ(major, minor, patchlevel) \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
+ ((major) * 10000 + (minor) * 100 + (patchlevel)))
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+# define V8_GNUC_PREREQ(major, minor, patchlevel) \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__) >= \
+ ((major) * 10000 + (minor) * 100 + (patchlevel)))
+#else
+# define V8_GNUC_PREREQ(major, minor, patchlevel) 0
+#endif
+
+
+
+// -----------------------------------------------------------------------------
+// Operating system detection
+//
+// V8_OS_ANDROID - Android
+// V8_OS_BSD - BSDish (Mac OS X, Net/Free/Open/DragonFlyBSD)
+// V8_OS_CYGWIN - Cygwin
+// V8_OS_DRAGONFLYBSD - DragonFlyBSD
+// V8_OS_FREEBSD - FreeBSD
+// V8_OS_LINUX - Linux
+// V8_OS_MACOSX - Mac OS X
+// V8_OS_NACL - Native Client
+// V8_OS_NETBSD - NetBSD
+// V8_OS_OPENBSD - OpenBSD
+// V8_OS_POSIX - POSIX compatible (mostly everything except Windows)
+// V8_OS_SOLARIS - Sun Solaris and OpenSolaris
+// V8_OS_WIN - Microsoft Windows
+
+#if defined(__ANDROID__)
+# define V8_OS_ANDROID 1
+# define V8_OS_LINUX 1
+# define V8_OS_POSIX 1
+#elif defined(__APPLE__)
+# define V8_OS_BSD 1
+# define V8_OS_MACOSX 1
+# define V8_OS_POSIX 1
+#elif defined(__native_client__)
+# define V8_OS_NACL 1
+# define V8_OS_POSIX 1
+#elif defined(__CYGWIN__)
+# define V8_OS_CYGWIN 1
+# define V8_OS_POSIX 1
+#elif defined(__linux__)
+# define V8_OS_LINUX 1
+# define V8_OS_POSIX 1
+#elif defined(__sun)
+# define V8_OS_POSIX 1
+# define V8_OS_SOLARIS 1
+#elif defined(__FreeBSD__)
+# define V8_OS_BSD 1
+# define V8_OS_FREEBSD 1
+# define V8_OS_POSIX 1
+#elif defined(__DragonFly__)
+# define V8_OS_BSD 1
+# define V8_OS_DRAGONFLYBSD 1
+# define V8_OS_POSIX 1
+#elif defined(__NetBSD__)
+# define V8_OS_BSD 1
+# define V8_OS_NETBSD 1
+# define V8_OS_POSIX 1
+#elif defined(__OpenBSD__)
+# define V8_OS_BSD 1
+# define V8_OS_OPENBSD 1
+# define V8_OS_POSIX 1
+#elif defined(_WIN32)
+# define V8_OS_WIN 1
+#endif
+
+
+// -----------------------------------------------------------------------------
+// C library detection
+//
+// V8_LIBC_BIONIC - Bionic libc
+// V8_LIBC_BSD - BSD libc derivate
+// V8_LIBC_GLIBC - GNU C library
+// V8_LIBC_UCLIBC - uClibc
+//
+// Note that testing for libc must be done using #if not #ifdef. For example,
+// to test for the GNU C library, use:
+// #if V8_LIBC_GLIBC
+// ...
+// #endif
+
+#if defined(__BIONIC__)
+# define V8_LIBC_BIONIC 1
+# define V8_LIBC_BSD 1
+#elif defined(__UCLIBC__)
+# define V8_LIBC_UCLIBC 1
+#elif defined(__GLIBC__) || defined(__GNU_LIBRARY__)
+# define V8_LIBC_GLIBC 1
+#else
+# define V8_LIBC_BSD V8_OS_BSD
+#endif
+
+
+// -----------------------------------------------------------------------------
+// Compiler detection
+//
+// V8_CC_CLANG - Clang
+// V8_CC_GNU - GNU C++
+// V8_CC_INTEL - Intel C++
+// V8_CC_MINGW - Minimalist GNU for Windows
+// V8_CC_MINGW32 - Minimalist GNU for Windows (mingw32)
+// V8_CC_MINGW64 - Minimalist GNU for Windows (mingw-w64)
+// V8_CC_MSVC - Microsoft Visual C/C++
+//
+// C++11 feature detection
+//
+// V8_HAS_CXX11_ALIGNAS - alignas specifier supported
+// V8_HAS_CXX11_ALIGNOF - alignof(type) operator supported
+// V8_HAS_CXX11_STATIC_ASSERT - static_assert() supported
+// V8_HAS_CXX11_DELETE - deleted functions supported
+// V8_HAS_CXX11_FINAL - final marker supported
+// V8_HAS_CXX11_OVERRIDE - override marker supported
+//
+// Compiler-specific feature detection
+//
+// V8_HAS___ALIGNOF - __alignof(type) operator supported
+// V8_HAS___ALIGNOF__ - __alignof__(type) operator supported
+// V8_HAS_ATTRIBUTE_ALIGNED - __attribute__((aligned(n))) supported
+// V8_HAS_ATTRIBUTE_ALWAYS_INLINE - __attribute__((always_inline))
+// supported
+// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
+// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
+// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
+// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
+// supported
+// V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
+// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
+// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
+// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
+// V8_HAS___FINAL - __final supported in non-C++11 mode
+// V8_HAS___FORCEINLINE - __forceinline supported
+// V8_HAS_SEALED - MSVC style sealed marker supported
+//
+// Note that testing for compilers and/or features must be done using #if
+// not #ifdef. For example, to test for Intel C++ Compiler, use:
+// #if V8_CC_INTEL
+// ...
+// #endif
+
+#if defined(__clang__)
+
+# define V8_CC_CLANG 1
+
+// Clang defines __alignof__ as an alias for __alignof
+# define V8_HAS___ALIGNOF 1
+# define V8_HAS___ALIGNOF__ V8_HAS___ALIGNOF
+
+# define V8_HAS_ATTRIBUTE_ALIGNED (__has_attribute(aligned))
+# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
+# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
+# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
+# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
+# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
+ (__has_attribute(warn_unused_result))
+
+# define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
+
+# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
+# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
+# define V8_HAS_CXX11_DELETE (__has_feature(cxx_deleted_functions))
+# define V8_HAS_CXX11_FINAL (__has_feature(cxx_override_control))
+# define V8_HAS_CXX11_OVERRIDE (__has_feature(cxx_override_control))
+
+#elif defined(__GNUC__)
+
+# define V8_CC_GNU 1
+// Intel C++ also masquerades as GCC 3.2.0
+# define V8_CC_INTEL (defined(__INTEL_COMPILER))
+# define V8_CC_MINGW32 (defined(__MINGW32__))
+# define V8_CC_MINGW64 (defined(__MINGW64__))
+# define V8_CC_MINGW (V8_CC_MINGW32 || V8_CC_MINGW64)
+
+# define V8_HAS___ALIGNOF__ (V8_GNUC_PREREQ(4, 3, 0))
+
+# define V8_HAS_ATTRIBUTE_ALIGNED (V8_GNUC_PREREQ(2, 95, 0))
+// always_inline is available in gcc 4.0 but not very reliable until 4.4.
+// Works around "sorry, unimplemented: inlining failed" build errors with
+// older compilers.
+# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (V8_GNUC_PREREQ(4, 4, 0))
+# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
+# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
+ (!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
+
+# define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0))
+
+// g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
+// without warnings (functionality used by the macros below). These modes
+// are detectable by checking whether __GXX_EXPERIMENTAL_CXX0X__ is defined or,
+// more standardly, by checking whether __cplusplus has a C++11 or greater
+// value. Current versions of g++ do not correctly set __cplusplus, so we check
+// both for forward compatibility.
+# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
+# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
+# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
+# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
+# define V8_HAS_CXX11_DELETE (V8_GNUC_PREREQ(4, 4, 0))
+# define V8_HAS_CXX11_OVERRIDE (V8_GNUC_PREREQ(4, 7, 0))
+# define V8_HAS_CXX11_FINAL (V8_GNUC_PREREQ(4, 7, 0))
+# else
+// '__final' is a non-C++11 GCC synonym for 'final', per GCC r176655.
+# define V8_HAS___FINAL (V8_GNUC_PREREQ(4, 7, 0))
+# endif
+
+#elif defined(_MSC_VER)
+
+# define V8_CC_MSVC 1
+
+# define V8_HAS___ALIGNOF 1
+
+// Override control was added with Visual Studio 2005, but
+// Visual Studio 2010 and earlier spell "final" as "sealed".
+# define V8_HAS_CXX11_FINAL (_MSC_VER >= 1700)
+# define V8_HAS_CXX11_OVERRIDE (_MSC_VER >= 1400)
+# define V8_HAS_SEALED (_MSC_VER >= 1400)
+
+# define V8_HAS_DECLSPEC_ALIGN 1
+# define V8_HAS_DECLSPEC_DEPRECATED (_MSC_VER >= 1300)
+# define V8_HAS_DECLSPEC_NOINLINE 1
+
+# define V8_HAS___FORCEINLINE 1
+
+#endif
+
+
+// -----------------------------------------------------------------------------
+// Helper macros
+
+// A macro used to encourage better inlining. Don't bother for debug builds.
+// Use like:
+// V8_INLINE int GetZero() { return 0; }
+#if !defined(DEBUG) && V8_HAS_ATTRIBUTE_ALWAYS_INLINE
+# define V8_INLINE inline __attribute__((always_inline))
+#elif !defined(DEBUG) && V8_HAS___FORCEINLINE
+# define V8_INLINE __forceinline
+#else
+# define V8_INLINE inline
+#endif
+
+
+// A macro used to tell the compiler to never inline a particular function.
+// Don't bother for debug builds.
+// Use like:
+// V8_NOINLINE int GetMinusOne() { return -1; }
+#if !defined(DEBUG) && V8_HAS_ATTRIBUTE_NOINLINE
+# define V8_NOINLINE __attribute__((noinline))
+#elif !defined(DEBUG) && V8_HAS_DECLSPEC_NOINLINE
+# define V8_NOINLINE __declspec(noinline)
+#else
+# define V8_NOINLINE /* NOT SUPPORTED */
+#endif
+
+
+// A macro to mark classes or functions as deprecated.
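+// Use like:
+//   V8_DEPRECATED(static void Foo());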
+#if !V8_DISABLE_DEPRECATIONS && V8_HAS_ATTRIBUTE_DEPRECATED
+# define V8_DEPRECATED(declarator) declarator __attribute__((deprecated))
+#elif !V8_DISABLE_DEPRECATIONS && V8_HAS_DECLSPEC_DEPRECATED
+# define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
+#else
+# define V8_DEPRECATED(declarator) declarator
+#endif
+
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+// int foo() V8_WARN_UNUSED_RESULT;
+#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
+# define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+# define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
+#endif
+
+
+// A macro to provide the compiler with branch prediction information.
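+// Use like:
+//   if (V8_UNLIKELY(ptr == NULL)) return;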
+#if V8_HAS_BUILTIN_EXPECT
+# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
+# define V8_LIKELY(condition) (__builtin_expect(!!(condition), 1))
+#else
+# define V8_UNLIKELY(condition) (condition)
+# define V8_LIKELY(condition) (condition)
+#endif
+
+
+// A macro to specify that a method is deleted from the corresponding class.
+// Any attempt to use the method will always produce an error at compile time
+// when this macro can be implemented (i.e. if the compiler supports C++11).
+// If the current compiler does not support C++11, use of the annotated method
+// will still cause an error, but the error will most likely occur at link time
+// rather than at compile time. As a backstop, method declarations using this
+// macro should be private.
+// Use like:
+// class A {
+// private:
+// A(const A& other) V8_DELETE;
+// A& operator=(const A& other) V8_DELETE;
+// };
+#if V8_HAS_CXX11_DELETE
+# define V8_DELETE = delete
+#else
+# define V8_DELETE /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a virtual method indicating it must be overriding a virtual
+// method in the parent class.
+// Use like:
+// virtual void bar() V8_OVERRIDE;
+#if V8_HAS_CXX11_OVERRIDE
+# define V8_OVERRIDE override
+#else
+# define V8_OVERRIDE /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a virtual method indicating that subclasses must not override it,
+// or annotate a class to indicate that it cannot be subclassed.
+// Use like:
+// class B V8_FINAL : public A {};
+// virtual void bar() V8_FINAL;
+#if V8_HAS_CXX11_FINAL
+# define V8_FINAL final
+#elif V8_HAS___FINAL
+# define V8_FINAL __final
+#elif V8_HAS_SEALED
+# define V8_FINAL sealed
+#else
+# define V8_FINAL /* NOT SUPPORTED */
+#endif
+
+
+// This macro allows specifying memory alignment for structs, classes, etc.
+// Use like:
+// class V8_ALIGNED(16) MyClass { ... };
+// V8_ALIGNED(32) int array[42];
+#if V8_HAS_CXX11_ALIGNAS
+# define V8_ALIGNED(n) alignas(n)
+#elif V8_HAS_ATTRIBUTE_ALIGNED
+# define V8_ALIGNED(n) __attribute__((aligned(n)))
+#elif V8_HAS_DECLSPEC_ALIGN
+# define V8_ALIGNED(n) __declspec(align(n))
+#else
+# define V8_ALIGNED(n) /* NOT SUPPORTED */
+#endif
+
+
+// This macro is similar to V8_ALIGNED(), but takes a type instead of a size
+// in bytes. If the compiler does not support using the alignment of the
+// |type|, it will align according to the |alignment| instead. For example,
+// Visual Studio C++ cannot combine __declspec(align) and __alignof. The
+// |alignment| must be a literal that is used as a kind of worst-case fallback
+// alignment.
+// Use like:
+// struct V8_ALIGNAS(AnotherClass, 16) NewClass { ... };
+// V8_ALIGNAS(double, 8) int array[100];
+#if V8_HAS_CXX11_ALIGNAS
+# define V8_ALIGNAS(type, alignment) alignas(type)
+#elif V8_HAS___ALIGNOF__ && V8_HAS_ATTRIBUTE_ALIGNED
+# define V8_ALIGNAS(type, alignment) __attribute__((aligned(__alignof__(type))))
+#else
+# define V8_ALIGNAS(type, alignment) V8_ALIGNED(alignment)
+#endif
+
+
+// This macro returns the alignment in bytes (an integer power of two)
+// required for any instance of the given type, which is either a complete
+// type, an array type, or a reference type.
+// Use like:
+// size_t alignment = V8_ALIGNOF(double);
+#if V8_HAS_CXX11_ALIGNOF
+# define V8_ALIGNOF(type) alignof(type)
+#elif V8_HAS___ALIGNOF
+# define V8_ALIGNOF(type) __alignof(type)
+#elif V8_HAS___ALIGNOF__
+# define V8_ALIGNOF(type) __alignof__(type)
+#else
+// Note that the alignment of a type within a struct can be less than the
+// alignment of the type stand-alone (because of ancient ABIs), so this
+// should only be used as a last resort.
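+// The helper below works because the compiler pads |c| so that |t| starts at
+// an aligned offset; sizeof(AlignOfHelper<T>) - sizeof(T) is that alignment.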
+namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
+# define V8_ALIGNOF(type) (sizeof(::v8::AlignOfHelper<type>) - sizeof(type))
+#endif
+
+#endif // V8CONFIG_H_
diff --git a/chromium/v8/include/v8stdint.h b/chromium/v8/include/v8stdint.h
index 7c12e1f4907..0b49b379182 100644
--- a/chromium/v8/include/v8stdint.h
+++ b/chromium/v8/include/v8stdint.h
@@ -33,7 +33,9 @@
#include <stddef.h>
#include <stdio.h>
-#if defined(_WIN32) && !defined(__MINGW32__)
+#include "v8config.h"
+
+#if V8_OS_WIN && !V8_CC_MINGW
typedef signed char int8_t;
typedef unsigned char uint8_t;
@@ -47,7 +49,7 @@ typedef unsigned __int64 uint64_t;
#else
-#include <stdint.h>
+#include <stdint.h> // NOLINT
#endif
diff --git a/chromium/v8/samples/process.cc b/chromium/v8/samples/process.cc
index 844aee3d45f..e6f2ee3addd 100644
--- a/chromium/v8/samples/process.cc
+++ b/chromium/v8/samples/process.cc
@@ -291,9 +291,8 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
// Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles they will be
// automatically reclaimed.
- Isolate* isolate = GetIsolate();
- context_.Dispose(isolate);
- process_.Dispose(isolate);
+ context_.Dispose();
+ process_.Dispose();
}
diff --git a/chromium/v8/src/accessors.cc b/chromium/v8/src/accessors.cc
index a43eb78b870..669c02baf36 100644
--- a/chromium/v8/src/accessors.cc
+++ b/chromium/v8/src/accessors.cc
@@ -51,19 +51,27 @@ static C* FindInstanceOf(Isolate* isolate, Object* obj) {
// Entry point that should never be called.
-MaybeObject* Accessors::IllegalSetter(JSObject*, Object*, void*) {
+MaybeObject* Accessors::IllegalSetter(Isolate* isolate,
+ JSObject*,
+ Object*,
+ void*) {
UNREACHABLE();
return NULL;
}
-Object* Accessors::IllegalGetAccessor(Object* object, void*) {
+Object* Accessors::IllegalGetAccessor(Isolate* isolate,
+ Object* object,
+ void*) {
UNREACHABLE();
return object;
}
-MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
+MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate,
+ JSObject*,
+ Object* value,
+ void*) {
// According to ECMA-262, section 8.6.2.2, page 28, setting
// read-only properties must be silently ignored.
return value;
@@ -75,38 +83,41 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
//
-MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
+MaybeObject* Accessors::ArrayGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
// Traverse the prototype chain until we reach an array.
- JSArray* holder = FindInstanceOf<JSArray>(Isolate::Current(), object);
+ JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
return holder == NULL ? Smi::FromInt(0) : holder->length();
}
// The helper function will 'flatten' Number objects.
-Object* Accessors::FlattenNumber(Object* value) {
+Object* Accessors::FlattenNumber(Isolate* isolate, Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
- ASSERT(Isolate::Current()->context()->native_context()->number_function()->
+ ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
has_initial_map());
- Map* number_map = Isolate::Current()->context()->native_context()->
+ Map* number_map = isolate->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
-MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
- Isolate* isolate = object->GetIsolate();
-
+MaybeObject* Accessors::ArraySetLength(Isolate* isolate,
+ JSObject* object,
+ Object* value,
+ void*) {
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
- return object->SetLocalPropertyIgnoreAttributes(
+ return object->SetLocalPropertyIgnoreAttributesTrampoline(
isolate->heap()->length_string(), value, NONE);
}
- value = FlattenNumber(value);
+ value = FlattenNumber(isolate, value);
// Need to call methods that may trigger GC.
HandleScope scope(isolate);
@@ -116,9 +127,11 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Handle<Object> value_handle(value, isolate);
bool has_exception;
- Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
+ Handle<Object> uint32_v =
+ Execution::ToUint32(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
- Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
+ Handle<Object> number_v =
+ Execution::ToNumber(isolate, value_handle, &has_exception);
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
@@ -142,7 +155,9 @@ const AccessorDescriptor Accessors::ArrayLength = {
//
-MaybeObject* Accessors::StringGetLength(Object* object, void*) {
+MaybeObject* Accessors::StringGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
Object* value = object;
if (object->IsJSValue()) value = JSValue::cast(object)->value();
if (value->IsString()) return Smi::FromInt(String::cast(value)->length());
@@ -164,7 +179,9 @@ const AccessorDescriptor Accessors::StringLength = {
//
-MaybeObject* Accessors::ScriptGetSource(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetSource(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->source();
}
@@ -182,7 +199,9 @@ const AccessorDescriptor Accessors::ScriptSource = {
//
-MaybeObject* Accessors::ScriptGetName(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetName(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->name();
}
@@ -200,7 +219,7 @@ const AccessorDescriptor Accessors::ScriptName = {
//
-MaybeObject* Accessors::ScriptGetId(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetId(Isolate* isolate, Object* object, void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->id();
}
@@ -218,7 +237,9 @@ const AccessorDescriptor Accessors::ScriptId = {
//
-MaybeObject* Accessors::ScriptGetLineOffset(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetLineOffset(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->line_offset();
}
@@ -236,7 +257,9 @@ const AccessorDescriptor Accessors::ScriptLineOffset = {
//
-MaybeObject* Accessors::ScriptGetColumnOffset(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetColumnOffset(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->column_offset();
}
@@ -254,7 +277,9 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
//
-MaybeObject* Accessors::ScriptGetData(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetData(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->data();
}
@@ -272,7 +297,9 @@ const AccessorDescriptor Accessors::ScriptData = {
//
-MaybeObject* Accessors::ScriptGetType(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetType(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->type();
}
@@ -290,7 +317,9 @@ const AccessorDescriptor Accessors::ScriptType = {
//
-MaybeObject* Accessors::ScriptGetCompilationType(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetCompilationType(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Smi::FromInt(Script::cast(script)->compilation_type());
}
@@ -308,9 +337,10 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
//
-MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetLineEnds(Isolate* isolate,
+ Object* object,
+ void*) {
JSValue* wrapper = JSValue::cast(object);
- Isolate* isolate = wrapper->GetIsolate();
HandleScope scope(isolate);
Handle<Script> script(Script::cast(wrapper->value()), isolate);
InitScriptLineEnds(script);
@@ -337,7 +367,9 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
//
-MaybeObject* Accessors::ScriptGetContextData(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetContextData(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->context_data();
}
@@ -355,7 +387,9 @@ const AccessorDescriptor Accessors::ScriptContextData = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromScript(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
Handle<SharedFunctionInfo> eval_from_shared(
@@ -366,7 +400,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
return *GetScriptWrapper(eval_from_script);
}
}
- return HEAP->undefined_value();
+ return isolate->heap()->undefined_value();
}
@@ -382,9 +416,11 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = {
//
-MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Isolate* isolate,
+ Object* object,
+ void*) {
Script* raw_script = Script::cast(JSValue::cast(object)->value());
- HandleScope scope(raw_script->GetIsolate());
+ HandleScope scope(isolate);
Handle<Script> script(raw_script);
// If this is not a script compiled through eval there is no eval position.
@@ -413,7 +449,9 @@ const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
//
-MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
+MaybeObject* Accessors::ScriptGetEvalFromFunctionName(Isolate* isolate,
+ Object* object,
+ void*) {
Object* script = JSValue::cast(object)->value();
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
Script::cast(script)->eval_from_shared()));
@@ -440,15 +478,30 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
//
-Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate, Accessors::FunctionGetPrototype(*object, 0), Object);
+Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionGetPrototype(function->GetIsolate(),
+ *function,
+ NULL),
+ Object);
}
-MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
+ Handle<Object> prototype) {
+ ASSERT(function->should_have_prototype());
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionSetPrototype(function->GetIsolate(),
+ *function,
+ *prototype,
+ NULL),
+ Object);
+}
+
+
+MaybeObject* Accessors::FunctionGetPrototype(Isolate* isolate,
+ Object* object,
+ void*) {
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return isolate->heap()->undefined_value();
while (!function_raw->should_have_prototype()) {
@@ -469,18 +522,17 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
}
-MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
+MaybeObject* Accessors::FunctionSetPrototype(Isolate* isolate,
+ JSObject* object,
Object* value_raw,
void*) {
- Isolate* isolate = object->GetIsolate();
Heap* heap = isolate->heap();
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return heap->undefined_value();
if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(heap->prototype_string(),
- value_raw,
- NONE);
+ return object->SetLocalPropertyIgnoreAttributesTrampoline(
+ heap->prototype_string(), value_raw, NONE);
}
HandleScope scope(isolate);
@@ -523,8 +575,9 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
//
-MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetLength(Isolate* isolate,
+ Object* object,
+ void*) {
JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return Smi::FromInt(0);
// Check if already compiled.
@@ -554,8 +607,9 @@ const AccessorDescriptor Accessors::FunctionLength = {
//
-MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetName(Isolate* isolate,
+ Object* object,
+ void*) {
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
return holder == NULL
? isolate->heap()->undefined_value()
@@ -575,10 +629,12 @@ const AccessorDescriptor Accessors::FunctionName = {
//
-Handle<Object> Accessors::FunctionGetArguments(Handle<Object> object) {
- Isolate* isolate = Isolate::Current();
- CALL_HEAP_FUNCTION(
- isolate, Accessors::FunctionGetArguments(*object, 0), Object);
+Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
+ CALL_HEAP_FUNCTION(function->GetIsolate(),
+ Accessors::FunctionGetArguments(function->GetIsolate(),
+ *function,
+ NULL),
+ Object);
}
@@ -609,8 +665,9 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
}
-MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetArguments(Isolate* isolate,
+ Object* object,
+ void*) {
HandleScope scope(isolate);
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
@@ -732,8 +789,9 @@ class FrameFunctionIterator {
};
-MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
- Isolate* isolate = Isolate::Current();
+MaybeObject* Accessors::FunctionGetCaller(Isolate* isolate,
+ Object* object,
+ void*) {
HandleScope scope(isolate);
DisallowHeapAllocation no_allocation;
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
@@ -839,15 +897,16 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
Handle<String> name,
int index,
PropertyAttributes attributes) {
- Factory* factory = name->GetIsolate()->factory();
+ Isolate* isolate = name->GetIsolate();
+ Factory* factory = isolate->factory();
Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(true);
info->set_all_can_write(true);
info->set_name(*name);
info->set_data(Smi::FromInt(index));
- Handle<Object> getter = v8::FromCData(&ModuleGetExport);
- Handle<Object> setter = v8::FromCData(&ModuleSetExport);
+ Handle<Object> getter = v8::FromCData(isolate, &ModuleGetExport);
+ Handle<Object> setter = v8::FromCData(isolate, &ModuleSetExport);
info->set_getter(*getter);
if (!(attributes & ReadOnly)) info->set_setter(*setter);
return info;
diff --git a/chromium/v8/src/accessors.h b/chromium/v8/src/accessors.h
index ae56a3d4449..d9a2130f618 100644
--- a/chromium/v8/src/accessors.h
+++ b/chromium/v8/src/accessors.h
@@ -77,12 +77,10 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
- static Handle<Object> FunctionGetPrototype(Handle<Object> object);
- static Handle<Object> FunctionGetArguments(Handle<Object> object);
-
- MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
- Object* value,
- void*);
+ static Handle<Object> FunctionSetPrototype(Handle<JSFunction> object,
+ Handle<Object> value);
+ static Handle<Object> FunctionGetPrototype(Handle<JSFunction> object);
+ static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
@@ -90,34 +88,70 @@ class Accessors : public AllStatic {
private:
// Accessor functions only used through the descriptor.
- static MaybeObject* FunctionGetPrototype(Object* object, void*);
- static MaybeObject* FunctionGetLength(Object* object, void*);
- static MaybeObject* FunctionGetName(Object* object, void*);
- static MaybeObject* FunctionGetArguments(Object* object, void*);
- static MaybeObject* FunctionGetCaller(Object* object, void*);
- MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
- Object* value, void*);
- static MaybeObject* ArrayGetLength(Object* object, void*);
- static MaybeObject* StringGetLength(Object* object, void*);
- static MaybeObject* ScriptGetName(Object* object, void*);
- static MaybeObject* ScriptGetId(Object* object, void*);
- static MaybeObject* ScriptGetSource(Object* object, void*);
- static MaybeObject* ScriptGetLineOffset(Object* object, void*);
- static MaybeObject* ScriptGetColumnOffset(Object* object, void*);
- static MaybeObject* ScriptGetData(Object* object, void*);
- static MaybeObject* ScriptGetType(Object* object, void*);
- static MaybeObject* ScriptGetCompilationType(Object* object, void*);
- static MaybeObject* ScriptGetLineEnds(Object* object, void*);
- static MaybeObject* ScriptGetContextData(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
- static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
+ static MaybeObject* FunctionSetPrototype(Isolate* isolate,
+ JSObject* object,
+ Object*,
+ void*);
+ static MaybeObject* FunctionGetPrototype(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetLength(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetName(Isolate* isolate, Object* object, void*);
+ static MaybeObject* FunctionGetArguments(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* FunctionGetCaller(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ArraySetLength(Isolate* isolate,
+ JSObject* object,
+ Object*,
+ void*);
+ static MaybeObject* ArrayGetLength(Isolate* isolate, Object* object, void*);
+ static MaybeObject* StringGetLength(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetName(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetId(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetSource(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetLineOffset(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetData(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetLineEnds(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetContextData(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromScript(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromScriptPosition(Isolate* isolate,
+ Object* object,
+ void*);
+ static MaybeObject* ScriptGetEvalFromFunctionName(Isolate* isolate,
+ Object* object,
+ void*);
// Helper functions.
- static Object* FlattenNumber(Object* value);
- static MaybeObject* IllegalSetter(JSObject*, Object*, void*);
- static Object* IllegalGetAccessor(Object* object, void*);
- static MaybeObject* ReadOnlySetAccessor(JSObject*, Object* value, void*);
+ static Object* FlattenNumber(Isolate* isolate, Object* value);
+ static MaybeObject* IllegalSetter(Isolate* isolate,
+ JSObject*,
+ Object*,
+ void*);
+ static Object* IllegalGetAccessor(Isolate* isolate, Object* object, void*);
+ static MaybeObject* ReadOnlySetAccessor(Isolate* isolate,
+ JSObject*,
+ Object* value,
+ void*);
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/api.cc b/chromium/v8/src/api.cc
index 7b2524cc4d7..71a8f4a6cf7 100644
--- a/chromium/v8/src/api.cc
+++ b/chromium/v8/src/api.cc
@@ -46,12 +46,14 @@
#include "heap-profiler.h"
#include "heap-snapshot-generator-inl.h"
#include "icu_util.h"
+#include "json-parser.h"
#include "messages.h"
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include "natives.h"
#endif
#include "parser.h"
#include "platform.h"
+#include "platform/time.h"
#include "profile-generator-inl.h"
#include "property-details.h"
#include "property.h"
@@ -60,6 +62,7 @@
#include "scanner-character-streams.h"
#include "snapshot.h"
#include "unicode-inl.h"
+#include "utils/random-number-generator.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -219,25 +222,27 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
// HeapIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
}
- i::V8::SetFatalError();
+ isolate->SignalFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
const char* message = "Allocation failed - process out of memory";
callback(location, message);
// If the callback returns, we stop execution.
- UNREACHABLE();
+ FATAL("API fatal error handler returned after process out of memory");
}
bool Utils::ReportApiFailure(const char* location, const char* message) {
FatalErrorCallback callback = GetFatalErrorHandler();
callback(location, message);
- i::V8::SetFatalError();
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->SignalFatalError();
return false;
}
bool V8::IsDead() {
- return i::V8::IsDead();
+ i::Isolate* isolate = i::Isolate::Current();
+ return isolate->IsDead();
}
@@ -276,7 +281,7 @@ static bool ReportEmptyHandle(const char* location) {
*/
static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) {
return !isolate->IsInitialized()
- && i::V8::IsDead() ? ReportV8Dead(location) : false;
+ && isolate->IsDead() ? ReportV8Dead(location) : false;
}
@@ -398,9 +403,6 @@ enum CompressedStartupDataItems {
kSnapshotContext,
kLibraries,
kExperimentalLibraries,
-#if defined(V8_I18N_SUPPORT)
- kI18NExtension,
-#endif
kCompressedStartupDataCount
};
@@ -441,17 +443,6 @@ void V8::GetCompressedStartupData(StartupData* compressed_data) {
exp_libraries_source.length();
compressed_data[kExperimentalLibraries].raw_size =
i::ExperimentalNatives::GetRawScriptsSize();
-
-#if defined(V8_I18N_SUPPORT)
-  i::Vector<const i::byte> i18n_extension_source =
- i::I18NNatives::GetScriptsSource();
- compressed_data[kI18NExtension].data =
- reinterpret_cast<const char*>(i18n_extension_source.start());
- compressed_data[kI18NExtension].compressed_size =
- i18n_extension_source.length();
- compressed_data[kI18NExtension].raw_size =
- i::I18NNatives::GetRawScriptsSize();
-#endif
#endif
}
@@ -481,15 +472,6 @@ void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
decompressed_data[kExperimentalLibraries].data,
decompressed_data[kExperimentalLibraries].raw_size);
i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
-
-#if defined(V8_I18N_SUPPORT)
- ASSERT_EQ(i::I18NNatives::GetRawScriptsSize(),
- decompressed_data[kI18NExtension].raw_size);
- i::Vector<const char> i18n_extension_source(
- decompressed_data[kI18NExtension].data,
- decompressed_data[kI18NExtension].raw_size);
- i::I18NNatives::SetRawScriptsSource(i18n_extension_source);
-#endif
#endif
}
@@ -620,7 +602,8 @@ ResourceConstraints::ResourceConstraints()
: max_young_space_size_(0),
max_old_space_size_(0),
max_executable_size_(0),
- stack_limit_(NULL) { }
+ stack_limit_(NULL),
+ is_memory_constrained_() { }
bool SetResourceConstraints(ResourceConstraints* constraints) {
@@ -641,6 +624,10 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
+ if (constraints->is_memory_constrained().has_value) {
+ isolate->set_is_memory_constrained(
+ constraints->is_memory_constrained().value);
+ }
return true;
}
@@ -656,11 +643,22 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
}
+i::Object** V8::CopyPersistent(i::Object** obj) {
+ i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
+#ifdef DEBUG
+ (*obj)->Verify();
+#endif // DEBUG
+ return result.location();
+}
+
+
void V8::MakeWeak(i::Object** object,
void* parameters,
+ WeakCallback weak_callback,
RevivableCallback weak_reference_callback) {
i::GlobalHandles::MakeWeak(object,
parameters,
+ weak_callback,
weak_reference_callback);
}
@@ -675,14 +673,22 @@ void V8::DisposeGlobal(i::Object** obj) {
}
-// --- H a n d l e s ---
+void V8::Eternalize(Isolate* v8_isolate, Value* value, int* index) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ i::Object* object = *Utils::OpenHandle(value);
+ isolate->eternal_handles()->Create(isolate, object, index);
+}
-HandleScope::HandleScope() {
- Initialize(reinterpret_cast<Isolate*>(i::Isolate::Current()));
+Local<Value> V8::GetEternal(Isolate* v8_isolate, int index) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ return Utils::ToLocal(isolate->eternal_handles()->Get(index));
}
+// --- H a n d l e s ---
+
+
HandleScope::HandleScope(Isolate* isolate) {
Initialize(isolate);
}
@@ -770,7 +776,6 @@ void Context::Exit() {
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
isolate->set_context(last_context);
- isolate->set_context_exit_happened(true);
}
@@ -945,21 +950,62 @@ static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
}
-void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
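+// Appends a length-prefixed tuple to the template's property list: Set()
+// below stores {3, name, value, attribute}, and SetAccessorProperty() stores
+// {5, name, getter, setter, attribute, access_control}.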
+static void TemplateSet(i::Isolate* isolate,
+ v8::Template* templ,
+ int length,
+ v8::Handle<v8::Data>* data) {
+ i::Handle<i::Object> list(Utils::OpenHandle(templ)->property_list(), isolate);
+ if (list->IsUndefined()) {
+ list = NeanderArray().value();
+ Utils::OpenHandle(templ)->set_property_list(*list);
+ }
+ NeanderArray array(list);
+ array.add(Utils::OpenHandle(*v8::Integer::New(length)));
+ for (int i = 0; i < length; i++) {
+ i::Handle<i::Object> value = data[i].IsEmpty() ?
+ i::Handle<i::Object>(isolate->factory()->undefined_value()) :
+ Utils::OpenHandle(*data[i]);
+ array.add(value);
+ }
+}
+
+
+void Template::Set(v8::Handle<String> name,
+ v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list(), isolate);
- if (list->IsUndefined()) {
- list = NeanderArray().value();
- Utils::OpenHandle(this)->set_property_list(*list);
- }
- NeanderArray array(list);
- array.add(Utils::OpenHandle(*name));
- array.add(Utils::OpenHandle(*value));
- array.add(Utils::OpenHandle(*v8::Integer::New(attribute)));
+ const int kSize = 3;
+ v8::Handle<v8::Data> data[kSize] = {
+ name,
+ value,
+ v8::Integer::New(attribute)};
+ TemplateSet(isolate, this, kSize, data);
+}
+
+
+void Template::SetAccessorProperty(
+ v8::Local<v8::String> name,
+ v8::Local<FunctionTemplate> getter,
+ v8::Local<FunctionTemplate> setter,
+ v8::PropertyAttribute attribute,
+ v8::AccessControl access_control) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Template::SetAccessor()")) return;
+ ENTER_V8(isolate);
+ ASSERT(!name.IsEmpty());
+ ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
+ i::HandleScope scope(isolate);
+ const int kSize = 5;
+ v8::Handle<v8::Data> data[kSize] = {
+ name,
+ getter,
+ setter,
+ v8::Integer::New(attribute),
+ v8::Integer::New(access_control)};
+ TemplateSet(isolate, this, kSize, data);
}
@@ -995,69 +1041,48 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
}
-// TODO(dcarney): Remove this abstraction when old callbacks are removed.
-class CallHandlerHelper {
- public:
- static inline void Set(Local<FunctionTemplate> function_template,
- InvocationCallback callback,
- v8::Handle<Value> data) {
- function_template->SetCallHandlerInternal(callback, data);
- }
- static inline void Set(Local<FunctionTemplate> function_template,
- FunctionCallback callback,
- v8::Handle<Value> data) {
- function_template->SetCallHandler(callback, data);
- }
-};
-
-
-template<typename Callback>
static Local<FunctionTemplate> FunctionTemplateNew(
- Callback callback,
+ i::Isolate* isolate,
+ FunctionCallback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
- int length) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
- LOG_API(isolate, "FunctionTemplate::New");
- ENTER_V8(isolate);
+ int length,
+ bool do_not_cache) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
InitializeFunctionTemplate(obj);
- int next_serial_number = isolate->next_serial_number();
- isolate->set_next_serial_number(next_serial_number + 1);
+ obj->set_do_not_cache(do_not_cache);
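+  // Only cacheable templates consume a serial number; uncached templates
+  // keep the default serial number of zero.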
+ int next_serial_number = 0;
+ if (!do_not_cache) {
+ next_serial_number = isolate->next_serial_number() + 1;
+ isolate->set_next_serial_number(next_serial_number);
+ }
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
if (data.IsEmpty()) data = v8::Undefined();
- CallHandlerHelper::Set(Utils::ToLocal(obj), callback, data);
+ Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
obj->set_undetectable(false);
obj->set_needs_access_check(false);
-
if (!signature.IsEmpty())
obj->set_signature(*Utils::OpenHandle(*signature));
return Utils::ToLocal(obj);
}
-
-Local<FunctionTemplate> FunctionTemplate::New(
- InvocationCallback callback,
- v8::Handle<Value> data,
- v8::Handle<Signature> signature,
- int length) {
- return FunctionTemplateNew(callback, data, signature, length);
-}
-
-
Local<FunctionTemplate> FunctionTemplate::New(
FunctionCallback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
int length) {
- return FunctionTemplateNew(callback, data, signature, length);
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
+ LOG_API(isolate, "FunctionTemplate::New");
+ ENTER_V8(isolate);
+ return FunctionTemplateNew(
+ isolate, callback, data, signature, length, false);
}
@@ -1245,16 +1270,14 @@ int TypeSwitch::match(v8::Handle<Value> value) {
#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
- i::Handle<i::Object> foreign = FromCData(cdata); \
+ i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
(obj)->setter(*foreign); \
} while (false)
-template<typename Callback>
-static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
- Callback callback_in,
- v8::Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(function_template)->GetIsolate();
+void FunctionTemplate::SetCallHandler(FunctionCallback callback,
+ v8::Handle<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -1262,28 +1285,12 @@ static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- FunctionCallback callback =
- i::CallbackTable::Register(isolate, callback_in);
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
- Utils::OpenHandle(function_template)->set_call_code(*obj);
+ Utils::OpenHandle(this)->set_call_code(*obj);
}
-void FunctionTemplate::SetCallHandler(InvocationCallback callback,
- v8::Handle<Value> data) {
- FunctionTemplateSetCallHandler(this, callback, data);
-}
-
-void FunctionTemplate::SetCallHandlerInternal(InvocationCallback callback,
- v8::Handle<Value> data) {
- FunctionTemplateSetCallHandler(this, callback, data);
-}
-
-void FunctionTemplate::SetCallHandler(FunctionCallback callback,
- v8::Handle<Value> data) {
- FunctionTemplateSetCallHandler(this, callback, data);
-}
static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
i::Handle<i::AccessorInfo> obj,
@@ -1306,8 +1313,8 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
template<typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Handle<String> name,
- Getter getter_in,
- Setter setter_in,
+ Getter getter,
+ Setter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes,
@@ -1315,11 +1322,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
- AccessorGetterCallback getter =
- i::CallbackTable::Register(isolate, getter_in);
SET_FIELD_WRAPPED(obj, set_getter, getter);
- AccessorSetterCallback setter =
- i::CallbackTable::Register(isolate, setter_in);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1397,124 +1400,14 @@ void FunctionTemplate::ReadOnlyPrototype() {
Utils::OpenHandle(this)->set_read_only_prototype(true);
}
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-static void SetNamedInstancePropertyHandler(
- i::Handle<i::FunctionTemplateInfo> function_template,
- Getter getter_in,
- Setter setter_in,
- Query query_in,
- Deleter remover_in,
- Enumerator enumerator_in,
- Handle<Value> data) {
- i::Isolate* isolate = function_template->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetNamedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- NamedPropertyGetterCallback getter =
- i::CallbackTable::Register(isolate, getter_in);
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- NamedPropertySetterCallback setter =
- i::CallbackTable::Register(isolate, setter_in);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- NamedPropertyQueryCallback query =
- i::CallbackTable::Register(isolate, query_in);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- NamedPropertyDeleterCallback remover =
- i::CallbackTable::Register(isolate, remover_in);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- NamedPropertyEnumeratorCallback enumerator =
- i::CallbackTable::Register(isolate, enumerator_in);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- function_template->set_named_property_handler(*obj);
-}
-
-
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-static void SetIndexedInstancePropertyHandler(
- i::Handle<i::FunctionTemplateInfo> function_template,
- Getter getter_in,
- Setter setter_in,
- Query query_in,
- Deleter remover_in,
- Enumerator enumerator_in,
- Handle<Value> data) {
- i::Isolate* isolate = function_template->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetIndexedInstancePropertyHandler()")) {
- return;
- }
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
- i::Handle<i::InterceptorInfo> obj =
- i::Handle<i::InterceptorInfo>::cast(struct_obj);
-
- IndexedPropertyGetterCallback getter =
- i::CallbackTable::Register(isolate, getter_in);
- if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
- IndexedPropertySetterCallback setter =
- i::CallbackTable::Register(isolate, setter_in);
- if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
- IndexedPropertyQueryCallback query =
- i::CallbackTable::Register(isolate, query_in);
- if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
- IndexedPropertyDeleterCallback remover =
- i::CallbackTable::Register(isolate, remover_in);
- if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
- IndexedPropertyEnumeratorCallback enumerator =
- i::CallbackTable::Register(isolate, enumerator_in);
- if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- function_template->set_indexed_property_handler(*obj);
-}
-
-template<typename Callback>
-static void SetInstanceCallAsFunctionHandler(
- i::Handle<i::FunctionTemplateInfo> function_template,
- Callback callback_in,
- Handle<Value> data) {
- i::Isolate* isolate = function_template->GetIsolate();
- if (IsDeadCheck(isolate,
- "v8::FunctionTemplate::SetInstanceCallAsFunctionHandler()")) {
+void FunctionTemplate::RemovePrototype() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::RemovePrototype()")) {
return;
}
ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
- FunctionCallback callback =
- i::CallbackTable::Register(isolate, callback_in);
- SET_FIELD_WRAPPED(obj, set_callback, callback);
- if (data.IsEmpty()) data = v8::Undefined();
- obj->set_data(*Utils::OpenHandle(*data));
- function_template->set_instance_call_handler(*obj);
+ Utils::OpenHandle(this)->set_remove_prototype(true);
}
@@ -1549,63 +1442,91 @@ Local<ObjectTemplate> ObjectTemplate::New(
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
-static void EnsureConstructor(ObjectTemplate* object_template) {
- if (Utils::OpenHandle(object_template)->constructor()->IsUndefined()) {
- Local<FunctionTemplate> templ = FunctionTemplate::New();
- i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
- constructor->set_instance_template(*Utils::OpenHandle(object_template));
- Utils::OpenHandle(object_template)->set_constructor(*constructor);
+static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
+ ObjectTemplate* object_template) {
+ i::Object* obj = Utils::OpenHandle(object_template)->constructor();
+  if (!obj->IsUndefined()) {
+ i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
+ return i::Handle<i::FunctionTemplateInfo>(info, info->GetIsolate());
}
+ Local<FunctionTemplate> templ = FunctionTemplate::New();
+ i::Handle<i::FunctionTemplateInfo> constructor = Utils::OpenHandle(*templ);
+ constructor->set_instance_template(*Utils::OpenHandle(object_template));
+ Utils::OpenHandle(object_template)->set_constructor(*constructor);
+ return constructor;
}
-static inline void AddPropertyToFunctionTemplate(
- i::Handle<i::FunctionTemplateInfo> cons,
+static inline void AddPropertyToTemplate(
+ i::Handle<i::TemplateInfo> info,
i::Handle<i::AccessorInfo> obj) {
- i::Handle<i::Object> list(cons->property_accessors(), cons->GetIsolate());
+ i::Handle<i::Object> list(info->property_accessors(), info->GetIsolate());
if (list->IsUndefined()) {
list = NeanderArray().value();
- cons->set_property_accessors(*list);
+ info->set_property_accessors(*list);
}
NeanderArray array(list);
array.add(obj);
}
-template<typename Setter, typename Getter, typename Data>
-static bool ObjectTemplateSetAccessor(
- ObjectTemplate* object_template,
- v8::Handle<String> name,
+static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ Template* template_obj) {
+ return Utils::OpenHandle(template_obj);
+}
+
+
+// TODO(dcarney): remove this with ObjectTemplate::SetAccessor
+static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
+ ObjectTemplate* object_template) {
+ EnsureConstructor(object_template);
+ return Utils::OpenHandle(object_template);
+}
+
+
+template<typename Setter, typename Getter, typename Data, typename Template>
+static bool TemplateSetAccessor(
+ Template* template_obj,
+ v8::Local<String> name,
Getter getter,
Setter setter,
Data data,
AccessControl settings,
PropertyAttribute attribute,
- v8::Handle<AccessorSignature> signature) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+ v8::Local<AccessorSignature> signature) {
+ i::Isolate* isolate = Utils::OpenHandle(template_obj)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
- i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
- i::Handle<i::FunctionTemplateInfo> cons(constructor);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(
name, getter, setter, data, settings, attribute, signature);
if (obj.is_null()) return false;
- AddPropertyToFunctionTemplate(cons, obj);
+ i::Handle<i::TemplateInfo> info = GetTemplateInfo(template_obj);
+ AddPropertyToTemplate(info, obj);
return true;
}
-void ObjectTemplate::SetAccessor(v8::Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attribute,
- v8::Handle<AccessorSignature> signature) {
- ObjectTemplateSetAccessor(
+bool Template::SetDeclaredAccessor(
+ Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attribute,
+ Local<AccessorSignature> signature,
+ AccessControl settings) {
+ void* null = NULL;
+ return TemplateSetAccessor(
+ this, name, descriptor, null, null, settings, attribute, signature);
+}
+
+
+void Template::SetNativeDataProperty(v8::Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter,
+ v8::Handle<Value> data,
+ PropertyAttribute attribute,
+ v8::Local<AccessorSignature> signature,
+ AccessControl settings) {
+ TemplateSetAccessor(
this, name, getter, setter, data, settings, attribute, signature);
}
@@ -1617,77 +1538,42 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
AccessControl settings,
PropertyAttribute attribute,
v8::Handle<AccessorSignature> signature) {
- ObjectTemplateSetAccessor(
+ TemplateSetAccessor(
this, name, getter, setter, data, settings, attribute, signature);
}
-bool ObjectTemplate::SetAccessor(Handle<String> name,
- Handle<DeclaredAccessorDescriptor> descriptor,
- AccessControl settings,
- PropertyAttribute attribute,
- Handle<AccessorSignature> signature) {
- void* null = NULL;
- return ObjectTemplateSetAccessor(
- this, name, descriptor, null, null, settings, attribute, signature);
-}
-
-
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(
- ObjectTemplate* object_template,
- Getter getter,
- Setter setter,
- Query query,
- Deleter remover,
- Enumerator enumerator,
+void ObjectTemplate::SetNamedPropertyHandler(
+ NamedPropertyGetterCallback getter,
+ NamedPropertySetterCallback setter,
+ NamedPropertyQueryCallback query,
+ NamedPropertyDeleterCallback remover,
+ NamedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
+ EnsureConstructor(this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- SetNamedInstancePropertyHandler(cons,
- getter,
- setter,
- query,
- remover,
- enumerator,
- data);
-}
-
-
-void ObjectTemplate::SetNamedPropertyHandler(
- NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
- ObjectTemplateSetNamedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
-}
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-void ObjectTemplate::SetNamedPropertyHandler(
- NamedPropertyGetterCallback getter,
- NamedPropertySetterCallback setter,
- NamedPropertyQueryCallback query,
- NamedPropertyDeleterCallback remover,
- NamedPropertyEnumeratorCallback enumerator,
- Handle<Value> data) {
- ObjectTemplateSetNamedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_named_property_handler(*obj);
}
@@ -1736,93 +1622,61 @@ void ObjectTemplate::SetAccessCheckCallbacks(
}
-template<
- typename Getter,
- typename Setter,
- typename Query,
- typename Deleter,
- typename Enumerator>
-void ObjectTemplateSetIndexedPropertyHandler(
- ObjectTemplate* object_template,
- Getter getter,
- Setter setter,
- Query query,
- Deleter remover,
- Enumerator enumerator,
+void ObjectTemplate::SetIndexedPropertyHandler(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback remover,
+ IndexedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
+ EnsureConstructor(this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- SetIndexedInstancePropertyHandler(cons,
- getter,
- setter,
- query,
- remover,
- enumerator,
- data);
-}
-
-
-void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetter getter,
- IndexedPropertySetter setter,
- IndexedPropertyQuery query,
- IndexedPropertyDeleter remover,
- IndexedPropertyEnumerator enumerator,
- Handle<Value> data) {
- ObjectTemplateSetIndexedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
-}
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
+ i::Handle<i::InterceptorInfo> obj =
+ i::Handle<i::InterceptorInfo>::cast(struct_obj);
+ if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
+ if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
+ if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
+ if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-void ObjectTemplate::SetIndexedPropertyHandler(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter,
- IndexedPropertyQueryCallback query,
- IndexedPropertyDeleterCallback remover,
- IndexedPropertyEnumeratorCallback enumerator,
- Handle<Value> data) {
- ObjectTemplateSetIndexedPropertyHandler(
- this, getter, setter, query, remover, enumerator, data);
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_indexed_property_handler(*obj);
}
-template<typename Callback>
-static void ObjectTemplateSetCallAsFunctionHandler(
- ObjectTemplate* object_template,
- Callback callback,
- Handle<Value> data) {
- i::Isolate* isolate = Utils::OpenHandle(object_template)->GetIsolate();
+void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
+ Handle<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate,
"v8::ObjectTemplate::SetCallAsFunctionHandler()")) {
return;
}
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- EnsureConstructor(object_template);
+ EnsureConstructor(this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
- Utils::OpenHandle(object_template)->constructor());
+ Utils::OpenHandle(this)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
- SetInstanceCallAsFunctionHandler(cons, callback, data);
-}
-
-
-void ObjectTemplate::SetCallAsFunctionHandler(InvocationCallback callback,
- Handle<Value> data) {
- return ObjectTemplateSetCallAsFunctionHandler(this, callback, data);
-}
-
-
-void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
- Handle<Value> data) {
- return ObjectTemplateSetCallAsFunctionHandler(this, callback, data);
+ i::Handle<i::Struct> struct_obj =
+ isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ i::Handle<i::CallHandlerInfo> obj =
+ i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ SET_FIELD_WRAPPED(obj, set_callback, callback);
+ if (data.IsEmpty()) data = v8::Undefined();
+ obj->set_data(*Utils::OpenHandle(*data));
+ cons->set_instance_call_handler(*obj);
}
@@ -1862,19 +1716,20 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::PreParserApi::PreParse(&stream);
+ return i::PreParserApi::PreParse(i::Isolate::Current(), &stream);
}
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Isolate* isolate = str->GetIsolate();
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUtf16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::PreParserApi::PreParse(&stream);
+ return i::PreParserApi::PreParse(isolate, &stream);
} else {
i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
- return i::PreParserApi::PreParse(&stream);
+ return i::PreParserApi::PreParse(isolate, &stream);
}
}
@@ -2025,8 +1880,8 @@ Local<Value> Script::Run() {
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
- i::Handle<i::Object> result =
- i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+ i::Handle<i::Object> result = i::Execution::Call(
+ isolate, fun, receiver, 0, NULL, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
raw_result = *result;
}
@@ -2325,8 +2180,8 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
i::Handle<i::JSFunction> fun =
i::Handle<i::JSFunction>(i::JSFunction::cast(object_fun));
- i::Handle<i::Object> value =
- i::Execution::Call(fun, recv, argc, argv, has_pending_exception);
+ i::Handle<i::Object> value = i::Execution::Call(
+ isolate, fun, recv, argc, argv, has_pending_exception);
return value;
}
@@ -2468,7 +2323,7 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Object* raw_object = self->GetElementNoExceptionThrown(index);
+ i::Object* raw_object = self->GetElementNoExceptionThrown(isolate, index);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object));
return scope.Close(Utils::StackFrameToLocal(obj));
}
@@ -2537,6 +2392,22 @@ int StackFrame::GetColumn() const {
}
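+// Returns the script id recorded on this frame, or Message::kNoScriptIdInfo
+// when the frame carries no script id.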
+int StackFrame::GetScriptId() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptId()")) {
+ return Message::kNoScriptIdInfo;
+ }
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> scriptId = GetProperty(self, "scriptId");
+ if (!scriptId->IsSmi()) {
+ return Message::kNoScriptIdInfo;
+ }
+ return i::Smi::cast(*scriptId)->value();
+}
+
+
Local<String> StackFrame::GetScriptName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
@@ -2607,6 +2478,29 @@ bool StackFrame::IsConstructor() const {
}
+// --- J S O N ---
+
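+// Example embedder usage (illustrative):
+//   v8::Local<v8::Value> parsed =
+//       v8::JSON::Parse(v8::String::New("{\"x\": 42}"));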
+Local<Value> JSON::Parse(Local<String> json_string) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::JSON::Parse");
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::String> source = i::Handle<i::String>(
+ FlattenGetString(Utils::OpenHandle(*json_string)));
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> result;
+ if (source->IsSeqOneByteString()) {
+ result = i::JsonParser<true>::Parse(source);
+ } else {
+ result = i::JsonParser<false>::Parse(source);
+ }
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
+ return Utils::ToLocal(
+ i::Handle<i::Object>::cast(scope.CloseAndEscape(result)));
+}
+
+
// --- D a t a ---
bool Value::FullIsUndefined() const {
@@ -2880,7 +2774,7 @@ Local<String> Value::ToString() const {
LOG_API(isolate, "ToString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToString(obj, &has_pending_exception);
+ str = i::Execution::ToString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return ToApiHandle<String>(str);
@@ -2900,7 +2794,7 @@ Local<String> Value::ToDetailString() const {
LOG_API(isolate, "ToDetailString");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- str = i::Execution::ToDetailString(obj, &has_pending_exception);
+ str = i::Execution::ToDetailString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
}
return ToApiHandle<String>(str);
@@ -2920,7 +2814,7 @@ Local<v8::Object> Value::ToObject() const {
LOG_API(isolate, "ToObject");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- val = i::Execution::ToObject(obj, &has_pending_exception);
+ val = i::Execution::ToObject(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
}
return ToApiHandle<Object>(val);
@@ -2958,7 +2852,7 @@ Local<Number> Value::ToNumber() const {
LOG_API(isolate, "ToNumber");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
+ num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
}
return ToApiHandle<Number>(num);
@@ -2976,7 +2870,7 @@ Local<Integer> Value::ToInteger() const {
LOG_API(isolate, "ToInteger");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
+ num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
}
return ToApiHandle<Integer>(num);
@@ -2985,7 +2879,7 @@ Local<Integer> Value::ToInteger() const {
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- ApiCheck(isolate != NULL && isolate->IsInitialized() && !i::V8::IsDead(),
+ ApiCheck(isolate != NULL && isolate->IsInitialized() && !isolate->IsDead(),
"v8::internal::Internals::CheckInitialized()",
"Isolate is not initialized or V8 has died");
}
@@ -3071,12 +2965,6 @@ void v8::ArrayBuffer::CheckCast(Value* that) {
}
-void v8::ArrayBuffer::Allocator::Free(void* data) {
- API_Fatal("v8::ArrayBuffer::Allocator::Free",
- "Override Allocator::Free(void*, size_t)");
-}
-
-
void v8::ArrayBufferView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArrayBufferView(),
@@ -3196,7 +3084,7 @@ double Value::NumberValue() const {
LOG_API(isolate, "NumberValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToNumber(obj, &has_pending_exception);
+ num = i::Execution::ToNumber(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
}
return num->Number();
@@ -3214,7 +3102,7 @@ int64_t Value::IntegerValue() const {
LOG_API(isolate, "IntegerValue");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInteger(obj, &has_pending_exception);
+ num = i::Execution::ToInteger(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
}
if (num->IsSmi()) {
@@ -3236,7 +3124,7 @@ Local<Int32> Value::ToInt32() const {
LOG_API(isolate, "ToInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToInt32(obj, &has_pending_exception);
+ num = i::Execution::ToInt32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
}
return ToApiHandle<Int32>(num);
@@ -3254,7 +3142,7 @@ Local<Uint32> Value::ToUint32() const {
LOG_API(isolate, "ToUInt32");
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
- num = i::Execution::ToUint32(obj, &has_pending_exception);
+ num = i::Execution::ToUint32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
}
return ToApiHandle<Uint32>(num);
@@ -3273,7 +3161,7 @@ Local<Uint32> Value::ToArrayIndex() const {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> string_obj =
- i::Execution::ToString(obj, &has_pending_exception);
+ i::Execution::ToString(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
uint32_t index;
@@ -3301,7 +3189,7 @@ int32_t Value::Int32Value() const {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
- i::Execution::ToInt32(obj, &has_pending_exception);
+ i::Execution::ToInt32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -3382,7 +3270,7 @@ uint32_t Value::Uint32Value() const {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> num =
- i::Execution::ToUint32(obj, &has_pending_exception);
+ i::Execution::ToUint32(isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, 0);
if (num->IsSmi()) {
return i::Smi::cast(*num)->value();
@@ -3502,7 +3390,7 @@ Local<Value> v8::Object::Get(uint32_t index) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::Object::GetElement(self, index);
+ i::Handle<i::Object> result = i::Object::GetElement(isolate, self, index);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -3519,7 +3407,7 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsName()) {
EXCEPTION_PREAMBLE(isolate);
- key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
+ key_obj = i::Execution::ToString(isolate, key_obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
@@ -3729,7 +3617,7 @@ bool v8::Object::Delete(uint32_t index) {
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::DeleteElement(self, index)->IsTrue();
+ return i::JSReceiver::DeleteElement(self, index)->IsTrue();
}
@@ -3758,7 +3646,8 @@ static inline bool ObjectSetAccessor(Object* obj,
name, getter, setter, data, settings, attributes, signature);
if (info.is_null()) return false;
bool fast = Utils::OpenHandle(obj)->HasFastProperties();
- i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(obj), info);
+ i::Handle<i::Object> result =
+ i::JSObject::SetAccessor(Utils::OpenHandle(obj), info);
if (result.is_null() || result->IsUndefined()) return false;
if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(obj), 0);
return true;
@@ -3766,17 +3655,6 @@ static inline bool ObjectSetAccessor(Object* obj,
bool Object::SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter,
- v8::Handle<Value> data,
- AccessControl settings,
- PropertyAttribute attributes) {
- return ObjectSetAccessor(
- this, name, getter, setter, data, settings, attributes);
-}
-
-
-bool Object::SetAccessor(Handle<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
v8::Handle<Value> data,
@@ -3787,10 +3665,10 @@ bool Object::SetAccessor(Handle<String> name,
}
-bool Object::SetAccessor(Handle<String> name,
- Handle<DeclaredAccessorDescriptor> descriptor,
- AccessControl settings,
- PropertyAttribute attributes) {
+bool Object::SetDeclaredAccessor(Local<String> name,
+ Local<DeclaredAccessorDescriptor> descriptor,
+ PropertyAttribute attributes,
+ AccessControl settings) {
void* null = NULL;
return ObjectSetAccessor(
this, name, descriptor, null, null, settings, attributes);
@@ -4020,7 +3898,7 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
- self->DeleteHiddenProperty(*key_string);
+ i::JSObject::DeleteHiddenProperty(self, key_string);
return true;
}
@@ -4231,7 +4109,7 @@ bool v8::Object::IsCallable() {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
if (obj->IsJSFunction()) return true;
- return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
+ return i::Execution::GetFunctionDelegate(isolate, obj)->IsJSFunction();
}
@@ -4255,15 +4133,15 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
fun = i::Handle<i::JSFunction>::cast(obj);
} else {
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetFunctionDelegate(obj, &has_pending_exception);
+ i::Handle<i::Object> delegate = i::Execution::TryGetFunctionDelegate(
+ isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
fun = i::Handle<i::JSFunction>::cast(delegate);
recv_obj = obj;
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -4292,14 +4170,14 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::Handle<i::JSObject>::cast(returned)));
}
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> delegate =
- i::Execution::TryGetConstructorDelegate(obj, &has_pending_exception);
+ i::Handle<i::Object> delegate = i::Execution::TryGetConstructorDelegate(
+ isolate, obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
if (!delegate->IsUndefined()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, obj, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
ASSERT(!delegate->IsUndefined());
return Utils::ToLocal(scope.CloseAndEscape(returned));
@@ -4308,6 +4186,19 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
}
+Local<Function> Function::New(Isolate* v8_isolate,
+ FunctionCallback callback,
+ Local<Value> data,
+ int length) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ LOG_API(isolate, "Function::New");
+ ENTER_V8(isolate);
+ return FunctionTemplateNew(
+ isolate, callback, data, Local<Signature>(), length, true)->
+ GetFunction();
+}
+
+
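Function::New wraps FunctionTemplateNew with do-not-cache semantics (see the kDoNotCacheBit handling in apinatives.js below), so embedders can create a function without managing a template. A sketch, assuming an entered context:

    static void Greet(const v8::FunctionCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::String::New("hello"));
    }
    // ...
    v8::Local<v8::Function> fn = v8::Function::New(isolate, Greet);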
Local<v8::Object> Function::NewInstance() const {
return NewInstance(0, NULL);
}
@@ -4350,8 +4241,8 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned =
- i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+ i::Handle<i::Object> returned = i::Execution::Call(
+ isolate, fun, recv_obj, argc, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
raw_result = *returned;
}
@@ -5289,8 +5180,8 @@ bool v8::V8::Initialize() {
}
-void v8::V8::SetEntropySource(EntropySource source) {
- i::V8::SetEntropySource(source);
+void v8::V8::SetEntropySource(EntropySource entropy_source) {
+ i::RandomNumberGenerator::SetEntropySource(entropy_source);
}
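The entropy hook now feeds the new internal RandomNumberGenerator instead of i::V8. A sketch of an embedder-supplied source, assuming v8.h declares EntropySource as bool (*)(unsigned char* buffer, size_t length) and that it is registered before initialization so seeding can use it:

    static bool GetEntropy(unsigned char* buffer, size_t length) {
      return ReadSystemRandom(buffer, length);  // hypothetical CSPRNG helper
    }
    // ...
    v8::V8::SetEntropySource(GetEntropy);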
@@ -5435,7 +5326,8 @@ bool v8::V8::IdleNotification(int hint) {
// continue to call IdleNotification.
i::Isolate* isolate = i::Isolate::Current();
if (isolate == NULL || !isolate->IsInitialized()) return true;
- return i::V8::IdleNotification(hint);
+ if (!i::FLAG_use_idle_notification) return true;
+ return isolate->heap()->IdleNotification(hint);
}
@@ -5463,18 +5355,6 @@ const char* v8::V8::GetVersion() {
}
-static i::Handle<i::FunctionTemplateInfo>
- EnsureConstructor(i::Handle<i::ObjectTemplateInfo> templ) {
- if (templ->constructor()->IsUndefined()) {
- Local<FunctionTemplate> constructor = FunctionTemplate::New();
- Utils::OpenHandle(*constructor)->set_instance_template(*templ);
- templ->set_constructor(*Utils::OpenHandle(*constructor));
- }
- return i::Handle<i::FunctionTemplateInfo>(
- i::FunctionTemplateInfo::cast(templ->constructor()));
-}
-
-
static i::Handle<i::Context> CreateEnvironment(
i::Isolate* isolate,
v8::ExtensionConfiguration* extensions,
@@ -5491,13 +5371,11 @@ static i::Handle<i::Context> CreateEnvironment(
if (!global_template.IsEmpty()) {
// Make sure that the global_template has a constructor.
- global_constructor =
- EnsureConstructor(Utils::OpenHandle(*global_template));
+ global_constructor = EnsureConstructor(*global_template);
// Create a fresh template for the global proxy object.
proxy_template = ObjectTemplate::New();
- proxy_constructor =
- EnsureConstructor(Utils::OpenHandle(*proxy_template));
+ proxy_constructor = EnsureConstructor(*proxy_template);
// Set the global template to be the prototype template of
// global proxy template.
@@ -5540,26 +5418,6 @@ static i::Handle<i::Context> CreateEnvironment(
return env;
}
-#ifdef V8_USE_UNSAFE_HANDLES
-Persistent<Context> v8::Context::New(
- v8::ExtensionConfiguration* extensions,
- v8::Handle<ObjectTemplate> global_template,
- v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- Isolate* external_isolate = reinterpret_cast<Isolate*>(isolate);
- EnsureInitializedForIsolate(isolate, "v8::Context::New()");
- LOG_API(isolate, "Context::New");
- ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
- i::HandleScope scope(isolate);
- i::Handle<i::Context> env =
- CreateEnvironment(isolate, extensions, global_template, global_object);
- if (env.is_null()) return Persistent<Context>();
- return Persistent<Context>::New(external_isolate, Utils::ToLocal(env));
-}
-#endif
-
-
Local<Context> v8::Context::New(
v8::Isolate* external_isolate,
v8::ExtensionConfiguration* extensions,
@@ -6202,7 +6060,7 @@ Local<v8::Value> v8::Date::New(double time) {
ENTER_V8(isolate);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj =
- i::Execution::NewDate(time, &has_pending_exception);
+ i::Execution::NewDate(isolate, time, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Value>());
return Utils::ToLocal(obj);
}
@@ -6904,47 +6762,6 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
}
-void V8::PauseProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->PauseProfiler();
-}
-
-
-void V8::ResumeProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->ResumeProfiler();
-}
-
-
-bool V8::IsProfilerPaused() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->logger()->IsProfilerPaused();
-}
-
-
-int V8::GetCurrentThreadId() {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
- return isolate->thread_id().ToInteger();
-}
-
-
-void V8::TerminateExecution(int thread_id) {
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
- API_ENTRY_CHECK(isolate, "V8::TerminateExecution()");
- // If the thread_id identifies the current thread just terminate
- // execution right away. Otherwise, ask the thread manager to
- // terminate the thread with the given id if any.
- i::ThreadId internal_tid = i::ThreadId::FromInteger(thread_id);
- if (isolate->thread_id().Equals(internal_tid)) {
- isolate->stack_guard()->TerminateExecution();
- } else {
- isolate->thread_manager()->TerminateExecution(internal_tid);
- }
-}
-
-
void V8::TerminateExecution(Isolate* isolate) {
// If no isolate is supplied, use the default isolate.
if (isolate != NULL) {
@@ -7178,37 +6995,6 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->debug_event_callback() != NULL) {
- isolate->debug_event_callback()(event_details.GetEvent(),
- event_details.GetExecutionState(),
- event_details.GetEventData(),
- event_details.GetCallbackData());
- }
-}
-
-
-bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
- ENTER_V8(isolate);
-
- isolate->set_debug_event_callback(that);
-
- i::HandleScope scope(isolate);
- i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
- if (that != NULL) {
- foreign =
- isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
- }
- isolate->debugger()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
- return true;
-}
-
-
bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener2()");
@@ -7268,35 +7054,6 @@ void Debug::DebugBreakForCommand(ClientData* data, Isolate* isolate) {
}
-static void MessageHandlerWrapper(const v8::Debug::Message& message) {
- i::Isolate* isolate = i::Isolate::Current();
- if (isolate->message_handler()) {
- v8::String::Value json(message.GetJSON());
- (isolate->message_handler())(*json, json.length(), message.GetClientData());
- }
-}
-
-
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
- bool message_handler_thread) {
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
- ENTER_V8(isolate);
-
- // Message handler thread not supported any more. Parameter temporarily left
- // in the API for client compatibility reasons.
- CHECK(!message_handler_thread);
-
- // TODO(sgjesse) support the old message handler API through a simple wrapper.
- isolate->set_message_handler(handler);
- if (handler != NULL) {
- isolate->debugger()->SetMessageHandler(MessageHandlerWrapper);
- } else {
- isolate->debugger()->SetMessageHandler(NULL);
- }
-}
-
-
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
@@ -7325,7 +7082,8 @@ void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::SetHostDispatchHandler");
ENTER_V8(isolate);
- isolate->debugger()->SetHostDispatchHandler(handler, period);
+ isolate->debugger()->SetHostDispatchHandler(
+ handler, i::TimeDelta::FromMilliseconds(period));
}
@@ -7399,7 +7157,7 @@ void Debug::DisableAgent() {
void Debug::ProcessDebugMessages() {
- i::Execution::ProcessDebugMessages(true);
+ i::Execution::ProcessDebugMessages(i::Isolate::Current(), true);
}
@@ -7444,8 +7202,6 @@ Handle<String> CpuProfileNode::GetFunctionName() const {
int CpuProfileNode::GetScriptId() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptId");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
return entry->script_id();
@@ -7462,43 +7218,29 @@ Handle<String> CpuProfileNode::GetScriptResourceName() const {
int CpuProfileNode::GetLineNumber() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
}
-double CpuProfileNode::GetTotalTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
-}
-
-
-double CpuProfileNode::GetSelfTime() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
- return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
+const char* CpuProfileNode::GetBailoutReason() const {
+ const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ return node->entry()->bailout_reason();
}
-double CpuProfileNode::GetTotalSamplesCount() const {
+double CpuProfileNode::GetSelfSamplesCount() const {
i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
- return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
+ IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
+ return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
}
-double CpuProfileNode::GetSelfSamplesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
+unsigned CpuProfileNode::GetHitCount() const {
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
}
unsigned CpuProfileNode::GetCallUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
}
@@ -7509,15 +7251,11 @@ unsigned CpuProfileNode::GetNodeId() const {
int CpuProfileNode::GetChildrenCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
}
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
return reinterpret_cast<const CpuProfileNode*>(child);
@@ -7538,8 +7276,6 @@ void CpuProfile::Delete() {
unsigned CpuProfile::GetUid() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
return reinterpret_cast<const i::CpuProfile*>(this)->uid();
}
@@ -7554,8 +7290,6 @@ Handle<String> CpuProfile::GetTitle() const {
const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
}
@@ -7569,13 +7303,13 @@ const CpuProfileNode* CpuProfile::GetSample(int index) const {
int64_t CpuProfile::GetStartTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return profile->start_time_us();
+ return (profile->start_time() - i::Time::UnixEpoch()).InMicroseconds();
}
int64_t CpuProfile::GetEndTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return profile->end_time_us();
+ return (profile->end_time() - i::Time::UnixEpoch()).InMicroseconds();
}
@@ -7589,6 +7323,13 @@ int CpuProfiler::GetProfileCount() {
}
+void CpuProfiler::SetSamplingInterval(int us) {
+ ASSERT(us >= 0);
+ return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
+ i::TimeDelta::FromMicroseconds(us));
+}
+
+
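The interval is passed straight through as a TimeDelta; the ASSERT only guards against negative values. A usage sketch, assuming the interval must be set while no profiles are being recorded:

    v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
    profiler->SetSamplingInterval(100);  // sample every 100 microseconds
    profiler->StartCpuProfiling(v8::String::New("startup"));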
const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(index));
@@ -7613,6 +7354,19 @@ void CpuProfiler::DeleteAllCpuProfiles() {
}
+void CpuProfiler::SetIdle(bool is_idle) {
+ i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
+ i::StateTag state = isolate->current_vm_state();
+ ASSERT(state == i::EXTERNAL || state == i::IDLE);
+ if (isolate->js_entry_sp() != NULL) return;
+ if (is_idle) {
+ isolate->set_current_vm_state(i::IDLE);
+ } else if (state == i::IDLE) {
+ isolate->set_current_vm_state(i::EXTERNAL);
+ }
+}
+
+
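SetIdle flips the recorded VM state between EXTERNAL and IDLE so profiler ticks taken while the embedder waits in its message loop are attributed to idleness, and it deliberately does nothing while JavaScript frames are on the stack (js_entry_sp() != NULL). An event-loop sketch; WaitForNextEvent and DispatchEvent are hypothetical embedder calls:

    for (;;) {
      isolate->GetCpuProfiler()->SetIdle(true);
      Event event = WaitForNextEvent();    // block outside V8
      isolate->GetCpuProfiler()->SetIdle(false);
      DispatchEvent(event);                // may re-enter JavaScript
    }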
static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
return const_cast<i::HeapGraphEdge*>(
reinterpret_cast<const i::HeapGraphEdge*>(edge));
@@ -8091,20 +7845,6 @@ void DeferredHandles::Iterate(ObjectVisitor* v) {
}
-v8::Handle<v8::Value> InvokeAccessorGetter(
- v8::Local<v8::String> property,
- const v8::AccessorInfo& info,
- v8::AccessorGetter getter) {
- Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
- Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
- getter));
- // Leaving JavaScript.
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, getter_address);
- return getter(property, info);
-}
-
-
void InvokeAccessorGetterCallback(
v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
@@ -8115,19 +7855,7 @@ void InvokeAccessorGetterCallback(
getter));
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, getter_address);
- return getter(property, info);
-}
-
-
-v8::Handle<v8::Value> InvokeInvocationCallback(
- const v8::Arguments& args,
- v8::InvocationCallback callback) {
- Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
- Address callback_address =
- reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, callback_address);
- return callback(args);
+ getter(property, info);
}
@@ -8138,7 +7866,7 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, callback_address);
- return callback(info);
+ callback(info);
}
diff --git a/chromium/v8/src/api.h b/chromium/v8/src/api.h
index 0f33bc815fe..51bc4942b24 100644
--- a/chromium/v8/src/api.h
+++ b/chromium/v8/src/api.h
@@ -125,8 +125,8 @@ template <typename T> inline T ToCData(v8::internal::Object* obj) {
template <typename T>
-inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
- v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+inline v8::internal::Handle<v8::internal::Object> FromCData(
+ v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
@@ -690,19 +690,11 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
// Interceptor functions called from generated inline caches to notify
// CPU profiler that external callbacks are invoked.
-v8::Handle<v8::Value> InvokeAccessorGetter(
- v8::Local<v8::String> property,
- const v8::AccessorInfo& info,
- v8::AccessorGetter getter);
-
-
void InvokeAccessorGetterCallback(
v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
v8::AccessorGetterCallback getter);
-v8::Handle<v8::Value> InvokeInvocationCallback(const v8::Arguments& args,
- v8::InvocationCallback callback);
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback);
diff --git a/chromium/v8/src/apinatives.js b/chromium/v8/src/apinatives.js
index ccbedd6d397..5fb36c09e72 100644
--- a/chromium/v8/src/apinatives.js
+++ b/chromium/v8/src/apinatives.js
@@ -74,25 +74,31 @@ function InstantiateFunction(data, name) {
cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
- cache[serialNumber] = fun;
- var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
var flags = %GetTemplateField(data, kApiFlagOffset);
- // Note: Do not directly use an object template as a condition, our
- // internal ToBoolean doesn't handle that!
- fun.prototype = typeof prototype === 'undefined' ?
- {} : Instantiate(prototype);
- if (flags & (1 << kReadOnlyPrototypeBit)) {
- %FunctionSetReadOnlyPrototype(fun);
- }
- %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
- var parent = %GetTemplateField(data, kApiParentTemplateOffset);
- // Note: Do not directly use a function template as a condition, our
- // internal ToBoolean doesn't handle that!
- if (!(typeof parent === 'undefined')) {
- var parent_fun = Instantiate(parent);
- %SetPrototype(fun.prototype, parent_fun.prototype);
+ var doNotCache = flags & (1 << kDoNotCacheBit);
+ if (!doNotCache) cache[serialNumber] = fun;
+ if (flags & (1 << kRemovePrototypeBit)) {
+ %FunctionRemovePrototype(fun);
+ } else {
+ var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+ // Note: Do not directly use an object template as a condition, our
+ // internal ToBoolean doesn't handle that!
+ fun.prototype = typeof prototype === 'undefined' ?
+ {} : Instantiate(prototype);
+ if (flags & (1 << kReadOnlyPrototypeBit)) {
+ %FunctionSetReadOnlyPrototype(fun);
+ }
+ %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
+ var parent = %GetTemplateField(data, kApiParentTemplateOffset);
+ // Note: Do not directly use a function template as a condition, our
+ // internal ToBoolean doesn't handle that!
+ if (!(typeof parent === 'undefined')) {
+ var parent_fun = Instantiate(parent);
+ %SetPrototype(fun.prototype, parent_fun.prototype);
+ }
}
ConfigureTemplateInstance(fun, data);
+ if (doNotCache) return fun;
} catch (e) {
cache[serialNumber] = kUninitialized;
throw e;
@@ -104,19 +110,32 @@ function InstantiateFunction(data, name) {
function ConfigureTemplateInstance(obj, data) {
var properties = %GetTemplateField(data, kApiPropertyListOffset);
- if (properties) {
- // Disable access checks while instantiating the object.
- var requires_access_checks = %DisableAccessChecks(obj);
- try {
- for (var i = 0; i < properties[0]; i += 3) {
+ if (!properties) return;
+ // Disable access checks while instantiating the object.
+ var requires_access_checks = %DisableAccessChecks(obj);
+ try {
+ for (var i = 1; i < properties[0];) {
+ var length = properties[i];
+ if (length == 3) {
var name = properties[i + 1];
var prop_data = properties[i + 2];
var attributes = properties[i + 3];
var value = Instantiate(prop_data, name);
%SetProperty(obj, name, value, attributes);
+ } else if (length == 5) {
+ var name = properties[i + 1];
+ var getter = properties[i + 2];
+ var setter = properties[i + 3];
+ var attribute = properties[i + 4];
+ var access_control = properties[i + 5];
+ %SetAccessorProperty(
+ obj, name, getter, setter, attribute, access_control);
+ } else {
+ throw "Bad properties array";
}
- } finally {
- if (requires_access_checks) %EnableAccessChecks(obj);
+ i += length + 1;
}
+ } finally {
+ if (requires_access_checks) %EnableAccessChecks(obj);
}
}
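The property list now stores the payload length in slot 0, followed by variable-length records tagged by their own length word. A hypothetical layout with one data property and one accessor property:

    [ payload_length,
      3, name, prop_data, attributes,                       // data property
      5, name, getter, setter, attribute, access_control ]  // accessor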
diff --git a/chromium/v8/src/arguments.cc b/chromium/v8/src/arguments.cc
index 11d9279e81e..287805717e5 100644
--- a/chromium/v8/src/arguments.cc
+++ b/chromium/v8/src/arguments.cc
@@ -34,49 +34,6 @@ namespace v8 {
namespace internal {
-static bool Match(void* a, void* b) {
- return a == b;
-}
-
-
-static uint32_t Hash(void* function) {
- uintptr_t as_int = reinterpret_cast<uintptr_t>(function);
- if (sizeof(function) == 4) return static_cast<uint32_t>(as_int);
- uint64_t as_64 = static_cast<uint64_t>(as_int);
- return
- static_cast<uint32_t>(as_64 >> 32) ^
- static_cast<uint32_t>(as_64);
-}
-
-
-CallbackTable::CallbackTable(): map_(Match, 64) {}
-
-
-bool CallbackTable::Contains(void* function) {
- ASSERT(function != NULL);
- return map_.Lookup(function, Hash(function), false) != NULL;
-}
-
-
-void CallbackTable::InsertCallback(Isolate* isolate,
- void* function,
- bool returns_void) {
- if (function == NULL) return;
- // Don't store for performance.
- if (kStoreVoidFunctions != returns_void) return;
- CallbackTable* table = isolate->callback_table();
- if (table == NULL) {
- table = new CallbackTable();
- isolate->set_callback_table(table);
- }
- typedef HashMap::Entry Entry;
- Entry* entry = table->map_.Lookup(function, Hash(function), true);
- ASSERT(entry != NULL);
- ASSERT(entry->value == NULL || entry->value == function);
- entry->value = function;
-}
-
-
template<typename T>
template<typename V>
v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
@@ -88,110 +45,67 @@ v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
}
-v8::Handle<v8::Value> FunctionCallbackArguments::Call(InvocationCallback f) {
+v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f);
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- if (new_style) {
- FunctionCallback c = reinterpret_cast<FunctionCallback>(f);
- FunctionCallbackInfo<v8::Value> info(end(),
- argv_,
- argc_,
- is_construct_call_);
- c(info);
- } else {
- v8::Arguments args(end(),
- argv_,
- argc_,
- is_construct_call_);
- v8::Handle<v8::Value> return_value = f(args);
- if (!return_value.IsEmpty()) return return_value;
- }
+ FunctionCallbackInfo<v8::Value> info(end(),
+ argv_,
+ argc_,
+ is_construct_call_);
+ f(info);
return GetReturnValue<v8::Value>(isolate);
}
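With the CallbackTable dispatch gone, Call() assumes every callback is new-style: results travel through the return-value slot rather than a returned handle. A sketch of such a callback, for contrast with the removed InvocationCallback form:

    static void Answer(const v8::FunctionCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::Integer::New(42));  // no C++ return value
    }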
-#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f) { \
+#define WRITE_CALL_0(Function, ReturnValue) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(info); \
- } else { \
- v8::AccessorInfo info(end()); \
- v8::Handle<ReturnValue> return_value = f(info); \
- if (!return_value.IsEmpty()) return return_value; \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
-#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
+
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(arg1, info); \
- } else { \
- v8::AccessorInfo info(end()); \
- v8::Handle<ReturnValue> return_value = f(arg1, info); \
- if (!return_value.IsEmpty()) return return_value; \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
-#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
-v8::Handle<ReturnValue> PropertyCallbackArguments::Call(OldFunction f, \
+
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1, \
Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(arg1, arg2, info); \
- } else { \
- v8::AccessorInfo info(end()); \
- v8::Handle<ReturnValue> return_value = f(arg1, arg2, info); \
- if (!return_value.IsEmpty()) return return_value; \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
-#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
-void PropertyCallbackArguments::Call(OldFunction f, \
+
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
+void PropertyCallbackArguments::Call(Function f, \
Arg1 arg1, \
Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
- void* f_as_void = CallbackTable::FunctionToVoidPtr(f); \
- bool new_style = CallbackTable::ReturnsVoid(isolate, f_as_void); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- if (new_style) { \
- NewFunction c = reinterpret_cast<NewFunction>(f); \
- PropertyCallbackInfo<ReturnValue> info(end()); \
- c(arg1, arg2, info); \
- } else { \
- v8::AccessorInfo info(end()); \
- f(arg1, arg2, info); \
- } \
+ PropertyCallbackInfo<ReturnValue> info(end()); \
+ f(arg1, arg2, info); \
}
+
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
diff --git a/chromium/v8/src/arguments.h b/chromium/v8/src/arguments.h
index f9dca110c37..c1db98b53db 100644
--- a/chromium/v8/src/arguments.h
+++ b/chromium/v8/src/arguments.h
@@ -83,116 +83,49 @@ class Arguments BASE_EMBEDDED {
};
-// mappings from old property callbacks to new ones
-// F(old name, new name, return value, parameters...)
-//
+// For each type of callback, we have a list of arguments
+// They are used to generate the Call() functions below
// These aren't included in the list as they have duplicate signatures
-// F(NamedPropertyEnumerator, NamedPropertyEnumeratorCallback, ...)
-// F(NamedPropertyGetter, NamedPropertyGetterCallback, ...)
+// F(NamedPropertyEnumeratorCallback, ...)
+// F(NamedPropertyGetterCallback, ...)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
- F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback, v8::Array) \
+ F(IndexedPropertyEnumeratorCallback, v8::Array) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
- F(AccessorGetter, AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
- F(NamedPropertyQuery, \
- NamedPropertyQueryCallback, \
+ F(AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
+ F(NamedPropertyQueryCallback, \
v8::Integer, \
v8::Local<v8::String>) \
- F(NamedPropertyDeleter, \
- NamedPropertyDeleterCallback, \
+ F(NamedPropertyDeleterCallback, \
v8::Boolean, \
v8::Local<v8::String>) \
- F(IndexedPropertyGetter, \
- IndexedPropertyGetterCallback, \
+ F(IndexedPropertyGetterCallback, \
v8::Value, \
uint32_t) \
- F(IndexedPropertyQuery, \
- IndexedPropertyQueryCallback, \
+ F(IndexedPropertyQueryCallback, \
v8::Integer, \
uint32_t) \
- F(IndexedPropertyDeleter, \
- IndexedPropertyDeleterCallback, \
+ F(IndexedPropertyDeleterCallback, \
v8::Boolean, \
uint32_t) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
- F(NamedPropertySetter, \
- NamedPropertySetterCallback, \
+ F(NamedPropertySetterCallback, \
v8::Value, \
v8::Local<v8::String>, \
v8::Local<v8::Value>) \
- F(IndexedPropertySetter, \
- IndexedPropertySetterCallback, \
+ F(IndexedPropertySetterCallback, \
v8::Value, \
uint32_t, \
v8::Local<v8::Value>) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
- F(AccessorSetter, \
- AccessorSetterCallback, \
+ F(AccessorSetterCallback, \
void, \
v8::Local<v8::String>, \
v8::Local<v8::Value>) \
-// All property callbacks as well as invocation callbacks
-#define FOR_EACH_CALLBACK_TABLE_MAPPING(F) \
- F(InvocationCallback, FunctionCallback) \
- F(AccessorGetter, AccessorGetterCallback) \
- F(AccessorSetter, AccessorSetterCallback) \
- F(NamedPropertySetter, NamedPropertySetterCallback) \
- F(NamedPropertyQuery, NamedPropertyQueryCallback) \
- F(NamedPropertyDeleter, NamedPropertyDeleterCallback) \
- F(IndexedPropertyGetter, IndexedPropertyGetterCallback) \
- F(IndexedPropertySetter, IndexedPropertySetterCallback) \
- F(IndexedPropertyQuery, IndexedPropertyQueryCallback) \
- F(IndexedPropertyDeleter, IndexedPropertyDeleterCallback) \
- F(IndexedPropertyEnumerator, IndexedPropertyEnumeratorCallback) \
-
-
-// TODO(dcarney): Remove this class when old callbacks are gone.
-class CallbackTable {
- public:
- static const bool kStoreVoidFunctions = false;
- static inline bool ReturnsVoid(Isolate* isolate, void* function) {
- CallbackTable* table = isolate->callback_table();
- bool contains =
- table != NULL &&
- table->map_.occupancy() != 0 &&
- table->Contains(function);
- return contains == kStoreVoidFunctions;
- }
-
- STATIC_ASSERT(sizeof(intptr_t) == sizeof(AccessorGetterCallback));
-
- template<typename F>
- static inline void* FunctionToVoidPtr(F function) {
- return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(function));
- }
-
-#define WRITE_REGISTER(OldFunction, NewFunction) \
- static NewFunction Register(Isolate* isolate, OldFunction f) { \
- InsertCallback(isolate, FunctionToVoidPtr(f), false); \
- return reinterpret_cast<NewFunction>(f); \
- } \
- \
- static NewFunction Register(Isolate* isolate, NewFunction f) { \
- InsertCallback(isolate, FunctionToVoidPtr(f), true); \
- return f; \
- }
- FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER)
-#undef WRITE_REGISTER
-
- private:
- CallbackTable();
- bool Contains(void* function);
- static void InsertCallback(Isolate* isolate,
- void* function,
- bool returns_void);
- HashMap map_;
- DISALLOW_COPY_AND_ASSIGN(CallbackTable);
-};
-
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
@@ -218,7 +151,6 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
- // TODO(dcarney): create a new zap value for this.
this->end()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
@@ -243,6 +175,10 @@ class PropertyCallbackArguments
static const int kArgsLength = T::kArgsLength;
static const int kThisIndex = T::kThisIndex;
static const int kHolderIndex = T::kHolderIndex;
+ static const int kDataIndex = T::kDataIndex;
+ static const int kReturnValueDefaultValueIndex =
+ T::kReturnValueDefaultValueIndex;
+ static const int kIsolateIndex = T::kIsolateIndex;
PropertyCallbackArguments(Isolate* isolate,
Object* data,
@@ -271,17 +207,17 @@ class PropertyCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
-#define WRITE_CALL_0(OldFunction, NewFunction, ReturnValue) \
- v8::Handle<ReturnValue> Call(OldFunction f); \
+#define WRITE_CALL_0(Function, ReturnValue) \
+ v8::Handle<ReturnValue> Call(Function f); \
-#define WRITE_CALL_1(OldFunction, NewFunction, ReturnValue, Arg1) \
- v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1); \
+#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
+ v8::Handle<ReturnValue> Call(Function f, Arg1 arg1); \
-#define WRITE_CALL_2(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
- v8::Handle<ReturnValue> Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
+#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
+ v8::Handle<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2); \
-#define WRITE_CALL_2_VOID(OldFunction, NewFunction, ReturnValue, Arg1, Arg2) \
- void Call(OldFunction f, Arg1 arg1, Arg2 arg2); \
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
+ void Call(Function f, Arg1 arg1, Arg2 arg2); \
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
@@ -336,7 +272,7 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- v8::Handle<v8::Value> Call(InvocationCallback f);
+ v8::Handle<v8::Value> Call(FunctionCallback f);
private:
internal::Object** argv_;
diff --git a/chromium/v8/src/arm/assembler-arm-inl.h b/chromium/v8/src/arm/assembler-arm-inl.h
index bfe9bc8335a..a1d1e1b5670 100644
--- a/chromium/v8/src/arm/assembler-arm-inl.h
+++ b/chromium/v8/src/arm/assembler-arm-inl.h
@@ -279,7 +279,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -292,12 +292,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
diff --git a/chromium/v8/src/arm/assembler-arm.cc b/chromium/v8/src/arm/assembler-arm.cc
index a9db5a59949..bd8b0613eb9 100644
--- a/chromium/v8/src/arm/assembler-arm.cc
+++ b/chromium/v8/src/arm/assembler-arm.cc
@@ -39,6 +39,7 @@
#if V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
+#include "macro-assembler.h"
#include "serialize.h"
namespace v8 {
@@ -152,7 +153,8 @@ void CpuFeatures::Probe() {
#else // __arm__
// Probe for additional features not already known to be available.
- if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) {
+ CPU cpu;
+ if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
@@ -161,38 +163,40 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << ARMv7;
}
- if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) {
+ if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
found_by_runtime_probing_only_ |= 1u << NEON;
}
- if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
+ if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
- if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) {
+ if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
}
if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
- && OS::ArmCpuHasFeature(ARMv7)) {
+ && cpu.architecture() >= 7) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
- CpuImplementer implementer = OS::GetCpuImplementer();
- if (implementer == QUALCOMM_IMPLEMENTER &&
- FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
+ // Use movw/movt for QUALCOMM ARMv7 cores.
+ if (cpu.implementer() == CPU::QUALCOMM &&
+ cpu.architecture() >= 7 &&
+ FLAG_enable_movw_movt) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
- CpuPart part = OS::GetCpuPart(implementer);
- if ((part == CORTEX_A9) || (part == CORTEX_A5)) {
+ // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
+ if (cpu.implementer() == CPU::ARM &&
+ (cpu.part() == CPU::ARM_CORTEX_A5 ||
+ cpu.part() == CPU::ARM_CORTEX_A9)) {
cache_line_size_ = 32;
}
- if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
- && OS::ArmCpuHasFeature(VFP32DREGS)) {
+ if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
}
@@ -321,15 +325,12 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// See assembler-arm-inl.h for inlined constructors
Operand::Operand(Handle<Object> handle) {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
-#endif
AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!isolate->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -775,9 +776,9 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
+ if (is_uint24(instr)) {
+ // Emitted link to a label, not part of a branch.
+ return instr;
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
@@ -792,11 +793,72 @@ int Assembler::target_at(int pos) {
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
+ if (is_uint24(instr)) {
ASSERT(target_pos == pos || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ // Emitted link to a label, not part of a branch.
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+
+ // Here are the instructions we need to emit:
+ // For ARMv7: target24 => target16_1:target16_0
+ // movw dst, #target16_0
+ // movt dst, #target16_1
+ // For ARMv6: target24 => target8_2:target8_1:target8_0
+ // mov dst, #target8_0
+ // orr dst, dst, #target8_1 << 8
+ // orr dst, dst, #target8_2 << 16
+
+ // We extract the destination register from the emitted nop instruction.
+ Register dst = Register::from_code(
+ Instruction::RmValue(instr_at(pos + kInstrSize)));
+ ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
+ uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ ASSERT(is_uint24(target24));
+ if (is_uint8(target24)) {
+ // If the target fits in a byte then only patch with a mov
+ // instruction.
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target24));
+ } else {
+ uint16_t target16_0 = target24 & kImm16Mask;
+ uint16_t target16_1 = target24 >> 16;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ // Patch with movw/movt.
+ if (target16_1 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 1,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->movw(dst, target16_0);
+ patcher.masm()->movt(dst, target16_1);
+ }
+ } else {
+ // Patch with a sequence of mov/orr/orr instructions.
+ uint8_t target8_0 = target16_0 & kImm8Mask;
+ uint8_t target8_1 = target16_0 >> 8;
+ uint8_t target8_2 = target16_1 & kImm8Mask;
+ if (target8_2 == 0) {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ } else {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ 3,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->mov(dst, Operand(target8_0));
+ patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+ patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
+ }
+ }
+ }
return;
}
int imm26 = target_pos - (pos + kPcLoadDelta);
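A worked example of the patch arithmetic above: ignoring the Code::kHeaderSize - kHeapObjectTag bias, a target24 of 0x12345 splits into target16_0 = 0x2345 and target16_1 = 0x0001, so on ARMv7 the link word and nop are patched to

    movw dst, #0x2345
    movt dst, #0x0001

while pre-ARMv7 cores get target8_0 = 0x45, target8_1 = 0x23, target8_2 = 0x01:

    mov dst, #0x45
    orr dst, dst, #0x23 << 8
    orr dst, dst, #0x01 << 16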
@@ -1229,21 +1291,6 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- ASSERT(!L->is_bound());
- if (L->is_linked()) {
- // Point to previous instruction that uses the link.
- target_pos = L->pos();
- } else {
- // First entry of the link chain points to itself.
- target_pos = at_offset;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
-}
-
-
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
@@ -1386,6 +1433,45 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
}
+void Assembler::mov_label_offset(Register dst, Label* label) {
+ if (label->is_bound()) {
+ mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
+ } else {
+ // Emit the link to the label in the code stream followed by extra nop
+ // instructions.
+ // If the label is not linked, then start a new link chain by linking it to
+ // itself, emitting pc_offset().
+ int link = label->is_linked() ? label->pos() : pc_offset();
+ label->link_to(pc_offset());
+
+ // When the label is bound, these instructions will be patched with a
+ // sequence of movw/movt or mov/orr/orr instructions. They will load the
+ // destination register with the position of the label from the beginning
+ // of the code.
+ //
+ // The link will be extracted from the first instruction and the destination
+ // register from the second.
+ // For ARMv7:
+ // link
+ // mov dst, dst
+ // For ARMv6:
+ // link
+ // mov dst, dst
+ // mov dst, dst
+ //
+ // When the label gets bound: target_at extracts the link and target_at_put
+ // patches the instructions.
+ ASSERT(is_uint24(link));
+ BlockConstPoolScope block_const_pool(this);
+ emit(link);
+ nop(dst.code());
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ nop(dst.code());
+ }
+ }
+}
+
+
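A usage sketch for the new helper, assuming the usual __ masm-> shorthand: because the header/tag bias is folded into the stored value, adding the loaded offset to a tagged Code pointer yields the instruction address of the label.

    Label resume_point;
    __ mov_label_offset(r4, &resume_point);  // r4 = offset within code object
    // ...
    __ bind(&resume_point);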
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
ASSERT(immediate < 0x10000);
// May use movw if supported, but on unsupported platforms will try to use
diff --git a/chromium/v8/src/arm/assembler-arm.h b/chromium/v8/src/arm/assembler-arm.h
index f647848de56..866b1c9024d 100644
--- a/chromium/v8/src/arm/assembler-arm.h
+++ b/chromium/v8/src/arm/assembler-arm.h
@@ -748,10 +748,6 @@ class Assembler : public AssemblerBase {
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
- // Puts a labels target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
@@ -903,6 +899,10 @@ class Assembler : public AssemblerBase {
mov(dst, Operand(src), s, cond);
}
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+ void mov_label_offset(Register dst, Label* label);
+
// ARMv7 instructions for loading a 32 bit immediate in two instructions.
// This may actually emit a different mov instruction, but on an ARMv7 it
// is guaranteed to only emit one instruction.
@@ -1561,7 +1561,6 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(double data);
void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
- friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
diff --git a/chromium/v8/src/arm/builtins-arm.cc b/chromium/v8/src/arm/builtins-arm.cc
index eff47e2692b..f60e1f86714 100644
--- a/chromium/v8/src/arm/builtins-arm.cc
+++ b/chromium/v8/src/arm/builtins-arm.cc
@@ -119,9 +119,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r2);
- __ Assert(ne, "Unexpected initial map for InternalArray function");
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for InternalArray function");
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -147,9 +147,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r2);
- __ Assert(ne, "Unexpected initial map for Array function");
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -178,7 +178,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
__ cmp(function, Operand(r2));
- __ Assert(eq, "Unexpected String function");
+ __ Assert(eq, kUnexpectedStringFunction);
}
// Load the first arguments in r0 and get rid of the rest.
@@ -224,10 +224,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
__ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, "Unexpected string wrapper instance size");
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
__ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
__ cmp(r4, Operand::Zero());
- __ Assert(eq, "Unexpected unused properties of string wrapper");
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
}
__ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -291,68 +291,55 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
+ // Function is also the parameter to the runtime call.
+ __ push(r1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore receiver.
+ __ pop(r1);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mov(pc, r2);
+ __ Jump(r2);
}
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for a pending interrupt.
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r0);
+
+ __ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
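
[Editorial note: the comment in this hunk describes a heuristic — only pay for the "is the recompiled code ready?" runtime call when the stack pointer is below the stack limit, which is V8's cue that an interrupt is pending anyway. A self-contained sketch of that decision, with simulated values and hypothetical helper names:

#include <cstdint>
#include <cstdio>

static const uintptr_t kStackLimit = 0x1000;  // simulated isolate stack limit

static void TailCallSharedCode() { std::puts("keep running unoptimized code"); }
static void TryInstallRecompiledCode() { std::puts("try installing optimized code"); }

static void OnRecompileQueueEntry(uintptr_t sp) {
  if (sp >= kStackLimit) {
    // No interrupt signalled: skip the expensive readiness check.
    TailCallSharedCode();
  } else {
    // Stack limit tripped: we are paying for an interrupt anyway, so also
    // check whether the concurrently recompiled code can be installed.
    TryInstallRecompiledCode();
  }
}

int main() {
  OnRecompileQueueEntry(0x2000);  // common case: cheap path
  OnRecompileQueueEntry(0x0800);  // limit tripped: check for ready code
  return 0;
}

End editorial note.]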
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- __ push(r1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(r5);
- // Restore receiver.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -471,7 +458,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmp(r0, r6);
- __ Assert(le, "Unexpected number of pre-allocated property fields.");
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(r5, r0, r7);
// To allow for truncation.
@@ -503,7 +490,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
+ __ Assert(pl, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
@@ -547,7 +534,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
} else if (FLAG_debug_code) {
__ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
__ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
+ __ Assert(eq, kUndefinedValueNotLoaded);
}
__ b(&entry);
__ bind(&loop);
@@ -795,59 +782,17 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(r2);
}
@@ -966,31 +911,48 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
+ // Look up the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Look up and calculate the pc offset.
+ __ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sub(r1, r1, r2);
+ __ SmiTag(r1);
+
+ // Pass both function and pc offset as arguments.
__ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(r1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
+ // If the code object is null, just return to the unoptimized code.
Label skip;
- __ cmp(r0, Operand(Smi::FromInt(-1)));
+ __ cmp(r0, Operand(Smi::FromInt(0)));
__ b(ne, &skip);
__ Ret();
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(r0);
- __ push(r0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
}
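
[Editorial note: the replacement sequence computes the OSR entry point as code object + header size + pc offset, untagging as it goes. A back-of-the-envelope model of that arithmetic with illustrative constants (V8's real values come from Code and DeoptimizationInputData):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kHeapObjectTag = 1;      // low bit set on tagged pointers
  const uintptr_t kCodeHeaderSize = 0x80;  // illustrative Code::kHeaderSize
  uintptr_t code_obj = 0x40000001;         // tagged pointer to the code object
  int32_t osr_pc_offset_smi = 0x48 << 1;   // Smi-tagged offset from deopt data

  // <entry_addr> = <code_obj> + #header_size + <osr_offset>, as in the
  // comments above: untag the pointer, untag the Smi, add the header size.
  uintptr_t entry = code_obj - kHeapObjectTag
                    + kCodeHeaderSize
                    + (uintptr_t)(osr_pc_offset_smi >> 1);  // Smi-untag

  std::printf("OSR entry address: %#lx\n", (unsigned long)entry);
  return 0;
}

End editorial note.]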
diff --git a/chromium/v8/src/arm/code-stubs-arm.cc b/chromium/v8/src/arm/code-stubs-arm.cc
index ba98b963153..cd1809fb2a8 100644
--- a/chromium/v8/src/arm/code-stubs-arm.cc
+++ b/chromium/v8/src/arm/code-stubs-arm.cc
@@ -38,6 +38,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -246,17 +257,6 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
-void UnaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r0 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(UnaryOpIC_Miss);
-}
-
-
void StoreGlobalStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -320,134 +320,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ ldr(r1,
- FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ tst(r1, r1);
- __ b(ne, &check_optimized);
- }
- __ bind(&install_unoptimized);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
-
- // r2 holds native context, r1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into r4.
- __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
- __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
- __ cmp(r2, r5);
- __ b(eq, &install_optimized);
-
- // Iterate through the rest of map backwards. r4 holds an index as a Smi.
- Label loop;
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
- __ b(eq, &install_unoptimized);
- __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
- __ ldr(r5, MemOperand(r5));
- __ cmp(r2, r5);
- __ b(ne, &loop);
- // Hit: fetch the optimized code.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
- __ add(r5, r5, Operand(kPointerSize));
- __ ldr(r4, MemOperand(r5));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, r6, r7);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(r4, r0);
- __ RecordWriteContextSlot(
- r2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- r4,
- r1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -520,9 +392,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
__ cmp(r3, Operand::Zero());
- __ Assert(eq, message);
+ __ Assert(eq, kExpected0AsASmiSentinel);
}
__ ldr(r3, GlobalObjectOperand());
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
@@ -646,7 +517,112 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
+
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+ // Immediate values for this stub fit in instructions, so it's safe to use ip.
+ Register scratch = ip;
+ Register scratch_low =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch_high =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+
+ __ Push(scratch_high, scratch_low);
+
+ if (!skip_fastpath()) {
+ // Load double input.
+ __ vldr(double_scratch, MemOperand(input_reg, double_offset));
+ __ vmov(scratch_low, scratch_high, double_scratch);
+
+ // Do fast-path convert from double to int.
+ __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+ __ vmov(result_reg, double_scratch.low());
+
+ // If result is not saturated (0x7fffffff or 0x80000000), we are done.
+ __ sub(scratch, result_reg, Operand(1));
+ __ cmp(scratch, Operand(0x7ffffffe));
+ __ b(lt, &done);
+ } else {
+ // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
+ // know the exponent > 31 and can skip the vcvt_s32_f64, which would saturate.
+ if (double_offset == 0) {
+ __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
+ } else {
+ __ ldr(scratch_low, MemOperand(input_reg, double_offset));
+ __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
+ }
+ }
+
+ __ Ubfx(scratch, scratch_high,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // Load scratch with exponent - 1. This is faster than loading it with the
+ // exponent because Bias + 1 = 1024, which is a valid *ARM* immediate value.
+ STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+ __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+ // If the exponent is greater than or equal to 84, the 32 least significant
+ // bits are zeros: the value is then a multiple of 2^32 (84 = 52 mantissa
+ // bits + 32), so the truncated int32 result is 0.
+ // Compare exponent with 84 (compare exponent - 1 with 83).
+ __ cmp(scratch, Operand(83));
+ __ b(ge, &out_of_range);
+
+ // If we reach this code, 31 <= exponent <= 83.
+ // So, we don't have to handle cases where 0 <= exponent <= 20 for
+ // which we would need to shift right the high part of the mantissa.
+ // Scratch contains exponent - 1.
+ // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+ __ rsb(scratch, scratch, Operand(51), SetCC);
+ __ b(ls, &only_low);
+ // 21 <= exponent <= 51, shift scratch_low and scratch_high
+ // to generate the result.
+ __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+ // Scratch contains: 52 - exponent.
+ // We need: exponent - 20.
+ // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+ __ rsb(scratch, scratch, Operand(32));
+ __ Ubfx(result_reg, scratch_high,
+ 0, HeapNumber::kMantissaBitsInTopWord);
+ // Set the implicit 1 before the mantissa part in scratch_high.
+ __ orr(result_reg, result_reg,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
+ __ b(&negate);
+
+ __ bind(&out_of_range);
+ __ mov(result_reg, Operand::Zero());
+ __ b(&done);
+
+ __ bind(&only_low);
+ // 52 <= exponent <= 83, shift only scratch_low.
+ // On entry, scratch contains: 52 - exponent.
+ __ rsb(scratch, scratch, Operand::Zero());
+ __ mov(result_reg, Operand(scratch_low, LSL, scratch));
+
+ __ bind(&negate);
+ // If the input was positive, scratch_high ASR 31 equals 0 and
+ // scratch_high LSR 31 equals 0.
+ // New result = (result eor 0) + 0 = result.
+ // If the input was negative, we have to negate the result.
+ // Then scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31
+ // equals 1.
+ // New result = (result eor 0xffffffff) + 1 = 0 - result.
+ __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
+ __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
+
+ __ bind(&done);
+
+ __ Pop(scratch_high, scratch_low);
+ __ Ret();
+}
+
+
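
[Editorial note: for readers tracing the shift arithmetic in the DoubleToIStub above, here is a portable C++ re-derivation of the same slow path — truncate an IEEE-754 double to the low 32 bits of its integer value using only the raw bit pattern. This is an illustrative model, not V8 code; the comments map names back to the stub's registers.

#include <cstdint>
#include <cstdio>
#include <cstring>

static int32_t DoubleToInt32Bits(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);  // like the vmov to two registers
  uint32_t high = (uint32_t)(bits >> 32);   // scratch_high in the stub
  uint32_t low = (uint32_t)bits;            // scratch_low in the stub

  int exponent = (int)((high >> 20) & 0x7FF) - 1023;  // unbiased exponent
  if (exponent < 31) {
    // The vcvt fast path would not saturate here; plain truncation is exact.
    return (int32_t)input;
  }
  if (exponent >= 84) {
    // The value is a multiple of 2^32, so its low 32 bits are all zero.
    return 0;
  }

  int shift = 52 - exponent;                               // "scratch" above
  uint32_t mantissa_high = (high & 0xFFFFF) | (1u << 20);  // add implicit 1
  uint32_t result;
  if (shift <= 0) {
    // 52 <= exponent <= 83: only the low mantissa word contributes.
    result = low << -shift;
  } else {
    // 31 <= exponent <= 51: combine both mantissa words.
    result = (low >> shift) | (mantissa_high << (32 - shift));
  }

  // Branch-free negation, exactly as the eor/add pair above: a positive
  // input leaves result unchanged; a negative one yields (result ^ ~0) + 1.
  uint32_t sign_bit = high >> 31;                         // scratch_high LSR 31
  uint32_t sign_mask = (uint32_t)((int32_t)high >> 31);   // scratch_high ASR 31
  return (int32_t)((result ^ sign_mask) + sign_bit);
}

int main() {
  std::printf("%d\n", DoubleToInt32Bits(-2.9));          // -2
  std::printf("%d\n", DoubleToInt32Bits(4294967303.0));  // 2^32 + 7 -> 7
  std::printf("%d\n", DoubleToInt32Bits(2147483648.0));  // 2^31 -> -2^31
  return 0;
}

End editorial note.]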
+bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
return true;
@@ -1603,7 +1579,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
Register right = r0;
Register scratch1 = r6;
Register scratch2 = r7;
- Register scratch3 = r4;
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands) {
@@ -1701,12 +1676,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ SmiUntag(r2, right);
} else {
// Convert operands to 32-bit integers. Right in r2 and left in r3.
- __ ConvertNumberToInt32(
- left, r3, heap_number_map,
- scratch1, scratch2, scratch3, d0, d1, not_numbers);
- __ ConvertNumberToInt32(
- right, r2, heap_number_map,
- scratch1, scratch2, scratch3, d0, d1, not_numbers);
+ __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
+ __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
}
Label result_not_a_smi;
@@ -2520,16 +2491,6 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
const Register exponent = r2;
@@ -2733,8 +2694,8 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
@@ -3917,9 +3878,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ SmiTst(regexp_data);
- __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// regexp_data: RegExp data (FixedArray)
@@ -4261,7 +4222,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(r0, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
+ __ Assert(eq, kExternalStringExpectedButNotFound);
}
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
@@ -4442,6 +4403,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r0);
__ push(r0);
__ push(r1);
__ push(r2);
@@ -4452,6 +4414,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ pop(r2);
__ pop(r1);
__ pop(r0);
+ __ SmiUntag(r0);
}
__ b(&done);
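
[Editorial note: this hunk Smi-tags r0 (the raw argument count) before pushing it because the GC scans every slot of the internal frame as a tagged value, and a raw integer with its low bit set would look like a heap pointer. A minimal model of 32-bit Smi tagging, with helper names assumed for illustration:

#include <cassert>
#include <cstdint>

// On 32-bit V8 a Smi is the value shifted left by one, leaving the low
// (tag) bit 0; heap object pointers carry a 1 in that bit instead.
static int32_t SmiTag(int32_t value) { return (int32_t)((uint32_t)value << 1); }
static int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }

int main() {
  int32_t argc = 3;
  int32_t slot = SmiTag(argc);     // what actually gets pushed
  assert((slot & 1) == 0);         // GC reads this slot as a harmless Smi
  assert(SmiUntag(slot) == argc);  // restored after the runtime call
  return 0;
}

End editorial note.]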
@@ -4643,7 +4606,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -4688,7 +4651,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -4718,7 +4681,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -4728,7 +4691,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -4785,7 +4748,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
// Check that destination is actually word aligned if the flag says
// that it is.
__ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
+ __ Check(eq, kDestinationOfCopyNotAligned);
}
const int kReadAlignment = 4;
@@ -5014,7 +4977,7 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, candidate);
- __ Assert(eq, "oddball in string table is not undefined or the hole");
+ __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
}
__ jmp(&next_probe[i]);
@@ -5827,7 +5790,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ b(lt, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -5836,26 +5798,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
- &not_cached);
+ slow);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CompareObjectType(
- arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
- __ b(ne, slow);
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ and_(scratch2,
- scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ cmp(scratch2,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, slow);
- __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ str(arg, MemOperand(sp, stack_offset));
-
__ bind(&done);
}
@@ -6180,6 +6125,11 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ str(lr, MemOperand(sp, 0));
+ __ blx(ip); // Call the C++ function.
+ __ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}
@@ -6188,21 +6138,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
intptr_t code =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ Move(ip, target);
__ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
-
- // Prevent literal pool emission during calculation of return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
-
- // Push return address (accessible to GC through exit frame pc).
- // Note that using pc with str is deprecated.
- Label start;
- __ bind(&start);
- __ add(ip, pc, Operand(Assembler::kInstrSize));
- __ str(ip, MemOperand(sp, 0));
- __ Jump(target); // Call the C++ function.
- ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
- masm->SizeOfCodeGeneratedSince(&start));
- __ VFPEnsureFPSCRState(r2);
+ __ blx(lr); // Call the stub.
}
@@ -6468,8 +6406,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
{ REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
@@ -6501,7 +6437,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6880,6 +6816,9 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
+ // It additionally takes the isolate as a third parameter.
+ __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
@@ -6898,90 +6837,128 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(r3, Operand(kind));
- __ b(ne, &next);
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ __ b(ne, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // r2 - type info cell
- // r3 - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// r0 - number of arguments
// r1 - constructor?
// sp[0] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
- __ tst(r3, Operand(1));
Label normal_sequence;
- __ b(ne, &normal_sequence);
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // Is the low bit set? If so, we are holey and that is good.
+ __ tst(r3, Operand(1));
+ __ b(ne, &normal_sequence);
+ }
// look at the first argument
__ ldr(r5, MemOperand(sp, 0));
__ cmp(r5, Operand::Zero());
__ b(eq, &normal_sequence);
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ add(r3, r3, Operand(1));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(eq, &normal_sequence);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
- __ ldr(r5, FieldMemOperand(r5, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &normal_sequence);
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ SmiTag(r3);
- __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
- __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(r3);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(r3, Operand(kind));
- __ b(ne, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ add(r3, r3, Operand(1));
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+
+ if (FLAG_debug_code) {
+ __ ldr(r5, FieldMemOperand(r5, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSiteInCell);
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(r3);
+ __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
+ __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(r3);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(r3, Operand(kind));
+ __ b(ne, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
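
[Editorial note: the ASSERTs above pin down the encoding this dispatch relies on — each packed elements kind sits at an even value with its holey variant at the next odd value, so "low bit set" means holey and kind + 1 transitions packed to holey. An illustrative mirror of that encoding (values from the asserts; helper names assumed, not V8's exact ones):

#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

static bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }
static ElementsKind GetHoleyElementsKind(ElementsKind kind) {
  return (ElementsKind)(kind | 1);  // same as kind + 1 for packed kinds
}

int main() {
  assert(!IsHoley(FAST_ELEMENTS));
  assert(IsHoley(FAST_HOLEY_ELEMENTS));
  assert(GetHoleyElementsKind(FAST_SMI_ELEMENTS) == FAST_HOLEY_SMI_ELEMENTS);
  assert(GetHoleyElementsKind(FAST_DOUBLE_ELEMENTS) ==
         FAST_HOLEY_DOUBLE_ELEMENTS);
  return 0;
}

End editorial note.]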
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7014,6 +6991,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
@@ -7030,9 +7035,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// The tst below will catch both a NULL and a Smi.
__ tst(r3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r2 or a valid cell.
Label okay_here;
@@ -7041,54 +7046,28 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ b(eq, &okay_here);
__ ldr(r3, FieldMemOperand(r2, 0));
__ cmp(r3, Operand(cell_map));
- __ Assert(eq, "Expected property cell in register ebx");
+ __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
__ bind(&okay_here);
}
- Label no_info, switch_ready;
+ Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
- // The type cell may have undefined in its value.
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &no_info);
-
- // The type cell has either an AllocationSite or a JSFunction
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ ldr(r4, FieldMemOperand(r3, 0));
__ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &no_info);
__ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ mov(r3, Operand(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
@@ -7144,9 +7123,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// The tst below will catch both a NULL and a Smi.
__ tst(r3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -7163,7 +7142,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ b(eq, &done);
__ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
__ Assert(eq,
- "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
diff --git a/chromium/v8/src/arm/code-stubs-arm.h b/chromium/v8/src/arm/code-stubs-arm.h
index 6eab8d128ee..d05e9a1d840 100644
--- a/chromium/v8/src/arm/code-stubs-arm.h
+++ b/chromium/v8/src/arm/code-stubs-arm.h
@@ -68,7 +68,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -232,7 +232,7 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
- bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -305,7 +305,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -376,7 +376,7 @@ class RecordWriteStub: public PlatformCodeStub {
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
@@ -419,19 +419,6 @@ class RecordWriteStub: public PlatformCodeStub {
Register scratch0_;
Register scratch1_;
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
friend class RecordWriteStub;
};
@@ -478,23 +465,6 @@ class RecordWriteStub: public PlatformCodeStub {
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
diff --git a/chromium/v8/src/arm/codegen-arm.cc b/chromium/v8/src/arm/codegen-arm.cc
index 7559373ee9a..1bcf3e3a605 100644
--- a/chromium/v8/src/arm/codegen-arm.cc
+++ b/chromium/v8/src/arm/codegen-arm.cc
@@ -532,7 +532,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiTag(r9);
__ orr(r9, r9, Operand(1));
__ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array");
+ __ Assert(eq, kObjectFoundInSmiOnlyArray);
}
__ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
@@ -728,7 +728,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
+ __ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
diff --git a/chromium/v8/src/arm/codegen-arm.h b/chromium/v8/src/arm/codegen-arm.h
index c020ab601c8..54530d87262 100644
--- a/chromium/v8/src/arm/codegen-arm.h
+++ b/chromium/v8/src/arm/codegen-arm.h
@@ -44,8 +44,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
- CodeGenerator() {
- InitializeAstVisitor();
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@@ -61,7 +61,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
diff --git a/chromium/v8/src/arm/constants-arm.h b/chromium/v8/src/arm/constants-arm.h
index 9bfccf822b3..703613932cd 100644
--- a/chromium/v8/src/arm/constants-arm.h
+++ b/chromium/v8/src/arm/constants-arm.h
@@ -220,6 +220,8 @@ enum {
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
+ kImm16Mask = (1 << 16) - 1,
+ kImm8Mask = (1 << 8) - 1,
kOff12Mask = (1 << 12) - 1,
kOff8Mask = (1 << 8) - 1
};
diff --git a/chromium/v8/src/arm/cpu-arm.cc b/chromium/v8/src/arm/cpu-arm.cc
index 8766a24bb2a..cf531e1292b 100644
--- a/chromium/v8/src/arm/cpu-arm.cc
+++ b/chromium/v8/src/arm/cpu-arm.cc
@@ -106,15 +106,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#if !defined (__arm__)
- UNIMPLEMENTED(); // when building ARM emulator target
-#else
- asm volatile("bkpt 0");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/chromium/v8/src/arm/debug-arm.cc b/chromium/v8/src/arm/debug-arm.cc
index 7faea08034b..efd11069b32 100644
--- a/chromium/v8/src/arm/debug-arm.cc
+++ b/chromium/v8/src/arm/debug-arm.cc
@@ -55,7 +55,8 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
@@ -95,7 +96,8 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
+ patcher.Emit(
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry());
}
@@ -130,7 +132,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
+ __ Assert(eq, kUnableToEncodeValueAsSmi);
}
__ SmiTag(reg);
}
@@ -313,12 +315,12 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
}
const bool Debug::kFrameDropperSupported = false;
diff --git a/chromium/v8/src/arm/deoptimizer-arm.cc b/chromium/v8/src/arm/deoptimizer-arm.cc
index 5b42116ad4b..3c57b643956 100644
--- a/chromium/v8/src/arm/deoptimizer-arm.cc
+++ b/chromium/v8/src/arm/deoptimizer-arm.cc
@@ -101,12 +101,7 @@ static const int32_t kBranchBeforeInterrupt = 0x5a000004;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -125,12 +120,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -150,10 +140,10 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
@@ -164,185 +154,27 @@ bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return true;
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from the builtins cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
- ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
+ ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -555,11 +387,8 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
- }
-
+ __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
+ __ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
__ push(r6);
__ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
diff --git a/chromium/v8/src/arm/disasm-arm.cc b/chromium/v8/src/arm/disasm-arm.cc
index ecdf638a1da..acffaa3f230 100644
--- a/chromium/v8/src/arm/disasm-arm.cc
+++ b/chromium/v8/src/arm/disasm-arm.cc
@@ -50,9 +50,6 @@
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
#include "v8.h"
diff --git a/chromium/v8/src/arm/full-codegen-arm.cc b/chromium/v8/src/arm/full-codegen-arm.cc
index ea7b73f2fe9..b6fb70b5df0 100644
--- a/chromium/v8/src/arm/full-codegen-arm.cc
+++ b/chromium/v8/src/arm/full-codegen-arm.cc
@@ -296,8 +296,7 @@ void FullCodeGenerator::Generate() {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -366,8 +365,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -416,8 +414,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(r0);
EmitProfilingCounterReset();
@@ -786,9 +784,9 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
- __ Check(ne, "Declaration in with context.");
+ __ Check(ne, kDeclarationInWithContext);
__ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, "Declaration in catch context.");
+ __ Check(ne, kDeclarationInCatchContext);
}
}
@@ -1330,8 +1328,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ mov(r0, Operand(info));
- __ push(r0);
+ __ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
__ mov(r0, Operand(info));
@@ -2512,7 +2509,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.");
+ __ Check(eq, kLetBindingReInitialization);
}
// Perform the assignment.
__ str(r0, location);
@@ -3010,7 +3007,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -3022,7 +3019,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
__ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, if_true);
+ __ b(ne, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@@ -3068,6 +3065,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ b(ne, &loop);
__ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
  // If a valueOf property is not found on the object, check that its
  // prototype is the unmodified String prototype. If not, the result is false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
@@ -3077,16 +3082,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
- __ b(ne, if_false);
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
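
Taken together, the hunks above restructure EmitIsStringWrapperSafeForDefaultValueOf: the "safe for default valueOf" map bit is now set as soon as the field scan finds no valueOf, before the prototype comparison, and the fast path branches to skip_lookup instead of directly to if_true, so the prototype check runs on every path. A control-flow outline of the new shape, in plain C++ where every function is an illustrative stand-in for the generated ARM code:

#include <cstdio>

// Illustrative stand-ins for the generated checks; none of these names
// come from V8 itself.
bool map_bit_set = false;                      // the Map::kBitField2Offset bit
bool ScanFieldsForValueOf() { return false; }  // assume no valueOf is found
bool PrototypeIsUnmodified() { return true; }

bool IsSafeForDefaultValueOf() {
  if (!map_bit_set) {                  // old code: bit set => jump to if_true
    if (ScanFieldsForValueOf()) return false;
    map_bit_set = true;                // new code: set before prototype check
  }
  // skip_lookup: the cached-bit path and the scan path both fall through to
  // the prototype comparison, which now decides the result in every case.
  return PrototypeIsUnmodified();
}

int main() {
  std::printf("%d\n", IsSafeForDefaultValueOf());  // 1: scanned, bit cached
  std::printf("%d\n", IsSafeForDefaultValueOf());  // 1: via the cached bit
  return 0;
}
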
@@ -3320,7 +3318,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -3473,23 +3471,23 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register value,
uint32_t encoding_mask) {
__ SmiTst(index);
- __ Check(eq, "Non-smi index");
+ __ Check(eq, kNonSmiIndex);
__ SmiTst(value);
- __ Check(eq, "Non-smi value");
+ __ Check(eq, kNonSmiValue);
__ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
__ cmp(index, ip);
- __ Check(lt, "Index is too large");
+ __ Check(lt, kIndexIsTooLarge);
__ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, "Index is negative");
+ __ Check(ge, kIndexIsNegative);
__ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
__ cmp(ip, Operand(encoding_mask));
- __ Check(eq, "Unexpected string type");
+ __ Check(eq, kUnexpectedStringType);
}
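
The Check() hunks above are part of a wider migration from free-form assertion strings to BailoutReason enum values, so the reason can be stored and compared compactly. A minimal standalone sketch of that enum-plus-message-table pattern; the names and table below are illustrative, not V8's actual definitions:

#include <cstdio>
#include <cstdlib>

enum BailoutReason {
  kNonSmiIndex,
  kNonSmiValue,
  kIndexIsTooLarge
};

// One table maps each enum value back to a human-readable message, so the
// strings live in a single place instead of at every call site.
static const char* const kBailoutMessages[] = {
  "Non-smi index",
  "Non-smi value",
  "Index is too large",
};

void Check(bool condition, BailoutReason reason) {
  if (!condition) {
    std::fprintf(stderr, "Check failed: %s\n", kBailoutMessages[reason]);
    std::abort();
  }
}

int main() {
  Check(true, kNonSmiIndex);        // passes silently
  Check(1 + 1 == 2, kNonSmiValue);  // also passes
  return 0;
}
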
@@ -3849,7 +3847,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
context()->Plug(r0);
return;
@@ -4030,7 +4028,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements_end: Array end.
if (generate_debug_code_) {
__ cmp(array_length, Operand::Zero());
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4349,35 +4347,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- UnaryOpStub stub(expr->op());
- // UnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
diff --git a/chromium/v8/src/arm/ic-arm.cc b/chromium/v8/src/arm/ic-arm.cc
index 511a3c74f26..f15d4b11f84 100644
--- a/chromium/v8/src/arm/ic-arm.cc
+++ b/chromium/v8/src/arm/ic-arm.cc
@@ -354,7 +354,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
@@ -393,7 +393,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
@@ -658,7 +658,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
@@ -1490,7 +1490,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
diff --git a/chromium/v8/src/arm/lithium-arm.cc b/chromium/v8/src/arm/lithium-arm.cc
index e9ae04a1ee8..b8b22df4e3b 100644
--- a/chromium/v8/src/arm/lithium-arm.cc
+++ b/chromium/v8/src/arm/lithium-arm.cc
@@ -30,6 +30,7 @@
#include "lithium-allocator-inl.h"
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -260,6 +261,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -425,6 +434,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
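
The Build() hunk above reserves one spill slot per unoptimized frame slot before anything else is allocated, so the low slot indices of the optimized frame stay in one-to-one correspondence with the unoptimized frame it subsumes during OSR. A simplified standalone sketch of that reservation; Chunk and GetNextSpillIndex are stand-ins, not the real LPlatformChunk API:

#include <cassert>

class Chunk {
 public:
  // Hands out spill slot indices in order, like LPlatformChunk does.
  int GetNextSpillIndex() { return next_spill_index_++; }
 private:
  int next_spill_index_ = 0;
};

int main() {
  // Assumed value standing in for graph()->osr()->UnoptimizedFrameSlots().
  const int unoptimized_frame_slots = 4;
  Chunk chunk;
  // Burn slots 0..3 up front; later allocations start at index 4.
  for (int i = unoptimized_frame_slots; i > 0; i--) {
    chunk.GetNextSpillIndex();
  }
  assert(chunk.GetNextSpillIndex() == unoptimized_frame_slots);
  return 0;
}
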
@@ -437,7 +455,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -593,8 +611,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -645,7 +665,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
- Abort("Out of virtual registers while trying to allocate temp register.");
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
vreg = 0;
}
operand->set_virtual_register(vreg);
@@ -716,12 +736,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToSmi)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseRegisterAtStart(right_value);
@@ -733,12 +748,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
@@ -883,6 +893,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
+ instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -898,11 +909,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@@ -917,16 +930,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
- bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
+ int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- needs_arguments_object_materialization = true;
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -937,15 +950,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
- if (needs_arguments_object_materialization) {
- HArgumentsObject* arguments = hydrogen_env->entry() == NULL
- ? graph()->GetArgumentsObject()
- : hydrogen_env->entry()->arguments_object();
- ASSERT(arguments->IsLinked());
- for (int i = 1; i < arguments->arguments_count(); ++i) {
- HValue* value = arguments->arguments_values()->at(i);
- ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
- LOperand* op = UseAny(value);
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
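
The rewritten CreateEnvironment gathers arguments and captured objects into a worklist and deduplicates them: an object that already appears earlier in the list is emitted as a duplicate back-reference rather than materialized a second time. A compact standalone sketch of that walk; Value and the printed records are stand-ins for the AddDuplicateObject/AddNewObject calls:

#include <cstdio>
#include <vector>

struct Value { const char* name; };

void Materialize(const std::vector<Value*>& worklist) {
  for (size_t i = 0; i < worklist.size(); ++i) {
    int previously_materialized = -1;
    for (size_t prev = 0; prev < i; ++prev) {
      if (worklist[prev] == worklist[i]) {  // same object captured twice
        previously_materialized = static_cast<int>(prev);
        break;
      }
    }
    if (previously_materialized >= 0) {
      // Mirrors result->AddDuplicateObject(prev): a back-reference only.
      std::printf("%zu: duplicate of %d\n", i, previously_materialized);
    } else {
      // Mirrors result->AddNewObject(...): a fresh materialization record.
      std::printf("%zu: new object %s\n", i, worklist[i]->name);
    }
  }
}

int main() {
  Value a = {"a"}, b = {"b"};
  std::vector<Value*> worklist = {&a, &b, &a};  // 'a' is captured twice
  Materialize(worklist);  // new a, new b, duplicate of 0
  return 0;
}
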
@@ -1066,6 +1097,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1325,15 +1364,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1491,20 +1521,39 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left;
- LOperand* right = UseOrConstant(instr->BetterRightOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->BetterLeftOperand());
- temp = TempRegister();
+ HValue* left = instr->BetterLeftOperand();
+ HValue* right = instr->BetterRightOperand();
+ LOperand* left_op;
+ LOperand* right_op;
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+ if (right->IsConstant()) {
+ HConstant* constant = HConstant::cast(right);
+ int32_t constant_value = constant->Integer32Value();
+ // The constants -1, 0 and 1 can remain constant operands even when the
+ // result can overflow; other constants qualify only when overflow is
+ // impossible.
+ if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+ left_op = UseRegisterAtStart(left);
+ right_op = UseConstant(right);
+ } else {
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
+ }
} else {
- left = UseRegisterAtStart(instr->BetterLeftOperand());
+ if (bailout_on_minus_zero) {
+ left_op = UseRegister(left);
+ } else {
+ left_op = UseRegisterAtStart(left);
+ }
+ right_op = UseRegister(right);
}
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ LMulI* mul = new(zone()) LMulI(left_op, right_op);
+ if (can_overflow || bailout_on_minus_zero) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
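
The new DoMul logic picks the right operand's constraint from two flags: a constant of -1, 0 or 1 may remain a constant operand even when overflow checks are required (those cases lower to negate, zero and move), while any other constant qualifies only when overflow is impossible; in every remaining case both operands go in registers. A standalone sketch of that decision; the enum and function are illustrative, not V8 API:

#include <cstdio>

enum RightOperandKind { kUseConstant, kUseRegister };

RightOperandKind ChooseRightOperand(bool right_is_constant,
                                    int constant_value,
                                    bool can_overflow) {
  if (!right_is_constant) return kUseRegister;
  // -1, 0 and 1 lower to negate/zero/move, so overflow checking still
  // works; other constants can be folded only when overflow is impossible.
  if (!can_overflow || (constant_value >= -1 && constant_value <= 1)) {
    return kUseConstant;
  }
  return kUseRegister;
}

int main() {
  std::printf("%d\n", ChooseRightOperand(true, 1, true));   // 0: kUseConstant
  std::printf("%d\n", ChooseRightOperand(true, 7, true));   // 1: kUseRegister
  std::printf("%d\n", ChooseRightOperand(true, 7, false));  // 0: kUseConstant
  return 0;
}
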
@@ -1675,9 +1724,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, d7), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, d7);
}
@@ -1695,9 +1748,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(
- instr->right()->representation()));
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1720,6 +1772,13 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return new(zone()) LCmpHoleAndBranch(object);
+}
+
+
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1832,17 +1891,6 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
}
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1857,13 +1905,6 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), r0);
return MarkAsCall(new(zone()) LThrow(value), instr);
@@ -1910,19 +1951,17 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
- value = UseRegisterAtStart(instr->value());
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ value = UseRegisterAtStart(val);
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
- value = UseRegister(instr->value());
+ value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(d11);
+ LOperand* temp2 = FixedTemp(d11);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
- temp2,
- temp3));
+ temp2));
res = AssignEnvironment(res);
}
return res;
@@ -1942,14 +1981,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(result);
} else if (to.IsSmi()) {
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
- TempRegister(), TempRegister())));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+ LDoubleToI* res = new(zone()) LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -1969,8 +2006,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2016,31 +2054,24 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = NULL;
- LOperand* temp2 = NULL;
- if (!instr->CanOmitPrototypeChecks()) {
- temp1 = TempRegister();
- temp2 = TempRegister();
- }
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- if (instr->CanOmitPrototypeChecks()) return result;
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMaps(value);
- if (instr->CanOmitMapChecks()) return result;
- return AssignEnvironment(result);
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
+ LCheckMaps* result = new(zone()) LCheckMaps(value);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
@@ -2151,23 +2182,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), r0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, r0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), r0);
LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
@@ -2322,7 +2336,7 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
@@ -2440,10 +2454,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
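
DoUnknownOSRValue now derives each spill slot from the value's index in the unoptimized environment: parameters map through the chunk's parameter stack slots, and locals become spill indices relative to the first local, matching the slots reserved in Build(). A simplified sketch of that mapping; the frame-shape constants and the negative parameter-slot encoding are assumptions, not V8's actual layout:

#include <cassert>

const int kParameterCount = 2;    // assumed unoptimized frame shape
const int kFirstLocalIndex = 2;   // locals follow the parameters
const int kMaxFixedSlotIndex = 128;

int SpillIndexFor(int env_index) {
  if (env_index < kParameterCount) {
    // Stand-in for chunk()->GetParameterStackSlot(env_index).
    return -1 - env_index;
  }
  int spill_index = env_index - kFirstLocalIndex;
  if (spill_index > kMaxFixedSlotIndex) return 0;  // V8 would Abort() here
  return spill_index;
}

int main() {
  assert(SpillIndexFor(0) == -1);  // first parameter
  assert(SpillIndexFor(2) == 0);   // first local gets spill slot 0
  assert(SpillIndexFor(5) == 3);
  return 0;
}
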
@@ -2464,6 +2486,14 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
@@ -2505,20 +2535,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
  // If there is an instruction pending a deoptimization environment, create
  // a lazy bailout instruction to capture the environment.
diff --git a/chromium/v8/src/arm/lithium-arm.h b/chromium/v8/src/arm/lithium-arm.h
index c568ad6f95e..76bb9049ebc 100644
--- a/chromium/v8/src/arm/lithium-arm.h
+++ b/chromium/v8/src/arm/lithium-arm.h
@@ -50,7 +50,6 @@ class LCodeGen;
V(ArithmeticD) \
V(ArithmeticT) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -63,19 +62,19 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckNonSmi) \
V(CheckMaps) \
V(CheckMapValue) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@@ -128,7 +127,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@@ -164,6 +162,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -186,18 +185,23 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -207,13 +211,16 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ set_position(RelocInfo::kNoPosition);
+ }
+
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -251,20 +258,30 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
+ // The 31-bit PositionBits field stores the int position value, which may
+ // be RelocInfo::kNoPosition (-1). The accessors add and subtract 1 so that
+ // the encoded value in bit_field_ is always >= 0 and fits in PositionBits.
+ void set_position(int pos) {
+ bit_field_ = PositionBits::update(bit_field_, pos + 1);
+ }
+ int position() { return PositionBits::decode(bit_field_) - 1; }
+
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
virtual LOperand* result() const = 0;
@@ -288,10 +305,13 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+ class PositionBits: public BitField<int, 1, 31> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
+ int bit_field_;
};
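
The bit_field_ introduced above packs the is-call flag and a biased source position into a single int: storing pos + 1 keeps RelocInfo::kNoPosition (-1) representable as a non-negative 31-bit field. A self-contained sketch of that encoding; this BitField is a simplified reimplementation for illustration, not V8's actual template:

#include <cassert>

template <class T, int shift, int size>
struct BitField {
  static const unsigned kMask = ((1u << size) - 1u) << shift;
  static unsigned update(unsigned previous, T value) {
    return (previous & ~kMask) |
           ((static_cast<unsigned>(value) << shift) & kMask);
  }
  static T decode(unsigned value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

const int kNoPosition = -1;  // stands in for RelocInfo::kNoPosition
typedef BitField<bool, 0, 1> IsCallBits;
typedef BitField<int, 1, 31> PositionBits;

int main() {
  unsigned bit_field = IsCallBits::update(0u, false);
  // Store pos + 1 so that kNoPosition (-1) encodes as 0.
  bit_field = PositionBits::update(bit_field, kNoPosition + 1);
  assert(PositionBits::decode(bit_field) - 1 == kNoPosition);
  bit_field = PositionBits::update(bit_field, 42 + 1);
  assert(PositionBits::decode(bit_field) - 1 == 42);
  assert(IsCallBits::decode(bit_field) == false);
  return 0;
}
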
@@ -299,11 +319,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -313,15 +335,15 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, T> temps_;
private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -332,8 +354,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -369,11 +391,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -381,14 +403,14 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
@@ -397,7 +419,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -413,7 +435,7 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -422,22 +444,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -453,14 +477,14 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -471,19 +495,21 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -522,7 +548,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -536,7 +562,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -557,7 +583,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -571,11 +597,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -587,14 +613,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 2> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LModI(LOperand* left,
LOperand* right,
@@ -616,7 +642,7 @@ class LModI: public LTemplateInstruction<1, 2, 2> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -633,7 +659,7 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -652,17 +678,15 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
@@ -670,7 +694,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -688,7 +712,7 @@ class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
// Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
+class LMultiplySubD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplySubD(LOperand* minuend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -705,13 +729,13 @@ class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -730,11 +754,11 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -747,7 +771,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound: public LTemplateInstruction<1, 1, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -762,7 +786,7 @@ class LMathRound: public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
@@ -775,7 +799,7 @@ class LMathAbs: public LTemplateInstruction<1, 1, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -787,7 +811,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -799,7 +823,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -811,7 +835,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -823,7 +847,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
@@ -845,7 +869,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -857,7 +881,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -871,7 +895,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -881,13 +905,25 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -900,11 +936,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -917,7 +953,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -930,11 +966,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -945,11 +981,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -963,11 +999,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -983,11 +1019,11 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -999,11 +1035,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1016,7 +1052,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1028,11 +1065,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1046,11 +1083,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1067,7 +1104,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1081,7 +1118,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1099,7 +1136,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1108,7 +1146,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1121,7 +1159,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1136,7 +1174,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1153,7 +1191,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1174,7 +1212,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1189,7 +1227,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LRSubI: public LTemplateInstruction<1, 2, 0> {
+class LRSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LRSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1204,7 +1242,7 @@ class LRSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1213,7 +1251,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1222,7 +1260,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1231,7 +1269,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1242,16 +1280,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1262,11 +1302,11 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LControlInstruction<1, 1> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1283,7 +1323,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 1> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1295,7 +1335,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1308,7 +1348,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1323,7 +1363,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1342,7 +1382,7 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1366,7 +1406,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1378,19 +1418,7 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1405,7 +1433,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1420,7 +1448,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1435,20 +1463,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
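The three template arguments of LTemplateInstruction give the operand shape: results, inputs, temps, in that order. LRandom changes from <1, 1, 0> to <1, 1, 3> because random-number generation is now emitted inline (the deferred-code version is deleted further down in lithium-codegen-arm.cc) and needs scratch registers that the allocator must reserve up front. A sketch of the pattern, with hypothetical names:

    // LTemplateInstruction<R, I, T> declares results_[R], inputs_[I] and
    // temps_[T]; the register allocator assigns a register to every declared
    // temp before CompileToNative runs, so codegen may clobber them freely.
    class LExample V8_FINAL : public LTemplateInstruction<1, 1, 3> {
     public:
      LExample(LOperand* in, LOperand* t0, LOperand* t1, LOperand* t2) {
        inputs_[0] = in;   // value consumed by the instruction
        temps_[0] = t0;    // scratch registers, clobbered during codegen
        temps_[1] = t1;
        temps_[2] = t2;
      }
    };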
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1460,16 +1497,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1481,16 +1520,18 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1512,7 +1553,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1525,20 +1566,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1553,7 +1581,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1566,7 +1594,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1579,7 +1608,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1598,12 +1627,12 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* object, LOperand* key) {
inputs_[0] = object;
@@ -1617,14 +1646,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1640,7 +1669,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1655,7 +1684,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1674,7 +1703,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1687,11 +1716,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1706,11 +1735,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1722,7 +1751,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1735,7 +1764,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
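LStoreCodeEntry is new in this patch. Like every concrete instruction it relies on DECLARE_CONCRETE_INSTRUCTION for its opcode, mnemonic, and codegen entry point; a rough sketch of what that macro presumably expands to, inferred from the declarations visible in this header rather than copied from it:

    #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                      \
      virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {                    \
        return LInstruction::k##type;                                         \
      }                                                                       \
      virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
      virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {             \
        return mnemonic;                                                      \
      }                                                                       \
      static L##type* cast(LInstruction* instr) {                             \
        ASSERT(instr->Is##type());                                            \
        return reinterpret_cast<L##type*>(instr);                             \
      }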
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1744,28 +1790,28 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1777,14 +1823,14 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1796,7 +1842,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1808,19 +1854,19 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1831,13 +1877,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1848,26 +1894,26 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1882,30 +1928,30 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
  Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1916,13 +1962,13 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1933,13 +1979,13 @@ class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1949,7 +1995,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1961,7 +2007,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1974,7 +2020,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1986,7 +2032,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
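LUint32ToSmi is a new conversion, and it can deoptimize: on a 32-bit target a smi carries a 31-bit signed payload, so an unsigned 32-bit value only fits when it is below 2^30. A worked sketch of the bounds check and the tagging step, assuming the usual kSmiTag == 0, kSmiTagSize == 1 conventions:

    // Sketch: a uint32 fits in a 32-bit smi iff its two top bits are clear.
    bool Uint32FitsInSmi(uint32_t value) {
      return value < (1u << 30);   // smi range is [-2^30, 2^30 - 1]
    }
    int32_t TagAsSmi(uint32_t value) {
      return static_cast<int32_t>(value << 1);  // shift in the zero tag bit
    }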
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1998,7 +2057,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
@@ -2010,7 +2069,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -2027,17 +2086,13 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2047,17 +2102,13 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2067,22 +2118,19 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2091,7 +2139,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2103,7 +2151,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2116,7 +2164,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2133,7 +2181,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2148,16 +2196,16 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -2170,14 +2218,14 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2196,7 +2244,7 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() {
if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
@@ -2208,7 +2256,7 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
@@ -2223,13 +2271,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp) {
@@ -2244,7 +2292,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2253,7 +2301,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2269,7 +2317,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2285,7 +2333,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2300,7 +2348,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2313,20 +2361,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2339,7 +2387,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2352,27 +2400,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2384,7 +2412,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2397,7 +2425,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2409,7 +2437,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2421,7 +2449,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2435,7 +2463,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocate: public LTemplateInstruction<1, 2, 2> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[1] = size;
@@ -2452,21 +2480,21 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2479,7 +2507,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2491,7 +2519,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2504,11 +2532,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2521,16 +2549,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2542,7 +2572,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2554,7 +2584,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2570,7 +2600,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2584,7 +2614,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2599,7 +2629,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2609,7 +2639,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2670,7 +2700,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2752,7 +2782,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.cc b/chromium/v8/src/arm/lithium-codegen-arm.cc
index cf1e7c70f5f..1e06f8b7557 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.cc
+++ b/chromium/v8/src/arm/lithium-codegen-arm.cc
@@ -31,12 +31,13 @@
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,11 +45,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -91,7 +92,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
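Abort now takes a BailoutReason enum instead of a raw C string; every string literal in the Abort and Check calls below is replaced by a k-prefixed constant. A hedged sketch of the presumed pattern behind the enum, with hypothetical placement and only two of the entries shown:

    // Sketch (assumption): reasons are declared once in a list macro that
    // yields both the enum and a message table for GetBailoutReason().
    #define ERROR_MESSAGES_LIST(V)                                  \
      V(kGeneratedCodeIsTooLarge, "Generated code is too large")    \
      V(kUnsupportedDoubleImmediate, "unsupported double immediate")

    #define ERROR_MESSAGES_CONSTANTS(C, T) C,
    enum BailoutReason {
      ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS)
      kLastErrorMessage
    };
    #undef ERROR_MESSAGES_CONSTANTS

    const char* GetBailoutReason(BailoutReason reason);

This centralizes the strings and lets the bailout reason be stored and compared as a small integer on CompilationInfo.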
@@ -253,6 +254,21 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ sub(sp, sp, Operand(slots * kPointerSize));
+}
+
+
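The adjustment is plain arithmetic: execution arrives from an unoptimized frame that already occupies UnoptimizedFrameSlots(), and the optimized code wants GetStackSlotCount() slots in total, so only the difference is allocated. A worked example with hypothetical numbers:

    // Suppose the optimized frame needs 12 spill slots and the unoptimized
    // frame being subsumed already accounts for 5 of them:
    int slots = 12 - 5;                              // = 7
    __ sub(sp, sp, Operand(slots * kPointerSize));   // 7 * 4 = 28 bytes on ARM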
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -274,6 +290,8 @@ bool LCodeGen::GenerateBody() {
instr->Mnemonic());
}
+ RecordAndUpdatePosition(instr->position());
+
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
@@ -287,6 +305,10 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
+
+ int pos = instructions_->at(code->instruction_index())->position();
+ RecordAndUpdatePosition(pos);
+
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@@ -334,7 +356,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
// 32bit data after it.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
deopt_jump_table_.length() * 7)) {
- Abort("Generated code is too large");
+ Abort(kGeneratedCodeIsTooLarge);
}
if (deopt_jump_table_.length() > 0) {
@@ -417,15 +439,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsTagged());
+ ASSERT(r.IsSmiOrTagged());
__ LoadObject(scratch, literal);
}
return scratch;
@@ -452,7 +474,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -461,9 +483,9 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
return dbl_scratch;
} else if (r.IsDouble()) {
- Abort("unsupported double immediate");
+ Abort(kUnsupportedDoubleImmediate);
} else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
+ Abort(kUnsupportedTaggedImmediate);
}
} else if (op->IsStackSlot() || op->IsArgument()) {
// TODO(regis): Why is vldr not taking a MemOperand?
@@ -480,7 +502,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -534,14 +556,14 @@ Operand LCodeGen::ToOperand(LOperand* op) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
+ Abort(kToOperandUnsupportedDoubleImmediate);
}
ASSERT(r.IsTagged());
- return Operand(constant->handle());
+ return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
return Operand::Zero();
}
// Stack slots not implemented, use ToMemOperand instead.
@@ -605,37 +627,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
-
- // TODO(mstarzinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- if (value == NULL) {
- int arguments_count = environment->values()->length() - translation_size;
- translation->BeginArgumentsObject(arguments_count);
- for (int i = 0; i < arguments_count; ++i) {
- LOperand* value = environment->values()->at(translation_size + i);
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(translation_size + i),
- environment->HasUint32ValueAt(translation_size + i));
- }
- continue;
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i));
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32) {
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
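The environment's value list now carries two sections: the first translation_size entries are the regular frame values, and the fields of dematerialized objects are appended after them. A materialization marker in the regular section drives the recursion above. A worked example of the layout, with hypothetical values:

    // values(): [ v0, v1, MARKER | f0, f1 ]      translation_size == 3
    //
    // Hitting MARKER (object 0, length 2, not a duplicate) emits
    // BeginCapturedObject(2) and recurses into f0 and f1 at env_offset == 3.
    // A later marker whose object duplicates object 0 would instead emit
    // DuplicateObject(0) and consume no field entries at all.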
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -664,7 +706,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -762,7 +804,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
+void LCodeGen::DeoptimizeIf(Condition condition,
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -772,7 +814,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
@@ -785,12 +827,12 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
- __ stop("trap_on_deopt", cc);
+ if (info()->ShouldTrapOnDeopt()) {
+ __ stop("trap_on_deopt", condition);
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (cc == al && frame_is_built_) {
+ if (condition == al && frame_is_built_) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
@@ -804,17 +846,17 @@ void LCodeGen::DeoptimizeIf(Condition cc,
!frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
}
- __ b(cc, &deopt_jump_table_.last().label);
+ __ b(condition, &deopt_jump_table_.last().label);
}
}
-void LCodeGen::DeoptimizeIf(Condition cc,
+void LCodeGen::DeoptimizeIf(Condition condition,
LEnvironment* environment) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type);
+ DeoptimizeIf(condition, environment, bailout_type);
}
@@ -977,6 +1019,14 @@ void LCodeGen::RecordPosition(int position) {
}
+void LCodeGen::RecordAndUpdatePosition(int position) {
+ if (position >= 0 && position != old_position_) {
+ masm()->positions_recorder()->RecordPosition(position);
+ old_position_ = position;
+ }
+}
+
+
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -1064,8 +1114,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1364,6 +1413,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
__ mov(dividend, Operand(dividend, ASR, power));
if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
+ if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt);
return; // Don't fall through to "__ rsb" below.
} else {
// Deoptimize if remainder is not 0.
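The added rsb completes the sign handling for power-of-two divisors: the shift only produces the quotient's magnitude relative to the dividend's sign, so the result must also be flipped when the divisor is negative. A worked example:

    // dividend = 8, divisor = -4 (power = 2):
    //   mov dividend, dividend ASR #2    -> 2
    //   divisor < 0, result > 0 (gt)     -> rsb: 0 - 2 = -2   (the new line)
    // Before this fix only positive divisors re-negated a flipped negative
    // dividend, so 8 / -4 would have produced +2.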
@@ -1538,21 +1588,17 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
Register result = ToRegister(instr->result());
// Note that result may alias left.
Register left = ToRegister(instr->left());
LOperand* right_op = instr->right();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+ bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
- int32_t constant = ToRepresentation(
- LConstantOperand::cast(right_op),
- instr->hydrogen()->right()->representation());
+ if (right_op->IsConstantOperand()) {
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
@@ -1563,7 +1609,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
- __ rsb(result, left, Operand::Zero());
+ if (overflow) {
+ __ rsb(result, left, Operand::Zero(), SetCC);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ rsb(result, left, Operand::Zero());
+ }
break;
case 0:
if (bailout_on_minus_zero) {
@@ -1584,23 +1635,21 @@ void LCodeGen::DoMulI(LMulI* instr) {
int32_t mask = constant >> 31;
uint32_t constant_abs = (constant + mask) ^ mask;
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ mov(result, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ add(result, left, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ rsb(result, left, Operand(left, LSL, shift));
- }
-
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ mov(result, Operand(left, LSL, shift));
+      // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ add(result, left, Operand(left, LSL, shift));
+      // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ rsb(result, left, Operand(left, LSL, shift));
      // Correct the sign of the result if the constant is negative.
if (constant < 0) __ rsb(result, result, Operand::Zero());
-
} else {
// Generate standard code.
__ mov(ip, Operand(constant));
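The rewrite flattens the nested power-of-two tests and applies one uniform sign correction. The three shapes it recognizes, shown with concrete constants:

    // x * 8  : abs     = 2^3  -> mov r, x LSL #3
    // x * 5  : abs - 1 = 2^2  -> add r, x, x LSL #2   (x + 4x)
    // x * 7  : abs + 1 = 2^3  -> rsb r, x, x LSL #3   (8x - x)
    // x * -5 : matches the x*5 shape, then rsb r, r, #0 flips the sign
    // x * 6  : no shape matches -> falls through to mov ip / mul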
@@ -1609,12 +1658,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ orr(ToRegister(instr->temp()), left, right);
- }
+ ASSERT(right_op->IsRegister());
+ Register right = ToRegister(right_op);
- if (can_overflow) {
+ if (overflow) {
+ Register scratch = scratch0();
// scratch:result = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1634,12 +1682,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
Label done;
+ __ teq(left, Operand(right));
+ __ b(pl, &done);
+ // Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- __ b(ne, &done);
- __ cmp(ToRegister(instr->temp()), Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
+ DeoptimizeIf(eq, instr->environment());
__ bind(&done);
}
}
@@ -1669,7 +1717,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ orr(result, left, right);
break;
case Token::BIT_XOR:
- __ eor(result, left, right);
+ if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+ __ mvn(result, Operand(left));
+ } else {
+ __ eor(result, left, right);
+ }
break;
default:
UNREACHABLE();
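x ^ ~0 is simply ~x, and 0xFFFFFFFF cannot be encoded as an ARM data-processing immediate (immediates are an 8-bit value rotated by an even amount), so without this peephole the constant would first have to be materialized in a register. The transformation:

    // before: mvn ip, #0            ; build 0xFFFFFFFF
    //         eor result, left, ip
    // after:  mvn result, left      ; one instruction, same value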
@@ -1832,7 +1884,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
+ Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@@ -1936,7 +1988,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type");
+ __ Check(eq, kUnexpectedStringType);
}
__ add(ip,
@@ -1953,13 +2005,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ mvn(result, Operand(input));
-}
-
-
void LCodeGen::DoThrow(LThrow* instr) {
Register input_reg = EmitLoadRegister(instr->value(), ip);
__ push(input_reg);
@@ -2121,25 +2166,32 @@ int LCodeGen::GetNextEmittedBlock() const {
}
template<class InstrType>
-void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
int left_block = instr->TrueDestination(chunk_);
int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
- if (right_block == left_block || cc == al) {
+ if (right_block == left_block || condition == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
- __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
} else if (right_block == next_block) {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
+ __ b(condition, chunk_->GetAssemblyLabel(left_block));
} else {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
+ __ b(condition, chunk_->GetAssemblyLabel(left_block));
__ b(chunk_->GetAssemblyLabel(right_block));
}
}
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ b(condition, chunk_->GetAssemblyLabel(false_block));
+}
+
+
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
__ stop("LBreak");
}
@@ -2395,6 +2447,26 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ mov(ip, Operand(factory()->the_hole_value()));
+ __ cmp(input_reg, ip);
+ EmitBranch(instr, eq);
+ return;
+ }
+
+ DwVfpRegister input_reg = ToDoubleRegister(instr->object());
+ __ VFPCompareAndSetFlags(input_reg, input_reg);
+ EmitFalseBranch(instr, vc);
+
+ Register scratch = scratch0();
+ __ VmovHigh(scratch, input_reg);
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ EmitBranch(instr, eq);
+}
+
+
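In a holey double backing store the hole is a specific quiet NaN, so the check runs in two steps: an ordered self-compare filters out every non-NaN (vc, no unordered flag, branches false), then the upper word is matched against kHoleNanUpper32. A hedged C++ sketch of the same bit-pattern test; the constant's value is an assumption here, the real one lives elsewhere in the tree:

    #include <stdint.h>
    #include <string.h>

    static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;  // assumed value

    bool IsTheHoleNan(double d) {
      if (d == d) return false;          // ordered: not a NaN (the vc branch)
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);    // inspect the raw encoding
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }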
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2676,15 +2748,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -3021,91 +3093,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstant()) {
- Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
- __ LoadObject(result, constant);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
- __ cmp(result, Operand(Handle<Map>(current->map())));
- DeoptimizeIf(ne, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register object_map = scratch0();
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object_map, map, &check_passed);
- if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- __ b(ne, &next);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ b(&done);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
- }
- __ bind(&done);
-}
-
-
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
@@ -3200,7 +3187,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -3284,7 +3271,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -3545,7 +3532,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
} else {
Register argument_reg = EmitLoadRegister(argument, ip);
__ push(argument_reg);
@@ -3748,14 +3735,14 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3765,7 +3752,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vabs(result, input);
- } else if (r.IsInteger32()) {
+ } else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else {
// Representation is tagged.
@@ -3904,80 +3891,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(d7));
- ASSERT(ToRegister(instr->global_object()).is(r0));
-
+ // Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
+ // Load native context
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ ldr(native_context, FieldMemOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
- // r2: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
- __ cmp(r1, Operand::Zero());
- __ b(eq, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
- // r1: state[0].
- // r0: state[1].
+ Register state1 = ToRegister(instr->scratch2());
+ __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ and_(r3, r1, Operand(0xFFFF));
- __ mov(r4, Operand(18273));
- __ mul(r3, r3, r4);
- __ add(r1, r3, Operand(r1, LSR, 16));
+ Register scratch3 = ToRegister(instr->scratch3());
+ Register scratch4 = scratch0();
+ __ and_(scratch3, state0, Operand(0xFFFF));
+ __ mov(scratch4, Operand(18273));
+ __ mul(scratch3, scratch3, scratch4);
+ __ add(state0, scratch3, Operand(state0, LSR, 16));
// Save state[0].
- __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
+ __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ and_(r3, r0, Operand(0xFFFF));
- __ mov(r4, Operand(36969));
- __ mul(r3, r3, r4);
- __ add(r0, r3, Operand(r0, LSR, 16));
+ __ and_(scratch3, state1, Operand(0xFFFF));
+ __ mov(scratch4, Operand(36969));
+ __ mul(scratch3, scratch3, scratch4);
+ __ add(state1, scratch3, Operand(state1, LSR, 16));
// Save state[1].
- __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
+ __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ and_(r0, r0, Operand(0x3FFFF));
- __ add(r0, r0, Operand(r1, LSL, 14));
+ Register random = scratch4;
+ __ and_(random, state1, Operand(0x3FFFF));
+ __ add(random, random, Operand(state0, LSL, 14));
- __ bind(deferred->exit());
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
+ __ mov(scratch3, Operand(0x41000000));
+ __ orr(scratch3, scratch3, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vmov(result, random, scratch3);
// Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in r0.
+ __ mov(scratch4, Operand::Zero());
+ DwVfpRegister scratch5 = double_scratch0();
+ __ vmov(scratch5, scratch4, scratch3);
+ __ vsub(result, result, scratch5);
}
@@ -4172,6 +4143,15 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4278,14 +4258,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
if (FLAG_debug_code && check->hydrogen()->skip_check()) {
Label done;
- __ b(NegateCondition(cc), &done);
+ __ b(NegateCondition(condition), &done);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, check->environment());
+ DeoptimizeIf(condition, check->environment());
}
}
@@ -4319,7 +4299,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -4392,7 +4372,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -4415,7 +4395,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (masm()->emit_debug_code()) {
__ vmrs(ip);
__ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- __ Assert(ne, "Default NaN mode not set");
+ __ Assert(ne, kDefaultNaNModeNotSet);
}
__ VFPCanonicalizeNaN(value);
}
@@ -4516,12 +4496,13 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetLinkRegisterState(), kDontSaveFPRegs);
} else {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
__ Move(r0, object_reg);
__ Move(r1, to_map);
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
- RecordSafepointWithRegisters(
+ RecordSafepointWithRegistersAndDoubles(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
@@ -4545,12 +4526,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4598,12 +4581,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4664,9 +4649,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* input = instr->value();
- ASSERT(input->IsRegister());
LOperand* output = instr->result();
- ASSERT(output->IsRegister());
__ SmiTag(ToRegister(output), ToRegister(input), SetCC);
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
@@ -4685,17 +4668,29 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ tst(ToRegister(input), Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ __ SmiTag(ToRegister(output), ToRegister(input));
+}
+
+
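
// Range-check sketch: on 32-bit V8 a smi payload is a signed 31-bit
// integer, so an unsigned value fits only when it is below 2^30 -- exactly
// when the two bits tested above (mask 0xC0000000) are both clear.
#include <cstdint>

static bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;  // value < (1u << 30)
}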
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4711,16 +4706,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4793,12 +4788,14 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4809,29 +4806,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- DwVfpRegister input_reg = ToDoubleRegister(instr->value());
- __ VFPCompareAndSetFlags(input_reg, input_reg);
- __ b(vc, &no_special_nan_handling);
- __ VmovHigh(scratch, input_reg);
- __ cmp(scratch, Operand(kHoleNanUpper32));
- // If not the hole NaN, force the NaN to be canonical.
- __ VFPCanonicalizeNaN(input_reg, ne);
- __ b(ne, &no_special_nan_handling);
- __ Move(reg, factory()->the_hole_value());
- __ b(&done);
- }
-
- __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4845,7 +4819,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
  // Now that we have finished with the object's real address, tag it.
__ add(reg, reg, Operand(kHeapObjectTag));
- __ bind(&done);
}
@@ -4885,7 +4858,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DwVfpRegister result_reg,
- bool allow_undefined_as_nan,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
@@ -4895,9 +4868,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
- STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
- NUMBER_CANDIDATE_IS_ANY_TAGGED);
- if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
@@ -4905,7 +4876,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- if (!allow_undefined_as_nan) {
+ if (!can_convert_undefined_to_nan) {
DeoptimizeIf(ne, env);
} else {
Label heap_number, convert;
@@ -4914,11 +4885,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
- __ b(eq, &convert);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- }
DeoptimizeIf(ne, env);
__ bind(&convert);
@@ -4958,7 +4924,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
LowDwVfpRegister double_scratch = double_scratch0();
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4969,18 +4935,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// The carry flag is set when we reach this deferred code as we just executed
// SmiUntag(heap_object, SetCC)
STATIC_ASSERT(kHeapObjectTag == 1);
- __ adc(input_reg, input_reg, Operand(input_reg));
+ __ adc(scratch2, input_reg, Operand(input_reg));
// Heap number map check.
- __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label heap_number;
@@ -4988,23 +4950,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
+ __ cmp(scratch2, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ mov(input_reg, Operand::Zero());
__ b(&done);
__ bind(&heap_number);
- __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
-
- __ ECMAToInt32(input_reg, double_scratch2,
- scratch1, scratch2, scratch3, double_scratch);
-
+ __ TruncateHeapNumberToI(input_reg, scratch2);
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
+ __ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
DeoptimizeIf(ne, instr->environment());
@@ -5022,12 +4979,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -5059,21 +5018,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DwVfpRegister result_reg = ToDoubleRegister(result);
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- mode = NUMBER_CANDIDATE_IS_SMI;
- } else if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
- }
- }
- }
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->allow_undefined_as_nan(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@@ -5083,14 +5033,11 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- __ ECMAToInt32(result_reg, double_input,
- scratch1, scratch2, scratch3, double_scratch);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
@@ -5111,14 +5058,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- __ ECMAToInt32(result_reg, double_input,
- scratch1, scratch2, scratch3, double_scratch);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
@@ -5197,50 +5141,84 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
+ Handle<HeapObject> object = instr->hydrogen()->object();
AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*target)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
- __ cmp(reg, Operand(target));
+ __ cmp(reg, Operand(object));
}
DeoptimizeIf(ne, instr->environment());
}
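
// Why the Cell indirection above (a sketch of the rationale, not code from
// this commit): new-space objects move on every scavenge, so embedding the
// pointer directly would mean re-patching the generated code each time the
// object moves; parking the value in a Cell lets the GC update one ordinary
// slot instead, and the emitted code compares against the cell's contents:
//   if (heap->InNewSpace(*object)):  load cell; load cell->value(); compare
//   else:                            compare against the embedded pointer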
-void LCodeGen::DoCheckMapCommon(Register map_reg,
- Handle<Map> map,
- LEnvironment* env) {
- Label success;
- __ CompareMap(map_reg, map, &success);
- DeoptimizeIf(ne, env);
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(object);
+ CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ StoreToSafepointRegisterSlot(r0, scratch0());
+ }
+ __ tst(scratch0(), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
if (instr->hydrogen()->CanOmitMapChecks()) return;
Register map_reg = scratch0();
+
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ Label success;
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMap(map_reg, map, &success);
__ b(eq, &success);
}
+
Handle<Map> map = map_set->last();
- DoCheckMapCommon(map_reg, map, instr->environment());
+ __ CompareMap(map_reg, map, &success);
+ if (instr->hydrogen()->has_migration_target()) {
+ __ b(ne, deferred->entry());
+ } else {
+ DeoptimizeIf(ne, instr->environment());
+ }
+
__ bind(&success);
}
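
// A host-side sketch of the map-check strategy above. MapOf and TryMigrate
// are invented stand-ins for the emitted map load and the deferred
// Runtime::kMigrateInstance call.
struct JSObject;
struct Map;
Map* MapOf(JSObject* object);       // stand-in: ldr of HeapObject::kMapOffset
bool TryMigrate(JSObject* object);  // stand-in: DoDeferredInstanceMigration

static bool CheckMaps(JSObject* object, Map* const* maps, int count,
                      bool has_migration_target) {
  for (;;) {
    Map* map = MapOf(object);
    for (int i = 0; i < count; ++i) {
      if (map == maps[i]) return true;          // __ b(eq, &success)
    }
    if (!has_migration_target) return false;    // DeoptimizeIf(ne, ...)
    if (!TryMigrate(object)) return false;      // failed migration deopts
    // A successful migration re-enters at check_maps(), i.e. the
    // comparisons run again against the object's new map.
  }
}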
@@ -5295,32 +5273,15 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
-
- Register prototype_reg = ToRegister(instr->temp());
- Register map_reg = ToRegister(instr->temp2());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
- }
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5472,8 +5433,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ mov(r1, Operand(instr->hydrogen()->shared_info()));
- __ push(r1);
+ __ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
@@ -5648,6 +5608,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(al, instr->environment(), type);
}
@@ -5669,12 +5631,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5689,9 +5653,10 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &done);
- StackCheckStub stub;
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5728,9 +5693,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.h b/chromium/v8/src/arm/lithium-codegen-arm.h
index 21f792153ba..4b6b5ca8e36 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.h
+++ b/chromium/v8/src/arm/lithium-codegen-arm.h
@@ -43,7 +43,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -66,7 +66,8 @@ class LCodeGen BASE_EMBEDDED {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
+ expected_safepoint_kind_(Safepoint::kSimple),
+ old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -115,7 +116,7 @@ class LCodeGen BASE_EMBEDDED {
DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DwVfpRegister dbl_scratch);
- int ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
@@ -148,14 +149,12 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
-
- void DoCheckMapCommon(Register map_reg, Handle<Map> map, LEnvironment* env);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -214,7 +213,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -227,6 +226,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -281,16 +283,19 @@ class LCodeGen BASE_EMBEDDED {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
+ void DeoptimizeIf(Condition condition,
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+ void DeoptimizeIf(Condition condition, LEnvironment* environment);
+ void ApplyCheckIf(Condition condition, LBoundsCheck* check);
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32);
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -316,11 +321,14 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
+ void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
- void EmitBranch(InstrType instr, Condition cc);
+ void EmitBranch(InstrType instr, Condition condition);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition);
void EmitNumberUntagD(Register input,
DwVfpRegister result,
bool allow_undefined_as_nan,
@@ -356,12 +364,6 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -418,7 +420,9 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ int old_position_;
+
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
@@ -466,7 +470,7 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -475,7 +479,7 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
index 7a3c96892c2..88ac7a2a21d 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -254,7 +254,7 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
- } else if (source->IsDoubleRegister()) {
+ } else if (destination->IsDoubleRegister()) {
DwVfpRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Vmov(result, v, ip);
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.h b/chromium/v8/src/arm/lithium-gap-resolver-arm.h
index 9dd09c8d03b..044c2864a42 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/chromium/v8/src/arm/lithium-gap-resolver-arm.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/chromium/v8/src/arm/macro-assembler-arm.cc b/chromium/v8/src/arm/macro-assembler-arm.cc
index cd124610f97..7df785776dd 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/arm/macro-assembler-arm.cc
@@ -375,16 +375,13 @@ void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- !Heap::RootCanBeWrittenAfterInitialization(index) &&
+ isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
!predictable_code_size()) {
- Handle<Object> root(isolate()->heap()->roots_array_start()[index],
- isolate());
- if (!isolate()->heap()->InNewSpace(*root)) {
- // The CPU supports fast immediate values, and this root will never
- // change. We will load it as a relocatable immediate value.
- mov(destination, Operand(root), LeaveCC, cond);
- return;
- }
+ // The CPU supports fast immediate values, and this root will never
+ // change. We will load it as a relocatable immediate value.
+ Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
+ mov(destination, Operand(root), LeaveCC, cond);
+ return;
}
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@@ -489,7 +486,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
- Check(eq, "Wrong address or value passed to RecordWrite");
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
Label done;
@@ -832,26 +829,6 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
-void MacroAssembler::ConvertNumberToInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch1,
- LowDwVfpRegister double_scratch2,
- Label* not_number) {
- Label done;
- UntagAndJumpIfSmi(dst, object, &done);
- JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
- vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset));
- ECMAToInt32(dst, double_scratch1,
- scratch1, scratch2, scratch3, double_scratch2);
-
- bind(&done);
-}
-
-
void MacroAssembler::LoadNumber(Register object,
LowDwVfpRegister dst,
Register heap_number_map,
@@ -1490,7 +1467,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
cmp(scratch, Operand::Zero());
- Check(ne, "we should not have an empty lexical context");
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif
// Load the native context of the current context.
@@ -1508,7 +1485,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
pop(holder_reg); // Restore holder.
}
@@ -1525,12 +1502,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
mov(holder_reg, ip); // Move ip to its holding place.
LoadRoot(ip, Heap::kNullValueRootIndex);
cmp(holder_reg, ip);
- Check(ne, "JSGlobalProxy::context() should not be null.");
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull);
ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
// Restore ip is not needed. ip is reloaded below.
pop(holder_reg); // Restore holder.
// Restore ip to holder's context.
@@ -1705,15 +1682,9 @@ void MacroAssembler::Allocate(int object_size,
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
- // Set up allocation top address and object size registers.
+ // Set up allocation top address register.
Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
mov(topaddr, Operand(allocation_top));
- Operand obj_size_operand = Operand(object_size);
- if (!obj_size_operand.is_single_instruction(this)) {
- // We are about to steal IP, so we need to load this value first
- mov(obj_size_reg, obj_size_operand);
- }
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
@@ -1727,7 +1698,7 @@ void MacroAssembler::Allocate(int object_size,
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
- Check(eq, "Unexpected allocation top");
+ Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
@@ -1735,25 +1706,41 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
// Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- if (obj_size_operand.is_single_instruction(this)) {
- // We can add the size as an immediate
- add(scratch2, result, obj_size_operand, SetCC);
- } else {
- // Doesn't fit in an immediate, we have to use the register
- add(scratch2, result, obj_size_reg, SetCC);
+ // to calculate the new top. We must preserve the ip register at this
+ // point, so we cannot just use add().
+ ASSERT(object_size > 0);
+ Register source = result;
+ Condition cond = al;
+ int shift = 0;
+ while (object_size != 0) {
+ if (((object_size >> shift) & 0x03) == 0) {
+ shift += 2;
+ } else {
+ int bits = object_size & (0xff << shift);
+ object_size -= bits;
+ shift += 8;
+ Operand bits_operand(bits);
+ ASSERT(bits_operand.is_single_instruction(this));
+ add(scratch2, source, bits_operand, SetCC, cond);
+ source = scratch2;
+ cond = cc;
+ }
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
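
// The loop above restated in isolation: it splits a small positive constant
// into chunks that each fit an ARM data-processing immediate (an 8-bit
// value at an even rotation), so every chunk becomes one add instruction
// and ip is never clobbered. Only the function name and the vector
// container are invented.
#include <vector>

static std::vector<int> SplitIntoArmImmediates(int object_size) {
  std::vector<int> chunks;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;                                // skip a zero bit pair
    } else {
      int bits = object_size & (0xff << shift);  // peel an 8-bit window
      object_size -= bits;
      shift += 8;
      chunks.push_back(bits);                    // encodable as one add
    }
  }
  return chunks;
}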
@@ -1825,7 +1812,7 @@ void MacroAssembler::Allocate(Register object_size,
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
- Check(eq, "Unexpected allocation top");
+ Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
@@ -1833,12 +1820,16 @@ void MacroAssembler::Allocate(Register object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand(ip));
+ b(hs, gc_required);
+ }
mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
@@ -1859,7 +1850,7 @@ void MacroAssembler::Allocate(Register object_size,
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
tst(scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space");
+ Check(eq, kUnalignedAllocationInNewSpace);
}
str(scratch2, MemOperand(topaddr));
@@ -1882,7 +1873,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
mov(scratch, Operand(new_space_allocation_top));
ldr(scratch, MemOperand(scratch));
cmp(object, scratch);
- Check(lt, "Undo allocation of non allocated memory");
+ Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
// Write the address of the object to un-allocate as the current top.
mov(scratch, Operand(new_space_allocation_top));
@@ -2131,7 +2122,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
if (emit_debug_code()) {
vmrs(ip);
tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- Assert(ne, "Default NaN mode not set");
+ Assert(ne, kDefaultNaNModeNotSet);
}
VFPCanonicalizeNaN(double_scratch);
b(&store);
@@ -2294,7 +2285,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
@@ -2363,15 +2353,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Label leave_exit_frame;
Label return_value_loaded;
- if (returns_handle) {
- Label load_return_value;
- cmp(r0, Operand::Zero());
- b(eq, &load_return_value);
- // derefernce returned value
- ldr(r0, MemOperand(r0));
- b(&return_value_loaded);
- bind(&load_return_value);
- }
// load value from ReturnValue
ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
bind(&return_value_loaded);
@@ -2381,7 +2362,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
if (emit_debug_code()) {
ldr(r1, MemOperand(r7, kLevelOffset));
cmp(r1, r6);
- Check(eq, "Unexpected level after return from api call");
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
}
sub(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
@@ -2527,84 +2508,76 @@ void MacroAssembler::TryInt32Floor(Register result,
bind(&exception);
}
-
-void MacroAssembler::ECMAToInt32(Register result,
- DwVfpRegister double_input,
- Register scratch,
- Register scratch_high,
- Register scratch_low,
- LowDwVfpRegister double_scratch) {
- ASSERT(!scratch_high.is(result));
- ASSERT(!scratch_low.is(result));
- ASSERT(!scratch_low.is(scratch_high));
- ASSERT(!scratch.is(result) &&
- !scratch.is(scratch_high) &&
- !scratch.is(scratch_low));
- ASSERT(!double_input.is(double_scratch));
-
- Label out_of_range, only_low, negate, done;
-
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister double_input,
+ Label* done) {
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
// If result is not saturated (0x7fffffff or 0x80000000), we are done.
- sub(scratch, result, Operand(1));
- cmp(scratch, Operand(0x7ffffffe));
- b(lt, &done);
+ sub(ip, result, Operand(1));
+ cmp(ip, Operand(0x7ffffffe));
+ b(lt, done);
+}
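
// Saturation-check sketch: vcvt_s32_f64 saturates out-of-range inputs to
// INT32_MIN (0x80000000) or INT32_MAX (0x7fffffff), and those are exactly
// the values for which result - 1 is >= 0x7ffffffe as a signed compare --
// which is what the sub/cmp/b(lt, done) sequence above tests.
#include <cstdint>

static bool ConversionSaturated(uint32_t result) {
  return static_cast<int32_t>(result - 1u) >= 0x7ffffffe;
}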
- vmov(scratch_low, scratch_high, double_input);
- Ubfx(scratch, scratch_high,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // Load scratch with exponent - 1. This is faster than loading
- // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
- sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
- // If exponent is greater than or equal to 84, the 32 less significant
- // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
- // the result is 0.
- // Compare exponent with 84 (compare exponent - 1 with 83).
- cmp(scratch, Operand(83));
- b(ge, &out_of_range);
-
- // If we reach this code, 31 <= exponent <= 83.
- // So, we don't have to handle cases where 0 <= exponent <= 20 for
- // which we would need to shift right the high part of the mantissa.
- // Scratch contains exponent - 1.
- // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
- rsb(scratch, scratch, Operand(51), SetCC);
- b(ls, &only_low);
- // 21 <= exponent <= 51, shift scratch_low and scratch_high
- // to generate the result.
- mov(scratch_low, Operand(scratch_low, LSR, scratch));
- // Scratch contains: 52 - exponent.
- // We needs: exponent - 20.
- // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
- rsb(scratch, scratch, Operand(32));
- Ubfx(result, scratch_high,
- 0, HeapNumber::kMantissaBitsInTopWord);
- // Set the implicit 1 before the mantissa part in scratch_high.
- orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- orr(result, scratch_low, Operand(result, LSL, scratch));
- b(&negate);
-
- bind(&out_of_range);
- mov(result, Operand::Zero());
- b(&done);
- bind(&only_low);
- // 52 <= exponent <= 83, shift only scratch_low.
- // On entry, scratch contains: 52 - exponent.
- rsb(scratch, scratch, Operand::Zero());
- mov(result, Operand(scratch_low, LSL, scratch));
-
- bind(&negate);
- // If input was positive, scratch_high ASR 31 equals 0 and
- // scratch_high LSR 31 equals zero.
- // New result = (result eor 0) + 0 = result.
- // If the input was negative, we have to negate the result.
- // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
- // New result = (result eor 0xffffffff) + 1 = 0 - result.
- eor(result, result, Operand(scratch_high, ASR, 31));
- add(result, result, Operand(scratch_high, LSR, 31));
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DwVfpRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through then inline version didn't succeed - call stub instead.
+ push(lr);
+ sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ vstr(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(sp, result, 0, true, true);
+ CallStub(&stub);
+
+ add(sp, sp, Operand(kDoubleSize));
+ pop(lr);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result,
+ Register object) {
+ Label done;
+ LowDwVfpRegister double_scratch = kScratchDoubleReg;
+ ASSERT(!result.is(object));
+
+ vldr(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+ // If we fell through then inline version didn't succeed - call stub instead.
+ push(lr);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(lr);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_number) {
+ Label done;
+ ASSERT(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+ TruncateHeapNumberToI(result, object);
bind(&done);
}
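
// Dispatch sketch for TruncateNumberToI; the helpers are invented stand-ins
// for the emitted tag and map checks.
#include <cstdint>
struct Object;
bool IsSmi(Object* o);                  // stand-in: tst kSmiTagMask
int32_t SmiValue(Object* o);            // stand-in: UntagAndJumpIfSmi
bool IsHeapNumber(Object* o);           // stand-in: JumpIfNotHeapNumber
int32_t TruncateHeapNumber(Object* o);  // inline vcvt, else DoubleToIStub

static int32_t TruncateNumber(Object* object, bool* is_number) {
  *is_number = true;
  if (IsSmi(object)) return SmiValue(object);  // smi fast path
  if (!IsHeapNumber(object)) {
    *is_number = false;                        // caller's not_number label
    return 0;
  }
  return TruncateHeapNumber(object);
}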
@@ -2782,9 +2755,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
-void MacroAssembler::Assert(Condition cond, const char* msg) {
+void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
if (emit_debug_code())
- Check(cond, msg);
+ Check(cond, reason);
}
@@ -2803,23 +2776,23 @@ void MacroAssembler::AssertFastElements(Register elements) {
LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
pop(elements);
}
}
-void MacroAssembler::Check(Condition cond, const char* msg) {
+void MacroAssembler::Check(Condition cond, BailoutReason reason) {
Label L;
b(cond, &L);
- Abort(msg);
+ Abort(reason);
// will not return here
bind(&L);
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
@@ -2827,6 +2800,7 @@ void MacroAssembler::Abort(const char* msg) {
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
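
// A sketch of the encoding above: with kSmiTag == 0 and kSmiTagMask == 1,
// clearing the pointer's low bit yields a valid smi bit pattern, and the
// dropped bit travels separately as a genuine smi so the runtime can
// reconstruct the original char*:
//   p0    = (p1 & ~kSmiTagMask) + kSmiTag;   // GC-safe carrier for p1
//   delta = p1 - p0;                         // 0 or 1, passed as a smi
//   msg   = reinterpret_cast<const char*>(p0 + delta);  // runtime side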
@@ -2835,6 +2809,11 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
mov(r0, Operand(p0));
@@ -2969,7 +2948,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
b(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
@@ -3038,7 +3017,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi");
+ Check(ne, kOperandIsASmi);
}
}
@@ -3047,7 +3026,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(eq, "Operand is not smi");
+ Check(eq, kOperandIsNotSmi);
}
}
@@ -3056,12 +3035,12 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a string");
+ Check(ne, kOperandIsASmiAndNotAString);
push(object);
ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
pop(object);
- Check(lo, "Operand is not a string");
+ Check(lo, kOperandIsNotAString);
}
}
@@ -3070,12 +3049,12 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a name");
+ Check(ne, kOperandIsASmiAndNotAName);
push(object);
ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(object, object, LAST_NAME_TYPE);
pop(object);
- Check(le, "Operand is not a name");
+ Check(le, kOperandIsNotAName);
}
}
@@ -3084,7 +3063,7 @@ void MacroAssembler::AssertName(Register object) {
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
CompareRoot(reg, index);
- Check(eq, "HeapNumberMap register clobbered.");
+ Check(eq, kHeapNumberMapRegisterClobbered);
}
}
@@ -3230,7 +3209,7 @@ void MacroAssembler::CopyBytes(Register src,
bind(&word_loop);
if (emit_debug_code()) {
tst(src, Operand(kPointerSize - 1));
- Assert(eq, "Expecting alignment for CopyBytes");
+ Assert(eq, kExpectingAlignmentForCopyBytes);
}
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
@@ -3494,7 +3473,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
// Check that the instruction is a ldr reg, [pc + offset] .
and_(result, result, Operand(kLdrPCPattern));
cmp(result, Operand(kLdrPCPattern));
- Check(eq, "The instruction to patch should be a load from pc.");
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
@@ -3818,6 +3797,30 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
}
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -3842,10 +3845,13 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int instructions)
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -3855,7 +3861,9 @@ CodePatcher::CodePatcher(byte* address, int instructions)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ if (flush_cache_ == FLUSH) {
+ CPU::FlushICache(address_, size_);
+ }
// Check that the code was patched as expected.
ASSERT(masm_.pc_ == address_ + size_);
diff --git a/chromium/v8/src/arm/macro-assembler-arm.h b/chromium/v8/src/arm/macro-assembler-arm.h
index 38308e5cdef..9abd5a0c3da 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.h
+++ b/chromium/v8/src/arm/macro-assembler-arm.h
@@ -62,6 +62,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -144,6 +152,8 @@ class MacroAssembler: public Assembler {
Condition cond = al);
void Call(Label* target);
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
@@ -489,19 +499,6 @@ class MacroAssembler: public Assembler {
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
- // Converts the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- void ConvertNumberToInt32(Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch1,
- LowDwVfpRegister double_scratch2,
- Label* not_int32);
-
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
@@ -987,15 +984,34 @@ class MacroAssembler: public Assembler {
Label* exact);
// Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Jumps to 'done' if
+ // the conversion succeeds; falls through if the result is saturated. On
+ // return, 'result' either holds the answer or is clobbered on fall-through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DwVfpRegister input,
+ Label* done);
+
+ // Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Double_scratch must be between d0 and d15.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void ECMAToInt32(Register result,
- DwVfpRegister double_input,
- Register scratch,
- Register scratch_high,
- Register scratch_low,
- LowDwVfpRegister double_scratch);
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DwVfpRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMA-262 9.5: the value is truncated and
+ // brought into the range -2^31 .. +2^31 - 1. 'object' and 'result' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Label* not_int32);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
@@ -1095,7 +1111,6 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_fp);
// Jump to a runtime routine.
@@ -1136,14 +1151,14 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cond, const char* msg);
+ void Assert(Condition cond, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cond, const char* msg);
+ void Check(Condition cond, BailoutReason reason);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason reason);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
@@ -1414,7 +1429,14 @@ class MacroAssembler: public Assembler {
// an assertion to fail.
class CodePatcher {
public:
- CodePatcher(byte* address, int instructions);
+ enum FlushICache {
+ FLUSH,
+ DONT_FLUSH
+ };
+
+ CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
// Macro assembler to emit code.
@@ -1434,6 +1456,7 @@ class CodePatcher {
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
+ FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
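The TryInlineTruncateDoubleToI / TruncateDoubleToI split is a classic fast-path/slow-path shape: try the single hardware truncation first and only fall back to the full ECMA-262 ToInt32 routine when the result saturates. A rough scalar analogue of that control flow (ordinary C++, not the generated ARM code):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Slow path: full ECMA-262 9.5 ToInt32, i.e. truncation modulo 2^32.
    int32_t SlowToInt32(double d) {
      if (!std::isfinite(d)) return 0;  // NaN and +/-Infinity map to 0
      double m = std::fmod(std::trunc(d), 4294967296.0);
      if (m < 0) m += 4294967296.0;     // wrap into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int32_t TruncateDoubleToI(double input) {
      // Fast path: an in-range truncation is exact, like the inline vcvt.
      double t = std::trunc(input);
      if (t >= std::numeric_limits<int32_t>::min() &&
          t <= std::numeric_limits<int32_t>::max()) {
        return static_cast<int32_t>(t);  // "goes to done"
      }
      return SlowToInt32(input);         // saturated: take the stub path
    }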
diff --git a/chromium/v8/src/arm/regexp-macro-assembler-arm.cc b/chromium/v8/src/arm/regexp-macro-assembler-arm.cc
index 189ea8d7779..cbc34e10b95 100644
--- a/chromium/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/chromium/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -134,7 +134,6 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
- EmitBacktrackConstantPool();
__ bind(&start_label_); // And then continue from here.
}
@@ -872,7 +871,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -938,37 +937,8 @@ void RegExpMacroAssemblerARM::PopRegister(int register_index) {
}
-static bool is_valid_memory_offset(int value) {
- if (value < 0) value = -value;
- return value < (1<<12);
-}
-
-
void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- int constant_offset = GetBacktrackConstantPoolEntry();
- masm_->label_at_put(label, constant_offset);
- // Reading pc-relative is based on the address 8 bytes ahead of
- // the current opcode.
- unsigned int offset_of_pc_register_read =
- masm_->pc_offset() + Assembler::kPcLoadDelta;
- int pc_offset_of_constant =
- constant_offset - offset_of_pc_register_read;
- ASSERT(pc_offset_of_constant < 0);
- if (is_valid_memory_offset(pc_offset_of_constant)) {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
- } else {
- // Not a 12-bit offset, so it needs to be loaded from the constant
- // pool.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
- __ ldr(r0, MemOperand(pc, r0));
- }
- }
+ __ mov_label_offset(r0, label);
Push(r0);
CheckStackLimit();
}
@@ -1055,16 +1025,34 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ __ PrepareCallCFunction(3, scratch);
+
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
// Code* of self.
__ mov(r1, Operand(masm_->CodeObject()));
- // r0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ int stack_alignment = OS::ActivationFrameAlignment();
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ sub(sp, sp, Operand(stack_alignment));
+
+ // r0 will point to the return address, placed by DirectCEntry.
+ __ mov(r0, sp);
+
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ __ mov(ip, Operand(stack_guard_check));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, ip);
+
+ // Drop the return address from the stack.
+ __ add(sp, sp, Operand(stack_alignment));
+
+ ASSERT(stack_alignment != 0);
+ __ ldr(sp, MemOperand(sp, 0));
+
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
}
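The rewritten CallCheckStackGuardState reserves one activation-frame-aligned slot so DirectCEntry can place the return address where r0 points, then drops that slot after the call. A tiny host-side model of the reserve/drop bookkeeping (the sizes are assumptions for 32-bit ARM):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kPointerSize = 4;     // assumption: 32-bit ARM
      const uintptr_t stack_alignment = 8;  // assumption: EABI frame alignment
      assert(stack_alignment % kPointerSize == 0);

      uintptr_t sp = 0x1000;                // pretend stack pointer
      sp -= stack_alignment;                // make room for the return address
      uintptr_t r0 = sp;                    // r0 points at the reserved slot
      // ... DirectCEntry stores its return address at *r0 and calls out ...
      sp += stack_alignment;                // drop the slot again
      assert(sp == 0x1000 && r0 == sp - stack_alignment);
      return 0;
    }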
@@ -1079,7 +1067,6 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
@@ -1262,53 +1249,6 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
}
-void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
- __ CheckConstPool(false, false);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- backtrack_constant_pool_offset_ = masm_->pc_offset();
- for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
- __ emit(0);
- }
-
- backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
-}
-
-
-int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
- while (backtrack_constant_pool_capacity_ > 0) {
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- if (masm_->pc_offset() - offset < 2 * KB) {
- return offset;
- }
- }
- Label new_pool_skip;
- __ jmp(&new_pool_skip);
- EmitBacktrackConstantPool();
- __ bind(&new_pool_skip);
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- return offset;
-}
-
-
-void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ mov(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ ldr(sp, MemOperand(sp, 0));
- }
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
bool RegExpMacroAssemblerARM::CanReadUnaligned() {
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
}
@@ -1351,17 +1291,6 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the link register.
- __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
- __ mov(r0, sp);
- __ Call(r5);
- __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
-}
-
#undef __
#endif // V8_INTERPRETED_REGEXP
diff --git a/chromium/v8/src/arm/regexp-macro-assembler-arm.h b/chromium/v8/src/arm/regexp-macro-assembler-arm.h
index 1825752ebc2..9f07489e1fc 100644
--- a/chromium/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/chromium/v8/src/arm/regexp-macro-assembler-arm.h
@@ -160,9 +160,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
- void EmitBacktrackConstantPool();
- int GetBacktrackConstantPoolEntry();
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
@@ -212,14 +209,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
diff --git a/chromium/v8/src/arm/simulator-arm.cc b/chromium/v8/src/arm/simulator-arm.cc
index c9e3616d9da..def18186305 100644
--- a/chromium/v8/src/arm/simulator-arm.cc
+++ b/chromium/v8/src/arm/simulator-arm.cc
@@ -1686,20 +1686,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingApiCall)(
- int32_t arg0, int32_t arg1);
-typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
-typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
- int32_t arg1);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
-typedef void (*SimulatorRuntimeProfilingGetterCallNew)(
+typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
int32_t arg0, int32_t arg1, int32_t arg2);
// Software interrupt instructions are used by the simulator to call into the
@@ -1839,9 +1831,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
- } else if (
- redirection->type() == ExternalReference::DIRECT_API_CALL ||
- redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x",
reinterpret_cast<void*>(external), arg0);
@@ -1851,22 +1841,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeDirectApiCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
- target(arg0);
- }
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
} else if (
- redirection->type() == ExternalReference::PROFILING_API_CALL ||
- redirection->type() == ExternalReference::PROFILING_API_CALL_NEW) {
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1);
@@ -1876,22 +1855,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeProfilingApiCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
- target(arg0, arg1);
- }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1);
@@ -1901,22 +1869,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeDirectGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
- target(arg0, arg1);
- }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL ||
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL_NEW) {
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x %08x",
reinterpret_cast<void*>(external), arg0, arg1, arg2);
@@ -1926,20 +1883,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
- SimulatorRuntimeProfilingGetterCall target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg0, arg1, arg2);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- SimulatorRuntimeProfilingGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCallNew>(
- external);
- target(arg0, arg1, arg2);
- }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
+ external);
+ target(arg0, arg1, arg2);
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
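With the *_NEW variants folded in, only the void-returning callback shapes survive, so the simulator no longer copies a v8::Handle result into r0 after the call. The cast-and-call itself is an ordinary function-pointer round trip; a self-contained illustration (the callback here is a local stand-in for the redirected host function):

    #include <cstdint>
    #include <cstdio>

    typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);

    static void MyCallback(int32_t arg0) {
      // Results travel through the callback info object, not a return value.
      std::printf("callback(%d)\n", static_cast<int>(arg0));
    }

    int main() {
      // The simulator stores the host function as an opaque address...
      intptr_t external = reinterpret_cast<intptr_t>(&MyCallback);
      // ...and casts it back to the one surviving, void-returning signature.
      SimulatorRuntimeDirectApiCall target =
          reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
      target(42);  // nothing to copy into r0 afterwards
      return 0;
    }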
diff --git a/chromium/v8/src/arm/stub-cache-arm.cc b/chromium/v8/src/arm/stub-cache-arm.cc
index f7fa9efaca7..ba3d362804e 100644
--- a/chromium/v8/src/arm/stub-cache-arm.cc
+++ b/chromium/v8/src/arm/stub-cache-arm.cc
@@ -121,18 +121,14 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be unique and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register scratch0,
- Register scratch1) {
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -422,12 +418,12 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<Cell> cell = GlobalObject::EnsurePropertyCell(global, name);
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Operand(cell));
__ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
@@ -445,7 +441,7 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
Label* miss) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
- masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss, holder_reg, name, scratch1(), scratch2());
@@ -785,6 +781,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -793,10 +794,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(scratch);
__ push(receiver);
__ push(holder);
- __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ push(scratch);
}
@@ -811,7 +808,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ mov(r0, Operand(6));
+ __ mov(r0, Operand(StubCache::kInterceptorArgsLength));
__ mov(r1, Operand(ref));
CEntryStub stub(1);
@@ -903,23 +900,13 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
ApiFunction fun(function_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_API_CALL :
- ExternalReference::DIRECT_API_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref = ExternalReference(&fun,
type,
masm->isolate());
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_API_CALL :
- ExternalReference::PROFILING_API_CALL_NEW;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
masm->isolate());
@@ -930,11 +917,39 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
thunk_ref,
r1,
kStackUnwindSpace,
- returns_handle,
kFastApiCallArguments + 1);
}
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Assign stack space for the call arguments.
+ __ sub(sp, sp, Operand(stack_space * kPointerSize));
+ // Write holder to stack frame.
+ __ str(receiver, MemOperand(sp, 0));
+ // Write receiver to stack frame.
+ int index = stack_space - 1;
+ __ str(receiver, MemOperand(sp, index * kPointerSize));
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ str(values[i], MemOperand(sp, --index * kPointerSize));
+ }
+
+ GenerateFastApiDirectCall(masm, optimization, argc);
+}
+
+
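GenerateFastApiCall lays the holder in the bottom slot of the freshly reserved region, the receiver in the top slot, and the argc extra values filling downward in between. A quick host-side model of that index arithmetic, with a plain array standing in for the stack (the value of kFastApiCallArguments is an assumption):

    #include <cassert>

    int main() {
      const int kFastApiCallArguments = 4;  // assumed fixed slot count
      const int argc = 2;
      const int stack_space = kFastApiCallArguments + argc + 1;  // == 7

      int slots[stack_space];          // stands in for the reserved sp area
      slots[0] = 100;                  // holder goes in the bottom slot, sp[0]
      int index = stack_space - 1;
      slots[index] = 200;              // receiver goes in the top slot
      int values[] = {301, 302};       // the argc extra arguments
      for (int i = 0; i < argc; i++) {
        slots[--index] = values[i];    // arguments fill downward, below the
      }                                // receiver slot
      assert(slots[6] == 200 && slots[5] == 301 && slots[4] == 302);
      return 0;
    }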
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -1092,7 +1107,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -1128,19 +1143,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -1150,21 +1163,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If VFP3 is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register scratch1) {
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1318,7 +1316,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1359,26 +1357,6 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- HandlerFrontendFooter(name, success, &miss);
-}
-
-
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
@@ -1406,10 +1384,26 @@ void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ ASSERT(!scratch2().is(reg));
+ ASSERT(!scratch3().is(reg));
+ ASSERT(!scratch4().is(reg));
__ push(receiver());
__ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
@@ -1419,13 +1413,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
} else {
__ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
}
- __ Push(reg, scratch3());
+ __ push(scratch3());
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ mov(scratch4(), scratch3());
__ Push(scratch3(), scratch4());
__ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), name());
+ __ Push(scratch4(), reg, name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
@@ -1439,23 +1433,14 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kStackUnwindSpace = kFastApiCallArguments + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
ApiFunction fun(getter_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_GETTER_CALL :
- ExternalReference::DIRECT_GETTER_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_GETTER_CALL :
- ExternalReference::PROFILING_GETTER_CALL_NEW;
+ ExternalReference::PROFILING_GETTER_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
isolate());
@@ -1464,8 +1449,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_ref,
r2,
kStackUnwindSpace,
- returns_handle,
- 5);
+ 6);
}
@@ -1553,7 +1537,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -2811,6 +2795,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2890,48 +2892,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetICCode(kind(), Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- __ cmp(scratch1(), Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ mov(scratch1(), Operand(cell));
- __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
- __ ldr(scratch3(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- __ cmp(scratch3(), scratch2());
- __ b(eq, &miss);
-
- // Store the value in the cell.
- __ str(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(
- counters->named_store_global_inline(), 1, scratch1(), scratch2());
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(
- counters->named_store_global_inline_miss(), 1, scratch1(), scratch2());
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
@@ -2939,7 +2900,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
- Handle<GlobalObject> global) {
+ Handle<JSGlobalObject> global) {
Label success;
NonexistentHandlerFrontend(object, last, name, &success, global);
@@ -3190,509 +3151,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- DwVfpRegister double_scratch0,
- LowDwVfpRegister double_scratch1,
- Label* fail) {
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ sub(ip, key, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
- __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
- __ b(ne, fail);
- __ TrySmiTag(key, scratch0, fail);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ UntagAndJumpIfNotSmi(r5, value, &slow);
- } else {
- __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
- }
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r5: value (integer).
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- // Clamp the value to [0..255].
- __ Usat(r5, 8, Operand(r5));
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(r4, key);
- StoreIntAsFloat(masm, r3, r4, r5, r7);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ vmov(s2, r5);
- __ vcvt_f64_s32(d0, s2);
- __ add(r3, r3, Operand(key, LSL, 2));
- // r3: effective address of the double element
- __ vstr(d0, r3, 0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // r3: external array.
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 1));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 2));
- __ vstr(d0, r5, 0);
- } else {
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can
- // not include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ ECMAToInt32(r5, d0, r6, r7, r9, d1);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r4;
- Register elements_reg = r3;
- Register length_reg = r5;
- Register scratch2 = r6;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
- __ str(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
- __ str(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(length_reg, scratch);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch (elements backing store)
- // -- r4 : scratch
- // -- r5 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register elements_reg = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
- Register length_reg = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
-
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- __ cmp(key_reg, scratch1);
- if (IsGrowStoreMode(store_mode)) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
- scratch1, d0, &transition_elements_kind);
- __ Ret();
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags already set by previous compare.
- __ b(ne, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
-
- // Initialize the new FixedDoubleArray.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch1,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
- scratch2, d0, &transition_elements_kind);
-
- __ mov(scratch1, Operand(kHoleNanLower32));
- __ mov(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ str(scratch1, FieldMemOperand(elements_reg, offset));
- __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ cmp(length_reg, scratch1);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/array-iterator.js b/chromium/v8/src/array-iterator.js
index 8f1ab47b8a2..defd7342ab2 100644
--- a/chromium/v8/src/array-iterator.js
+++ b/chromium/v8/src/array-iterator.js
@@ -77,16 +77,15 @@ function ArrayIteratorNext() {
return CreateIteratorResultObject(void 0, true);
}
- var elementKey = ToString(index);
iterator[arrayIteratorNextIndexSymbol] = index + 1;
if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
- return CreateIteratorResultObject(array[elementKey], false);
+ return CreateIteratorResultObject(array[index], false);
if (itemKind == ARRAY_ITERATOR_KIND_ENTRIES)
- return CreateIteratorResultObject([elementKey, array[elementKey]], false);
+ return CreateIteratorResultObject([index, array[index]], false);
- return CreateIteratorResultObject(elementKey, false);
+ return CreateIteratorResultObject(index, false);
}
function ArrayEntries() {
diff --git a/chromium/v8/src/assembler.cc b/chromium/v8/src/assembler.cc
index ae8a0b58ba8..fbff62dd65e 100644
--- a/chromium/v8/src/assembler.cc
+++ b/chromium/v8/src/assembler.cc
@@ -43,7 +43,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic.h"
-#include "isolate.h"
+#include "isolate-inl.h"
#include "jsregexp.h"
#include "lazy-instance.h"
#include "platform.h"
@@ -119,7 +119,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
- jit_cookie_ = V8::RandomPrivate(isolate);
+ jit_cookie_ = isolate->random_number_generator()->NextInt();
}
if (buffer == NULL) {
@@ -798,7 +798,7 @@ void RelocInfo::Print(Isolate* isolate, FILE* out) {
target_object()->ShortPrint(out);
PrintF(out, ")");
} else if (rmode_ == EXTERNAL_REFERENCE) {
- ExternalReferenceEncoder ref_encoder;
+ ExternalReferenceEncoder ref_encoder(isolate);
PrintF(out, " (%s) (%p)",
ref_encoder.NameOfAddress(*target_reference_address()),
*target_reference_address());
@@ -891,7 +891,7 @@ void ExternalReference::SetUp() {
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
- math_exp_data_mutex = OS::CreateMutex();
+ math_exp_data_mutex = new Mutex();
}
@@ -899,7 +899,7 @@ void ExternalReference::InitializeMathExpData() {
// Early return?
if (math_exp_data_initialized) return;
- math_exp_data_mutex->Lock();
+ LockGuard<Mutex> lock_guard(math_exp_data_mutex);
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
@@ -935,7 +935,6 @@ void ExternalReference::InitializeMathExpData() {
math_exp_data_initialized = true;
}
- math_exp_data_mutex->Unlock();
}
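The Lock/Unlock pair becomes a scoped LockGuard, keeping the classic double-checked initialization: test the flag, take the lock, test again. A standalone sketch with standard C++ primitives in place of V8's Mutex and LockGuard (as in the diff, the flag is a plain bool; a fully portable version would use std::atomic&lt;bool&gt;):

    #include <mutex>

    static bool math_data_initialized = false;
    static std::mutex* math_data_mutex = new std::mutex;

    void InitializeMathData() {
      if (math_data_initialized) return;  // cheap early return, no lock taken
      std::lock_guard<std::mutex> guard(*math_data_mutex);
      if (!math_data_initialized) {       // re-check now that we hold the lock
        // ... build the lookup tables exactly once ...
        math_data_initialized = true;
      }
    }                                     // guard unlocks on every exit path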
diff --git a/chromium/v8/src/assembler.h b/chromium/v8/src/assembler.h
index d70d5aa928b..6b399f20823 100644
--- a/chromium/v8/src/assembler.h
+++ b/chromium/v8/src/assembler.h
@@ -196,7 +196,6 @@ class Label BASE_EMBEDDED {
}
friend class Assembler;
- friend class RegexpAssembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
};
@@ -425,7 +424,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Object** call_object_address());
template<typename StaticVisitor> inline void Visit(Heap* heap);
- inline void Visit(ObjectVisitor* v);
+ inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Patch the code with some other code.
void PatchCode(byte* instructions, int instruction_count);
@@ -644,38 +643,21 @@ class ExternalReference BASE_EMBEDDED {
BUILTIN_FP_INT_CALL,
// Direct call to API function callback.
- // Handle<Value> f(v8::Arguments&)
+ // void f(v8::FunctionCallbackInfo&)
DIRECT_API_CALL,
- // Call to invocation callback via InvokeInvocationCallback.
- // Handle<Value> f(v8::Arguments&, v8::InvocationCallback)
- PROFILING_API_CALL,
-
- // Direct call to API function callback.
- // void f(v8::Arguments&)
- DIRECT_API_CALL_NEW,
-
// Call to function callback via InvokeFunctionCallback.
- // void f(v8::Arguments&, v8::FunctionCallback)
- PROFILING_API_CALL_NEW,
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ PROFILING_API_CALL,
// Direct call to accessor getter callback.
- // Handle<value> f(Local<String> property, AccessorInfo& info)
+ // void f(Local<String> property, PropertyCallbackInfo& info)
DIRECT_GETTER_CALL,
- // Call to accessor getter callback via InvokeAccessorGetter.
- // Handle<value> f(Local<String> property, AccessorInfo& info,
- // AccessorGetter getter)
- PROFILING_GETTER_CALL,
-
- // Direct call to accessor getter callback.
- // void f(Local<String> property, AccessorInfo& info)
- DIRECT_GETTER_CALL_NEW,
-
// Call to accessor getter callback via InvokeAccessorGetterCallback.
- // void f(Local<String> property, AccessorInfo& info,
+ // void f(Local<String> property, PropertyCallbackInfo& info,
// AccessorGetterCallback callback)
- PROFILING_GETTER_CALL_NEW
+ PROFILING_GETTER_CALL
};
static void SetUp();
@@ -708,7 +690,7 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(const SCTableReference& table_ref);
- // Isolate::Current() as an external reference.
+ // Isolate as an external reference.
static ExternalReference isolate_address(Isolate* isolate);
// One-of-a-kind references. These references are not part of a general
diff --git a/chromium/v8/src/assert-scope.h b/chromium/v8/src/assert-scope.h
index 13adbd0f9c5..269b280d027 100644
--- a/chromium/v8/src/assert-scope.h
+++ b/chromium/v8/src/assert-scope.h
@@ -41,6 +41,7 @@ enum PerThreadAssertType {
HANDLE_ALLOCATION_ASSERT,
HANDLE_DEREFERENCE_ASSERT,
DEFERRED_HANDLE_DEREFERENCE_ASSERT,
+ CODE_DEPENDENCY_CHANGE_ASSERT,
LAST_PER_THREAD_ASSERT_TYPE
};
@@ -170,6 +171,14 @@ typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
AllowDeferredHandleDereference;
+// Scope to document where we do not expect code dependencies to change.
+typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>
+ DisallowCodeDependencyChange;
+
+// Scope to introduce an exception to DisallowCodeDependencyChange.
+typedef PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>
+ AllowCodeDependencyChange;
+
} } // namespace v8::internal
#endif // V8_ASSERT_SCOPE_H_
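The new CODE_DEPENDENCY_CHANGE_ASSERT pair follows the same RAII recipe as the other per-thread assert scopes: the constructor flips a per-thread flag, the destructor restores the previous value, and checks elsewhere consult the flag. A simplified single-flag sketch (thread_local stands in for V8's per-thread assert data):

    #include <cassert>

    thread_local bool allow_code_dependency_change = true;

    template <bool kAllow>
    class CodeDependencyChangeScope {
     public:
      CodeDependencyChangeScope() : old_(allow_code_dependency_change) {
        allow_code_dependency_change = kAllow;  // flip for this scope...
      }
      ~CodeDependencyChangeScope() {
        allow_code_dependency_change = old_;    // ...restore on scope exit
      }
     private:
      bool old_;
    };

    typedef CodeDependencyChangeScope<false> DisallowCodeDependencyChange;
    typedef CodeDependencyChangeScope<true> AllowCodeDependencyChange;

    void ChangeCodeDependency() {
      assert(allow_code_dependency_change);  // the check the scopes guard
    }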
diff --git a/chromium/v8/src/ast.cc b/chromium/v8/src/ast.cc
index e0bca67aab1..823dedee098 100644
--- a/chromium/v8/src/ast.cc
+++ b/chromium/v8/src/ast.cc
@@ -273,7 +273,8 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
uint32_t hash = literal->Hash();
// If the key of a computed property is in the table, do not emit
// a store for the property later.
- if (property->kind() == ObjectLiteral::Property::COMPUTED &&
+ if ((property->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL ||
+ property->kind() == ObjectLiteral::Property::COMPUTED) &&
table.Lookup(literal, hash, false, allocator) != NULL) {
property->set_emit_store(false);
} else {
@@ -304,17 +305,6 @@ void UnaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
-bool UnaryOperation::ResultOverwriteAllowed() {
- switch (op_) {
- case Token::BIT_NOT:
- case Token::SUB:
- return true;
- default:
- return false;
- }
-}
-
-
void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
// TODO(olivf) If this Operation is used in a test context, then the right
// hand side has a ToBoolean stub and we want to collect the type information.
@@ -609,7 +599,7 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!HEAP->InNewSpace(*candidate)) {
+ if (!lookup->isolate()->heap()->InNewSpace(*candidate)) {
target_ = candidate;
return true;
}
@@ -656,8 +646,15 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->value()->IsString());
Handle<String> name = Handle<String>::cast(key->value());
+ check_type_ = oracle->GetCallCheckType(this);
receiver_types_.Clear();
- oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ if (check_type_ == RECEIVER_MAP_CHECK) {
+ oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+ is_monomorphic_ = is_monomorphic_ && receiver_types_.length() > 0;
+ } else {
+ holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
+ receiver_types_.Add(handle(holder_->map()), oracle->zone());
+ }
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
@@ -667,17 +664,8 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
}
}
#endif
- check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
- Handle<Map> map;
- if (receiver_types_.length() > 0) {
- ASSERT(check_type_ == RECEIVER_MAP_CHECK);
- map = receiver_types_.at(0);
- } else {
- ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
- map = Handle<Map>(holder_->map());
- }
+ Handle<Map> map = receiver_types_.first();
is_monomorphic_ = ComputeTarget(map, name);
}
}
@@ -718,7 +706,9 @@ void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
+ Statement* stmt = statements->at(i);
+ Visit(stmt);
+ if (stmt->IsJump()) break;
}
}
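
With this early exit, statements following an unconditional jump are never visited. The same idea in miniature, with toy types rather than V8's:

    #include <vector>

    struct Stmt {
      virtual ~Stmt() {}
      virtual bool IsJump() const { return false; }  // break/continue/return/throw
      virtual void Accept() {}                       // visit this statement
    };

    void VisitStatements(const std::vector<Stmt*>& statements) {
      for (size_t i = 0; i < statements.size(); ++i) {
        Stmt* stmt = statements[i];
        stmt->Accept();
        if (stmt->IsJump()) break;  // Everything after a jump is unreachable.
      }
    }
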
@@ -868,12 +858,13 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect
// parses to look as correct ones which is likely if the input and
// output formats are alike.
-class RegExpUnparser: public RegExpVisitor {
+class RegExpUnparser V8_FINAL : public RegExpVisitor {
public:
explicit RegExpUnparser(Zone* zone);
void VisitCharacterRange(CharacterRange that);
SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
-#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
+#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
+ void* data) V8_OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
@@ -971,12 +962,12 @@ void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
if (that->elements()->length() == 1) {
- that->elements()->at(0).data.u_atom->Accept(this, data);
+ that->elements()->at(0).tree()->Accept(this, data);
} else {
stream()->Add("(!");
for (int i = 0; i < that->elements()->length(); i++) {
stream()->Add(" ");
- that->elements()->at(i).data.u_atom->Accept(this, data);
+ that->elements()->at(i).tree()->Accept(this, data);
}
stream()->Add(")");
}
@@ -1092,7 +1083,7 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
- add_flag(kDontOptimize); \
+ set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
}
@@ -1104,7 +1095,7 @@ CaseClause::CaseClause(Isolate* isolate,
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
- add_flag(kDontOptimize); \
+ set_dont_optimize_reason(k##NodeType); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
@@ -1190,7 +1181,6 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
Handle<String> Literal::ToString() {
if (value_->IsString()) return Handle<String>::cast(value_);
- Factory* factory = Isolate::Current()->factory();
ASSERT(value_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
@@ -1202,7 +1192,7 @@ Handle<String> Literal::ToString() {
} else {
str = DoubleToCString(value_->Number(), buffer);
}
- return factory->NewStringFromAscii(CStrVector(str));
+ return isolate_->factory()->NewStringFromAscii(CStrVector(str));
}
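
Both ast.cc hunks above apply the same isolate-threading pattern: capture the Isolate* where it is already known instead of reaching for a process-global accessor later. In miniature, with toy types:

    struct Isolate { /* factory(), heap(), ... */ };

    class Node {
     public:
      explicit Node(Isolate* isolate) : isolate_(isolate) {}
      // Later heap access uses the captured pointer; no Isolate::Current().
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };
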
diff --git a/chromium/v8/src/ast.h b/chromium/v8/src/ast.h
index f14156f93c6..c63090687b9 100644
--- a/chromium/v8/src/ast.h
+++ b/chromium/v8/src/ast.h
@@ -123,10 +123,6 @@ namespace internal {
STATEMENT_NODE_LIST(V) \
EXPRESSION_NODE_LIST(V)
-#ifdef WIN32
-#undef Yield
-#endif
-
// Forward declarations
class AstConstructionVisitor;
template<class> class AstNodeFactory;
@@ -165,22 +161,23 @@ typedef ZoneList<Handle<String> > ZoneStringList;
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::NodeType node_type() const { return AstNode::k##type; } \
+#define DECLARE_NODE_TYPE(type) \
+ virtual void Accept(AstVisitor* v) V8_OVERRIDE; \
+ virtual AstNode::NodeType node_type() const V8_FINAL V8_OVERRIDE { \
+ return AstNode::k##type; \
+ } \
template<class> friend class AstNodeFactory;
enum AstPropertiesFlag {
kDontInline,
- kDontOptimize,
kDontSelfOptimize,
kDontSoftInline,
kDontCache
};
-class AstProperties BASE_EMBEDDED {
+class AstProperties V8_FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
@@ -209,9 +206,9 @@ class AstNode: public ZoneObject {
return zone->New(static_cast<int>(size));
}
- AstNode() { }
+ AstNode() {}
- virtual ~AstNode() { }
+ virtual ~AstNode() {}
virtual void Accept(AstVisitor* v) = 0;
virtual NodeType node_type() const = 0;
@@ -254,11 +251,12 @@ class AstNode: public ZoneObject {
};
-class Statement: public AstNode {
+class Statement : public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
bool IsEmpty() { return AsEmptyStatement() != NULL; }
+ virtual bool IsJump() const { return false; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
int statement_pos() const { return statement_pos_; }
@@ -268,7 +266,7 @@ class Statement: public AstNode {
};
-class SmallMapList {
+class SmallMapList V8_FINAL {
public:
SmallMapList() {}
SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
@@ -290,8 +288,15 @@ class SmallMapList {
Add(map, zone);
}
+ void FilterForPossibleTransitions(Map* root_map) {
+ for (int i = list_.length() - 1; i >= 0; i--) {
+ if (at(i)->FindRootMap() != root_map) {
+ list_.RemoveElement(list_.at(i));
+ }
+ }
+ }
+
void Add(Handle<Map> handle, Zone* zone) {
- ASSERT(!handle->is_deprecated());
list_.Add(handle.location(), zone);
}
@@ -310,7 +315,7 @@ class SmallMapList {
};
-class Expression: public AstNode {
+class Expression : public AstNode {
public:
enum Context {
// Not assigned a context yet, or else will not be visited during
@@ -369,12 +374,6 @@ class Expression: public AstNode {
UNREACHABLE();
return NULL;
}
- Handle<Map> GetMonomorphicReceiverType() {
- ASSERT(IsMonomorphic());
- SmallMapList* types = GetReceiverTypes();
- ASSERT(types != NULL && types->length() == 1);
- return types->at(0);
- }
virtual KeyedAccessStoreMode GetStoreMode() {
UNREACHABLE();
return STANDARD_STORE;
@@ -389,7 +388,7 @@ class Expression: public AstNode {
protected:
explicit Expression(Isolate* isolate)
- : bounds_(Type::None(), Type::Any(), isolate),
+ : bounds_(Bounds::Unbounded(isolate)),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
@@ -403,7 +402,7 @@ class Expression: public AstNode {
};
-class BreakableStatement: public Statement {
+class BreakableStatement : public Statement {
public:
enum BreakableType {
TARGET_FOR_ANONYMOUS,
@@ -415,7 +414,9 @@ class BreakableStatement: public Statement {
ZoneStringList* labels() const { return labels_; }
// Type testing & conversion.
- virtual BreakableStatement* AsBreakableStatement() { return this; }
+ virtual BreakableStatement* AsBreakableStatement() V8_FINAL V8_OVERRIDE {
+ return this;
+ }
// Code generation
Label* break_target() { return &break_target_; }
@@ -448,7 +449,7 @@ class BreakableStatement: public Statement {
};
-class Block: public BreakableStatement {
+class Block V8_FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
@@ -459,6 +460,11 @@ class Block: public BreakableStatement {
ZoneList<Statement*>* statements() { return &statements_; }
bool is_initializer_block() const { return is_initializer_block_; }
+ virtual bool IsJump() const V8_OVERRIDE {
+ return !statements_.is_empty() && statements_.last()->IsJump()
+ && labels() == NULL; // Good enough as an approximation...
+ }
+
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
@@ -481,7 +487,7 @@ class Block: public BreakableStatement {
};
-class Declaration: public AstNode {
+class Declaration : public AstNode {
public:
VariableProxy* proxy() const { return proxy_; }
VariableMode mode() const { return mode_; }
@@ -508,11 +514,11 @@ class Declaration: public AstNode {
};
-class VariableDeclaration: public Declaration {
+class VariableDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(VariableDeclaration)
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
}
@@ -525,15 +531,15 @@ class VariableDeclaration: public Declaration {
};
-class FunctionDeclaration: public Declaration {
+class FunctionDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(FunctionDeclaration)
FunctionLiteral* fun() const { return fun_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
- virtual bool IsInlineable() const;
+ virtual bool IsInlineable() const V8_OVERRIDE;
protected:
FunctionDeclaration(VariableProxy* proxy,
@@ -552,12 +558,12 @@ class FunctionDeclaration: public Declaration {
};
-class ModuleDeclaration: public Declaration {
+class ModuleDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ModuleDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
@@ -574,12 +580,12 @@ class ModuleDeclaration: public Declaration {
};
-class ImportDeclaration: public Declaration {
+class ImportDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ImportDeclaration)
Module* module() const { return module_; }
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
@@ -596,11 +602,11 @@ class ImportDeclaration: public Declaration {
};
-class ExportDeclaration: public Declaration {
+class ExportDeclaration V8_FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ExportDeclaration)
- virtual InitializationFlag initialization() const {
+ virtual InitializationFlag initialization() const V8_OVERRIDE {
return kCreatedInitialized;
}
@@ -610,7 +616,7 @@ class ExportDeclaration: public Declaration {
};
-class Module: public AstNode {
+class Module : public AstNode {
public:
Interface* interface() const { return interface_; }
Block* body() const { return body_; }
@@ -629,7 +635,7 @@ class Module: public AstNode {
};
-class ModuleLiteral: public Module {
+class ModuleLiteral V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleLiteral)
@@ -638,7 +644,7 @@ class ModuleLiteral: public Module {
};
-class ModuleVariable: public Module {
+class ModuleVariable V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleVariable)
@@ -652,7 +658,7 @@ class ModuleVariable: public Module {
};
-class ModulePath: public Module {
+class ModulePath V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModulePath)
@@ -672,7 +678,7 @@ class ModulePath: public Module {
};
-class ModuleUrl: public Module {
+class ModuleUrl V8_FINAL : public Module {
public:
DECLARE_NODE_TYPE(ModuleUrl)
@@ -688,7 +694,7 @@ class ModuleUrl: public Module {
};
-class ModuleStatement: public Statement {
+class ModuleStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ModuleStatement)
@@ -707,10 +713,12 @@ class ModuleStatement: public Statement {
};
-class IterationStatement: public BreakableStatement {
+class IterationStatement : public BreakableStatement {
public:
// Type testing & conversion.
- virtual IterationStatement* AsIterationStatement() { return this; }
+ virtual IterationStatement* AsIterationStatement() V8_FINAL V8_OVERRIDE {
+ return this;
+ }
Statement* body() const { return body_; }
@@ -740,7 +748,7 @@ class IterationStatement: public BreakableStatement {
};
-class DoWhileStatement: public IterationStatement {
+class DoWhileStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(DoWhileStatement)
@@ -756,8 +764,8 @@ class DoWhileStatement: public IterationStatement {
int condition_position() { return condition_position_; }
void set_condition_position(int pos) { condition_position_ = pos; }
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return back_edge_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
@@ -779,7 +787,7 @@ class DoWhileStatement: public IterationStatement {
};
-class WhileStatement: public IterationStatement {
+class WhileStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(WhileStatement)
@@ -796,8 +804,8 @@ class WhileStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
protected:
@@ -818,7 +826,7 @@ class WhileStatement: public IterationStatement {
};
-class ForStatement: public IterationStatement {
+class ForStatement V8_FINAL : public IterationStatement {
public:
DECLARE_NODE_TYPE(ForStatement)
@@ -843,8 +851,8 @@ class ForStatement: public IterationStatement {
may_have_function_literal_ = value;
}
- virtual BailoutId ContinueId() const { return continue_id_; }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
BailoutId BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
@@ -877,7 +885,7 @@ class ForStatement: public IterationStatement {
};
-class ForEachStatement: public IterationStatement {
+class ForEachStatement : public IterationStatement {
public:
enum VisitMode {
ENUMERATE, // for (each in subject) body;
@@ -906,7 +914,7 @@ class ForEachStatement: public IterationStatement {
};
-class ForInStatement: public ForEachStatement {
+class ForInStatement V8_FINAL : public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForInStatement)
@@ -921,8 +929,8 @@ class ForInStatement: public ForEachStatement {
BailoutId BodyId() const { return body_id_; }
BailoutId PrepareId() const { return prepare_id_; }
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return body_id_; }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
protected:
ForInStatement(Isolate* isolate, ZoneStringList* labels)
@@ -938,7 +946,7 @@ class ForInStatement: public ForEachStatement {
};
-class ForOfStatement: public ForEachStatement {
+class ForOfStatement V8_FINAL : public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForOfStatement)
@@ -980,8 +988,8 @@ class ForOfStatement: public ForEachStatement {
return assign_each_;
}
- virtual BailoutId ContinueId() const { return EntryId(); }
- virtual BailoutId StackCheckId() const { return BackEdgeId(); }
+ virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
+ virtual BailoutId StackCheckId() const V8_OVERRIDE { return BackEdgeId(); }
BailoutId BackEdgeId() const { return back_edge_id_; }
@@ -1003,12 +1011,13 @@ class ForOfStatement: public ForEachStatement {
};
-class ExpressionStatement: public Statement {
+class ExpressionStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(ExpressionStatement)
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
+ virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
protected:
explicit ExpressionStatement(Expression* expression)
@@ -1019,7 +1028,16 @@ class ExpressionStatement: public Statement {
};
-class ContinueStatement: public Statement {
+class JumpStatement : public Statement {
+ public:
+ virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
+
+ protected:
+ JumpStatement() {}
+};
+
+
+class ContinueStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ContinueStatement)
@@ -1034,7 +1052,7 @@ class ContinueStatement: public Statement {
};
-class BreakStatement: public Statement {
+class BreakStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(BreakStatement)
@@ -1049,7 +1067,7 @@ class BreakStatement: public Statement {
};
-class ReturnStatement: public Statement {
+class ReturnStatement V8_FINAL : public JumpStatement {
public:
DECLARE_NODE_TYPE(ReturnStatement)
@@ -1064,7 +1082,7 @@ class ReturnStatement: public Statement {
};
-class WithStatement: public Statement {
+class WithStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(WithStatement)
@@ -1085,7 +1103,7 @@ class WithStatement: public Statement {
};
-class CaseClause: public ZoneObject {
+class CaseClause V8_FINAL : public ZoneObject {
public:
CaseClause(Isolate* isolate,
Expression* label,
@@ -1122,7 +1140,7 @@ class CaseClause: public ZoneObject {
};
-class SwitchStatement: public BreakableStatement {
+class SwitchStatement V8_FINAL : public BreakableStatement {
public:
DECLARE_NODE_TYPE(SwitchStatement)
@@ -1157,7 +1175,7 @@ class SwitchStatement: public BreakableStatement {
// the parser implicitly creates an empty statement. Use the
// HasThenStatement() and HasElseStatement() functions to check if a
// given if-statement has a then- or an else-part containing code.
-class IfStatement: public Statement {
+class IfStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(IfStatement)
@@ -1168,6 +1186,11 @@ class IfStatement: public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
+ virtual bool IsJump() const V8_OVERRIDE {
+ return HasThenStatement() && then_statement()->IsJump()
+ && HasElseStatement() && else_statement()->IsJump();
+ }
+
BailoutId IfId() const { return if_id_; }
BailoutId ThenId() const { return then_id_; }
BailoutId ElseId() const { return else_id_; }
@@ -1197,7 +1220,7 @@ class IfStatement: public Statement {
// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
-class TargetCollector: public AstNode {
+class TargetCollector V8_FINAL : public AstNode {
public:
explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
@@ -1207,9 +1230,9 @@ class TargetCollector: public AstNode {
void AddTarget(Label* target, Zone* zone);
// Virtual behaviour. TargetCollectors are never part of the AST.
- virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
- virtual NodeType node_type() const { return kInvalid; }
- virtual TargetCollector* AsTargetCollector() { return this; }
+ virtual void Accept(AstVisitor* v) V8_OVERRIDE { UNREACHABLE(); }
+ virtual NodeType node_type() const V8_OVERRIDE { return kInvalid; }
+ virtual TargetCollector* AsTargetCollector() V8_OVERRIDE { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1218,7 +1241,7 @@ class TargetCollector: public AstNode {
};
-class TryStatement: public Statement {
+class TryStatement : public Statement {
public:
void set_escaping_targets(ZoneList<Label*>* targets) {
escaping_targets_ = targets;
@@ -1243,7 +1266,7 @@ class TryStatement: public Statement {
};
-class TryCatchStatement: public TryStatement {
+class TryCatchStatement V8_FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryCatchStatement)
@@ -1270,7 +1293,7 @@ class TryCatchStatement: public TryStatement {
};
-class TryFinallyStatement: public TryStatement {
+class TryFinallyStatement V8_FINAL : public TryStatement {
public:
DECLARE_NODE_TYPE(TryFinallyStatement)
@@ -1286,7 +1309,7 @@ class TryFinallyStatement: public TryStatement {
};
-class DebuggerStatement: public Statement {
+class DebuggerStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(DebuggerStatement)
@@ -1295,7 +1318,7 @@ class DebuggerStatement: public Statement {
};
-class EmptyStatement: public Statement {
+class EmptyStatement V8_FINAL : public Statement {
public:
DECLARE_NODE_TYPE(EmptyStatement)
@@ -1304,11 +1327,11 @@ class EmptyStatement: public Statement {
};
-class Literal: public Expression {
+class Literal V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Literal)
- virtual bool IsPropertyName() {
+ virtual bool IsPropertyName() V8_OVERRIDE {
if (value_->IsInternalizedString()) {
uint32_t ignored;
return !String::cast(*value_)->AsArrayIndex(&ignored);
@@ -1321,8 +1344,12 @@ class Literal: public Expression {
return Handle<String>::cast(value_);
}
- virtual bool ToBooleanIsTrue() { return value_->BooleanValue(); }
- virtual bool ToBooleanIsFalse() { return !value_->BooleanValue(); }
+ virtual bool ToBooleanIsTrue() V8_OVERRIDE {
+ return value_->BooleanValue();
+ }
+ virtual bool ToBooleanIsFalse() V8_OVERRIDE {
+ return !value_->BooleanValue();
+ }
// Identity testers.
bool IsNull() const {
@@ -1355,17 +1382,20 @@ class Literal: public Expression {
protected:
Literal(Isolate* isolate, Handle<Object> value)
: Expression(isolate),
- value_(value) { }
+ value_(value),
+ isolate_(isolate) { }
private:
Handle<String> ToString();
Handle<Object> value_;
+ // TODO(dcarney): remove. this is only needed for Match and Hash.
+ Isolate* isolate_;
};
// Base class for literals that needs space in the corresponding JSFunction.
-class MaterializedLiteral: public Expression {
+class MaterializedLiteral : public Expression {
public:
virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
@@ -1397,7 +1427,7 @@ class MaterializedLiteral: public Expression {
// Property is used for passing information
// about an object literal's properties from the parser
// to the code generator.
-class ObjectLiteralProperty: public ZoneObject {
+class ObjectLiteralProperty V8_FINAL : public ZoneObject {
public:
enum Kind {
CONSTANT, // Property with constant value (compile time).
@@ -1440,7 +1470,7 @@ class ObjectLiteralProperty: public ZoneObject {
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
-class ObjectLiteral: public MaterializedLiteral {
+class ObjectLiteral V8_FINAL : public MaterializedLiteral {
public:
typedef ObjectLiteralProperty Property;
@@ -1498,7 +1528,7 @@ class ObjectLiteral: public MaterializedLiteral {
// Node for capturing a regexp literal.
-class RegExpLiteral: public MaterializedLiteral {
+class RegExpLiteral V8_FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(RegExpLiteral)
@@ -1521,7 +1551,7 @@ class RegExpLiteral: public MaterializedLiteral {
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
-class ArrayLiteral: public MaterializedLiteral {
+class ArrayLiteral V8_FINAL : public MaterializedLiteral {
public:
DECLARE_NODE_TYPE(ArrayLiteral)
@@ -1552,11 +1582,11 @@ class ArrayLiteral: public MaterializedLiteral {
};
-class VariableProxy: public Expression {
+class VariableProxy V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(VariableProxy)
- virtual bool IsValidLeftHandSide() {
+ virtual bool IsValidLeftHandSide() V8_OVERRIDE {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
@@ -1604,15 +1634,15 @@ class VariableProxy: public Expression {
};
-class Property: public Expression {
+class Property V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Property)
- virtual bool IsValidLeftHandSide() { return true; }
+ virtual bool IsValidLeftHandSide() V8_OVERRIDE { return true; }
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId LoadId() const { return load_id_; }
@@ -1622,9 +1652,11 @@ class Property: public Expression {
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual KeyedAccessStoreMode GetStoreMode() {
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return STANDARD_STORE;
}
bool IsUninitialized() { return is_uninitialized_; }
@@ -1661,19 +1693,21 @@ class Property: public Expression {
};
-class Call: public Expression {
+class Call V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Call)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_FINAL { return pos_; }
// Type feedback information.
TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind);
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
void set_string_check(Handle<JSObject> holder) {
@@ -1744,18 +1778,18 @@ class Call: public Expression {
};
-class CallNew: public Expression {
+class CallNew V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CallNew)
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
ElementsKind elements_kind() const { return elements_kind_; }
Handle<Cell> allocation_info_cell() const {
@@ -1795,7 +1829,7 @@ class CallNew: public Expression {
// language construct. Instead it is used to call a C or JS function
// with a set of arguments. This is used from the builtins that are
// implemented in JavaScript (see "v8natives.js").
-class CallRuntime: public Expression {
+class CallRuntime V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CallRuntime)
@@ -1823,22 +1857,19 @@ class CallRuntime: public Expression {
};
-class UnaryOperation: public Expression {
+class UnaryOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(UnaryOperation)
- virtual bool ResultOverwriteAllowed();
-
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId MaterializeTrueId() { return materialize_true_id_; }
BailoutId MaterializeFalseId() { return materialize_false_id_; }
- TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
-
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ virtual void RecordToBooleanTypeFeedback(
+ TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
UnaryOperation(Isolate* isolate,
@@ -1866,7 +1897,7 @@ class UnaryOperation: public Expression {
};
-class BinaryOperation: public Expression {
+class BinaryOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(BinaryOperation)
@@ -1875,7 +1906,7 @@ class BinaryOperation: public Expression {
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BailoutId RightId() const { return right_id_; }
@@ -1883,7 +1914,8 @@ class BinaryOperation: public Expression {
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ virtual void RecordToBooleanTypeFeedback(
+ TypeFeedbackOracle* oracle) V8_OVERRIDE;
protected:
BinaryOperation(Isolate* isolate,
@@ -1916,7 +1948,7 @@ class BinaryOperation: public Expression {
};
-class CountOperation: public Expression {
+class CountOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CountOperation)
@@ -1929,14 +1961,14 @@ class CountOperation: public Expression {
}
Expression* expression() const { return expression_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
- virtual void MarkAsStatement() { is_prefix_ = true; }
-
- void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* znoe);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual KeyedAccessStoreMode GetStoreMode() {
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
TypeInfo type() const { return type_; }
@@ -1978,14 +2010,14 @@ class CountOperation: public Expression {
};
-class CompareOperation: public Expression {
+class CompareOperation V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(CompareOperation)
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
@@ -2007,7 +2039,8 @@ class CompareOperation: public Expression {
op_(op),
left_(left),
right_(right),
- pos_(pos) {
+ pos_(pos),
+ combined_type_(Type::Null(), isolate) {
ASSERT(Token::IsCompareOp(op));
}
@@ -2021,7 +2054,7 @@ class CompareOperation: public Expression {
};
-class Conditional: public Expression {
+class Conditional V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Conditional)
@@ -2062,7 +2095,7 @@ class Conditional: public Expression {
};
-class Assignment: public Expression {
+class Assignment V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Assignment)
@@ -2073,7 +2106,7 @@ class Assignment: public Expression {
Token::Value op() const { return op_; }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -2084,10 +2117,12 @@ class Assignment: public Expression {
// Type feedback information.
TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
- virtual bool IsMonomorphic() { return is_monomorphic_; }
+ virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
bool IsUninitialized() { return is_uninitialized_; }
- virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
- virtual KeyedAccessStoreMode GetStoreMode() {
+ virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+ return &receiver_types_;
+ }
+ virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
@@ -2123,7 +2158,7 @@ class Assignment: public Expression {
};
-class Yield: public Expression {
+class Yield V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Yield)
@@ -2137,7 +2172,7 @@ class Yield: public Expression {
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
// Delegating yield surrounds the "yield" in a "try/catch". This index
// locates the catch handler in the handler table, and is equivalent to
@@ -2173,12 +2208,12 @@ class Yield: public Expression {
};
-class Throw: public Expression {
+class Throw V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
- virtual int position() const { return pos_; }
+ virtual int position() const V8_OVERRIDE { return pos_; }
protected:
Throw(Isolate* isolate, Expression* exception, int pos)
@@ -2190,7 +2225,7 @@ class Throw: public Expression {
};
-class FunctionLiteral: public Expression {
+class FunctionLiteral V8_FINAL : public Expression {
public:
enum FunctionType {
ANONYMOUS_EXPRESSION,
@@ -2282,6 +2317,12 @@ class FunctionLiteral: public Expression {
ast_properties_ = *ast_properties;
}
+ bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
+ void set_dont_optimize_reason(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
+
protected:
FunctionLiteral(Isolate* isolate,
Handle<String> name,
@@ -2301,6 +2342,7 @@ class FunctionLiteral: public Expression {
scope_(scope),
body_(body),
inferred_name_(isolate->factory()->empty_string()),
+ dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
handler_count_(handler_count),
@@ -2322,6 +2364,7 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
+ BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
int expected_property_count_;
@@ -2340,7 +2383,7 @@ class FunctionLiteral: public Expression {
};
-class SharedFunctionInfoLiteral: public Expression {
+class SharedFunctionInfoLiteral V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
@@ -2360,7 +2403,7 @@ class SharedFunctionInfoLiteral: public Expression {
};
-class ThisFunction: public Expression {
+class ThisFunction V8_FINAL : public Expression {
public:
DECLARE_NODE_TYPE(ThisFunction)
@@ -2385,10 +2428,10 @@ class RegExpVisitor BASE_EMBEDDED {
};
-class RegExpTree: public ZoneObject {
+class RegExpTree : public ZoneObject {
public:
static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() { }
+ virtual ~RegExpTree() {}
virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) = 0;
@@ -2410,19 +2453,19 @@ class RegExpTree: public ZoneObject {
};
-class RegExpDisjunction: public RegExpTree {
+class RegExpDisjunction V8_FINAL : public RegExpTree {
public:
explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpDisjunction* AsDisjunction();
- virtual Interval CaptureRegisters();
- virtual bool IsDisjunction();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpDisjunction* AsDisjunction() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsDisjunction() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
private:
ZoneList<RegExpTree*>* alternatives_;
@@ -2431,19 +2474,19 @@ class RegExpDisjunction: public RegExpTree {
};
-class RegExpAlternative: public RegExpTree {
+class RegExpAlternative V8_FINAL : public RegExpTree {
public:
explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAlternative* AsAlternative();
- virtual Interval CaptureRegisters();
- virtual bool IsAlternative();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAlternative* AsAlternative() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsAlternative() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
ZoneList<RegExpTree*>* nodes() { return nodes_; }
private:
ZoneList<RegExpTree*>* nodes_;
@@ -2452,7 +2495,7 @@ class RegExpAlternative: public RegExpTree {
};
-class RegExpAssertion: public RegExpTree {
+class RegExpAssertion V8_FINAL : public RegExpTree {
public:
enum AssertionType {
START_OF_LINE,
@@ -2463,22 +2506,22 @@ class RegExpAssertion: public RegExpTree {
NON_BOUNDARY
};
explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAssertion* AsAssertion();
- virtual bool IsAssertion();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAssertion* AsAssertion() V8_OVERRIDE;
+ virtual bool IsAssertion() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
AssertionType assertion_type() { return assertion_type_; }
private:
AssertionType assertion_type_;
};
-class CharacterSet BASE_EMBEDDED {
+class CharacterSet V8_FINAL BASE_EMBEDDED {
public:
explicit CharacterSet(uc16 standard_set_type)
: ranges_(NULL),
@@ -2501,7 +2544,7 @@ class CharacterSet BASE_EMBEDDED {
};
-class RegExpCharacterClass: public RegExpTree {
+class RegExpCharacterClass V8_FINAL : public RegExpTree {
public:
RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
: set_(ranges),
@@ -2509,15 +2552,15 @@ class RegExpCharacterClass: public RegExpTree {
explicit RegExpCharacterClass(uc16 type)
: set_(type),
is_negated_(false) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpCharacterClass* AsCharacterClass();
- virtual bool IsCharacterClass();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return 1; }
- virtual int max_match() { return 1; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpCharacterClass* AsCharacterClass() V8_OVERRIDE;
+ virtual bool IsCharacterClass() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return 1; }
+ virtual int max_match() V8_OVERRIDE { return 1; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
CharacterSet character_set() { return set_; }
// TODO(lrn): Remove need for complex version if is_standard that
// recognizes a mangled standard set and just do { return set_.is_special(); }
@@ -2543,18 +2586,18 @@ class RegExpCharacterClass: public RegExpTree {
};
-class RegExpAtom: public RegExpTree {
+class RegExpAtom V8_FINAL : public RegExpTree {
public:
explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpAtom* AsAtom();
- virtual bool IsAtom();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return data_.length(); }
- virtual int max_match() { return data_.length(); }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpAtom* AsAtom() V8_OVERRIDE;
+ virtual bool IsAtom() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return data_.length(); }
+ virtual int max_match() V8_OVERRIDE { return data_.length(); }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
private:
@@ -2562,18 +2605,18 @@ class RegExpAtom: public RegExpTree {
};
-class RegExpText: public RegExpTree {
+class RegExpText V8_FINAL : public RegExpTree {
public:
explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpText* AsText();
- virtual bool IsText();
- virtual bool IsTextElement() { return true; }
- virtual int min_match() { return length_; }
- virtual int max_match() { return length_; }
- virtual void AppendToText(RegExpText* text, Zone* zone);
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpText* AsText() V8_OVERRIDE;
+ virtual bool IsText() V8_OVERRIDE;
+ virtual bool IsTextElement() V8_OVERRIDE { return true; }
+ virtual int min_match() V8_OVERRIDE { return length_; }
+ virtual int max_match() V8_OVERRIDE { return length_; }
+ virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
void AddElement(TextElement elm, Zone* zone) {
elements_.Add(elm, zone);
length_ += elm.length();
@@ -2585,7 +2628,7 @@ class RegExpText: public RegExpTree {
};
-class RegExpQuantifier: public RegExpTree {
+class RegExpQuantifier V8_FINAL : public RegExpTree {
public:
enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
@@ -2600,9 +2643,9 @@ class RegExpQuantifier: public RegExpTree {
max_match_ = max * body->max_match();
}
}
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
+ RegExpNode* on_success) V8_OVERRIDE;
static RegExpNode* ToNode(int min,
int max,
bool is_greedy,
@@ -2610,11 +2653,11 @@ class RegExpQuantifier: public RegExpTree {
RegExpCompiler* compiler,
RegExpNode* on_success,
bool not_at_start = false);
- virtual RegExpQuantifier* AsQuantifier();
- virtual Interval CaptureRegisters();
- virtual bool IsQuantifier();
- virtual int min_match() { return min_match_; }
- virtual int max_match() { return max_match_; }
+ virtual RegExpQuantifier* AsQuantifier() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsQuantifier() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return min_match_; }
+ virtual int max_match() V8_OVERRIDE { return max_match_; }
int min() { return min_; }
int max() { return max_; }
bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
@@ -2632,24 +2675,24 @@ class RegExpQuantifier: public RegExpTree {
};
-class RegExpCapture: public RegExpTree {
+class RegExpCapture V8_FINAL : public RegExpTree {
public:
explicit RegExpCapture(RegExpTree* body, int index)
: body_(body), index_(index) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
+ RegExpNode* on_success) V8_OVERRIDE;
static RegExpNode* ToNode(RegExpTree* body,
int index,
RegExpCompiler* compiler,
RegExpNode* on_success);
- virtual RegExpCapture* AsCapture();
- virtual bool IsAnchoredAtStart();
- virtual bool IsAnchoredAtEnd();
- virtual Interval CaptureRegisters();
- virtual bool IsCapture();
- virtual int min_match() { return body_->min_match(); }
- virtual int max_match() { return body_->max_match(); }
+ virtual RegExpCapture* AsCapture() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsCapture() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return body_->min_match(); }
+ virtual int max_match() V8_OVERRIDE { return body_->max_match(); }
RegExpTree* body() { return body_; }
int index() { return index_; }
static int StartRegister(int index) { return index * 2; }
@@ -2661,7 +2704,7 @@ class RegExpCapture: public RegExpTree {
};
-class RegExpLookahead: public RegExpTree {
+class RegExpLookahead V8_FINAL : public RegExpTree {
public:
RegExpLookahead(RegExpTree* body,
bool is_positive,
@@ -2672,15 +2715,15 @@ class RegExpLookahead: public RegExpTree {
capture_count_(capture_count),
capture_from_(capture_from) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpLookahead* AsLookahead();
- virtual Interval CaptureRegisters();
- virtual bool IsLookahead();
- virtual bool IsAnchoredAtStart();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpLookahead* AsLookahead() V8_OVERRIDE;
+ virtual Interval CaptureRegisters() V8_OVERRIDE;
+ virtual bool IsLookahead() V8_OVERRIDE;
+ virtual bool IsAnchoredAtStart() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
RegExpTree* body() { return body_; }
bool is_positive() { return is_positive_; }
int capture_count() { return capture_count_; }
@@ -2694,17 +2737,17 @@ class RegExpLookahead: public RegExpTree {
};
-class RegExpBackReference: public RegExpTree {
+class RegExpBackReference V8_FINAL : public RegExpTree {
public:
explicit RegExpBackReference(RegExpCapture* capture)
: capture_(capture) { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpBackReference* AsBackReference();
- virtual bool IsBackReference();
- virtual int min_match() { return 0; }
- virtual int max_match() { return capture_->max_match(); }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpBackReference* AsBackReference() V8_OVERRIDE;
+ virtual bool IsBackReference() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return capture_->max_match(); }
int index() { return capture_->index(); }
RegExpCapture* capture() { return capture_; }
private:
@@ -2712,16 +2755,16 @@ class RegExpBackReference: public RegExpTree {
};
-class RegExpEmpty: public RegExpTree {
+class RegExpEmpty V8_FINAL : public RegExpTree {
public:
RegExpEmpty() { }
- virtual void* Accept(RegExpVisitor* visitor, void* data);
+ virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success);
- virtual RegExpEmpty* AsEmpty();
- virtual bool IsEmpty();
- virtual int min_match() { return 0; }
- virtual int max_match() { return 0; }
+ RegExpNode* on_success) V8_OVERRIDE;
+ virtual RegExpEmpty* AsEmpty() V8_OVERRIDE;
+ virtual bool IsEmpty() V8_OVERRIDE;
+ virtual int min_match() V8_OVERRIDE { return 0; }
+ virtual int max_match() V8_OVERRIDE { return 0; }
static RegExpEmpty* GetInstance() {
static RegExpEmpty* instance = ::new RegExpEmpty();
return instance;
@@ -2745,7 +2788,7 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
class AstVisitor BASE_EMBEDDED {
public:
AstVisitor() {}
- virtual ~AstVisitor() { }
+ virtual ~AstVisitor() {}
// Stack overflow check and dynamic dispatch.
virtual void Visit(AstNode* node) = 0;
@@ -2765,7 +2808,7 @@ class AstVisitor BASE_EMBEDDED {
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
- virtual void Visit(AstNode* node) { \
+ virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE { \
if (!CheckStackOverflow()) node->Accept(this); \
} \
\
@@ -2781,8 +2824,8 @@ public: \
} \
\
private: \
- void InitializeAstVisitor() { \
- isolate_ = Isolate::Current(); \
+ void InitializeAstVisitor(Isolate* isolate) { \
+ isolate_ = isolate; \
stack_overflow_ = false; \
} \
Isolate* isolate() { return isolate_; } \
@@ -2796,9 +2839,10 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED {
public:
- AstConstructionVisitor() { }
+ AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
AstProperties* ast_properties() { return &properties_; }
+ BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
private:
template<class> friend class AstNodeFactory;
@@ -2811,8 +2855,12 @@ class AstConstructionVisitor BASE_EMBEDDED {
void increase_node_count() { properties_.add_node_count(1); }
void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
+ void set_dont_optimize_reason(BailoutReason reason) {
+ dont_optimize_reason_ = reason;
+ }
AstProperties properties_;
+ BailoutReason dont_optimize_reason_;
};
@@ -2831,7 +2879,7 @@ class AstNullVisitor BASE_EMBEDDED {
// AstNode factory
template<class Visitor>
-class AstNodeFactory BASE_EMBEDDED {
+class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public:
AstNodeFactory(Isolate* isolate, Zone* zone)
: isolate_(isolate),
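
On a C++11 compiler the V8_FINAL and V8_OVERRIDE annotations applied throughout this file expand to final and override (v8config.h supplies the macros), so signature mismatches become compile errors. A minimal illustration, not V8 code:

    struct Expression {
      virtual ~Expression() {}
      virtual int position() const { return 0; }
    };

    struct Call final : Expression {            // class marked V8_FINAL
      virtual int position() const override {   // member marked V8_OVERRIDE
        return 42;
      }
    };
    // A typo such as `virtual int postion() const override` is now rejected
    // by the compiler instead of silently declaring a new virtual function.
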
diff --git a/chromium/v8/src/atomicops_internals_tsan.h b/chromium/v8/src/atomicops_internals_tsan.h
index e52c26c2fe2..b5162bad9f6 100644
--- a/chromium/v8/src/atomicops_internals_tsan.h
+++ b/chromium/v8/src/atomicops_internals_tsan.h
@@ -32,6 +32,12 @@
#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_
+namespace v8 {
+namespace internal {
+
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
@@ -47,12 +53,6 @@ extern struct AtomicOps_x86CPUFeatureStruct
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-namespace v8 {
-namespace internal {
-
-#ifndef TSAN_INTERFACE_ATOMIC_H
-#define TSAN_INTERFACE_ATOMIC_H
-
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/chromium/v8/src/bootstrapper.cc b/chromium/v8/src/bootstrapper.cc
index c2cc6efc45f..0756aefb0b4 100644
--- a/chromium/v8/src/bootstrapper.cc
+++ b/chromium/v8/src/bootstrapper.cc
@@ -45,10 +45,6 @@
#include "extensions/statistics-extension.h"
#include "code-stubs.h"
-#if defined(V8_I18N_SUPPORT)
-#include "extensions/i18n/i18n-extension.h"
-#endif
-
namespace v8 {
namespace internal {
@@ -98,7 +94,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
void Bootstrapper::Initialize(bool create_heap_objects) {
- extensions_cache_.Initialize(create_heap_objects);
+ extensions_cache_.Initialize(isolate_, create_heap_objects);
}
@@ -106,9 +102,6 @@ void Bootstrapper::InitializeOncePerProcess() {
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
-#if defined(V8_I18N_SUPPORT)
- v8_i18n::Extension::Register();
-#endif
}
@@ -147,7 +140,7 @@ void Bootstrapper::TearDown() {
delete_these_arrays_on_tear_down_ = NULL;
}
- extensions_cache_.Initialize(false); // Yes, symmetrical
+ extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -491,7 +484,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// prototype, otherwise the missing initial_array_prototype will cause
// assertions during startup.
native_context()->set_initial_array_prototype(*prototype);
- SetPrototype(object_fun, prototype);
+ Accessors::FunctionSetPrototype(object_fun, prototype);
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -1064,6 +1057,54 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_json_object(*json_object);
}
+ { // -- A r r a y B u f f e r
+ Handle<JSFunction> array_buffer_fun =
+ InstallFunction(
+ global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithInternalFields,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_array_buffer_fun(*array_buffer_fun);
+ }
+
+ { // -- T y p e d A r r a y s
+ Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
+ EXTERNAL_BYTE_ELEMENTS);
+ native_context()->set_int8_array_fun(*int8_fun);
+ Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
+ EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
+ native_context()->set_uint8_array_fun(*uint8_fun);
+ Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
+ EXTERNAL_SHORT_ELEMENTS);
+ native_context()->set_int16_array_fun(*int16_fun);
+ Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
+ EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
+ native_context()->set_uint16_array_fun(*uint16_fun);
+ Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
+ EXTERNAL_INT_ELEMENTS);
+ native_context()->set_int32_array_fun(*int32_fun);
+ Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
+ EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ native_context()->set_uint32_array_fun(*uint32_fun);
+ Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
+ EXTERNAL_FLOAT_ELEMENTS);
+ native_context()->set_float_array_fun(*float_fun);
+ Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
+ EXTERNAL_DOUBLE_ELEMENTS);
+ native_context()->set_double_array_fun(*double_fun);
+ Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
+ EXTERNAL_PIXEL_ELEMENTS);
+ native_context()->set_uint8c_array_fun(*uint8c_fun);
+
+ Handle<JSFunction> data_view_fun =
+ InstallFunction(
+ global, "DataView", JS_DATA_VIEW_TYPE,
+ JSDataView::kSizeWithInternalFields,
+ isolate->initial_object_prototype(),
+ Builtins::kIllegal, true, true);
+ native_context()->set_data_view_fun(*data_view_fun);
+ }
+
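
The embedder-visible effect of moving these installers out of the experimental path is that ArrayBuffer, the typed arrays, and DataView exist in every new context with no --harmony flag. A rough sketch against the 3.x API of this branch (exact signatures assumed):

    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);
    v8::Local<v8::Script> script = v8::Script::Compile(v8::String::New(
        "new DataView(new ArrayBuffer(8)).byteLength"));
    v8::Local<v8::Value> result = script->Run();  // Evaluates to 8.
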
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
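The ArrayBuffer/DataView/typed-array block added above is the counterpart of the deletions further down in InitializeExperimentalGlobal: these constructors now ship on every new context instead of hiding behind --harmony flags. A minimal embedder-side check, given a v8::Isolate* isolate and assuming the v8.h API of this branch (String::New, Context::New(Isolate*) returning a Local):

    v8::HandleScope scope(isolate);
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);
    // With this change the constructor is installed unconditionally.
    v8::Local<v8::Value> ctor =
        context->Global()->Get(v8::String::New("Uint8Array"));
    assert(ctor->IsFunction());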
@@ -1095,12 +1136,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_string(),
factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
+ Object::FORCE_TAGGED, FORCE_FIELD));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_string(),
factory->undefined_value(), DONT_ENUM,
- Object::FORCE_TAGGED, JSReceiver::FORCE_FIELD));
+ Object::FORCE_TAGGED, FORCE_FIELD));
#ifdef DEBUG
LookupResult lookup(isolate);
@@ -1268,13 +1309,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
native_context()->set_embedder_data(*embedder_data);
- {
- // Initialize the random seed slot.
- Handle<ByteArray> zeroed_byte_array(
- factory->NewByteArray(kRandomStateSize));
- native_context()->set_random_seed(*zeroed_byte_array);
- memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
- }
+ // Allocate the random seed slot.
+ Handle<ByteArray> random_seed = factory->NewByteArray(kRandomStateSize);
+ native_context()->set_random_seed(*random_seed);
}
@@ -1331,56 +1368,6 @@ void Genesis::InitializeExperimentalGlobal() {
}
}
- if (FLAG_harmony_array_buffer) {
- // -- A r r a y B u f f e r
- Handle<JSFunction> array_buffer_fun =
- InstallFunction(
- global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithInternalFields,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- native_context()->set_array_buffer_fun(*array_buffer_fun);
- }
-
- if (FLAG_harmony_typed_arrays) {
- // -- T y p e d A r r a y s
- Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array",
- EXTERNAL_BYTE_ELEMENTS);
- native_context()->set_int8_array_fun(*int8_fun);
- Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array",
- EXTERNAL_UNSIGNED_BYTE_ELEMENTS);
- native_context()->set_uint8_array_fun(*uint8_fun);
- Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array",
- EXTERNAL_SHORT_ELEMENTS);
- native_context()->set_int16_array_fun(*int16_fun);
- Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array",
- EXTERNAL_UNSIGNED_SHORT_ELEMENTS);
- native_context()->set_uint16_array_fun(*uint16_fun);
- Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array",
- EXTERNAL_INT_ELEMENTS);
- native_context()->set_int32_array_fun(*int32_fun);
- Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array",
- EXTERNAL_UNSIGNED_INT_ELEMENTS);
- native_context()->set_uint32_array_fun(*uint32_fun);
- Handle<JSFunction> float_fun = InstallTypedArray("Float32Array",
- EXTERNAL_FLOAT_ELEMENTS);
- native_context()->set_float_array_fun(*float_fun);
- Handle<JSFunction> double_fun = InstallTypedArray("Float64Array",
- EXTERNAL_DOUBLE_ELEMENTS);
- native_context()->set_double_array_fun(*double_fun);
- Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray",
- EXTERNAL_PIXEL_ELEMENTS);
- native_context()->set_uint8c_array_fun(*uint8c_fun);
-
- Handle<JSFunction> data_view_fun =
- InstallFunction(
- global, "DataView", JS_DATA_VIEW_TYPE,
- JSDataView::kSizeWithInternalFields,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true, true);
- native_context()->set_data_view_fun(*data_view_fun);
- }
-
if (FLAG_harmony_generators) {
// Create generator meta-objects and install them on the builtins object.
Handle<JSObject> builtins(native_context()->builtins());
@@ -1554,7 +1541,7 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
: top_context->global_object(),
isolate);
bool has_pending_exception;
- Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+ Execution::Call(isolate, fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
return true;
}
@@ -1632,7 +1619,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
true, true);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(array_function, prototype);
+ Accessors::FunctionSetPrototype(array_function, prototype);
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
@@ -1730,7 +1717,7 @@ bool Genesis::InstallNatives() {
Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(script_fun, prototype);
+ Accessors::FunctionSetPrototype(script_fun, prototype);
native_context()->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -1886,7 +1873,7 @@ bool Genesis::InstallNatives() {
Builtins::kIllegal, false, false);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(opaque_reference_fun, prototype);
+ Accessors::FunctionSetPrototype(opaque_reference_fun, prototype);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -2060,16 +2047,6 @@ bool Genesis::InstallExperimentalNatives() {
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
- if (FLAG_harmony_array_buffer &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native arraybuffer.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
- if (FLAG_harmony_typed_arrays &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native typedarray.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
if (FLAG_harmony_generators &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native generator.js") == 0) {
@@ -2085,6 +2062,11 @@ bool Genesis::InstallExperimentalNatives() {
"native harmony-string.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_arrays &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native harmony-array.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
}
InstallExperimentalNativeFunctions();
@@ -2302,12 +2284,6 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
InstallExtension(isolate, "v8/statistics", &extension_states);
}
-#if defined(V8_I18N_SUPPORT)
- if (FLAG_enable_i18n) {
- InstallExtension(isolate, "v8/i18n", &extension_states);
- }
-#endif
-
if (extensions == NULL) return true;
// Install required extensions
int count = v8::ImplementationUtilities::GetNameCount(extensions);
@@ -2595,8 +2571,8 @@ Genesis::Genesis(Isolate* isolate,
: isolate_(isolate),
active_(isolate->bootstrapper()) {
result_ = Handle<Context>::null();
- // If V8 isn't running and cannot be initialized, just return.
- if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
+ // If V8 cannot be initialized, just return.
+ if (!V8::Initialize(NULL)) return;
// Before creating the roots we must save the context and restore it
// on all function exits.
@@ -2611,7 +2587,7 @@ Genesis::Genesis(Isolate* isolate,
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
if (isolate->initialized_from_snapshot()) {
- native_context_ = Snapshot::NewContextFromSnapshot();
+ native_context_ = Snapshot::NewContextFromSnapshot(isolate);
} else {
native_context_ = Handle<Context>();
}
@@ -2654,6 +2630,14 @@ Genesis::Genesis(Isolate* isolate,
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
+ // Initially seed the per-context random number generator
+ // using the per-isolate random number generator.
+ uint32_t* state = reinterpret_cast<uint32_t*>(
+ native_context()->random_seed()->GetDataStartAddress());
+ do {
+ isolate->random_number_generator()->NextBytes(state, kRandomStateSize);
+ } while (state[0] == 0 || state[1] == 0);
+
result_ = native_context();
}
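The seeding loop added above rejects any state in which either 32-bit word is zero: the per-context generator keeps a two-word multiply-with-carry-style state, and a zero word would trap it in a fixed point. A standalone sketch of the same policy, with std::random_device standing in for isolate->random_number_generator():

    #include <cstdint>
    #include <random>

    // Redraw until neither state word is zero; a zero word would degenerate
    // an MWC/xorshift-style generator.
    void SeedRandomState(uint32_t state[2]) {
      std::random_device rd;  // stand-in for the per-isolate RNG
      do {
        state[0] = rd();
        state[1] = rd();
      } while (state[0] == 0 || state[1] == 0);
    }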
diff --git a/chromium/v8/src/bootstrapper.h b/chromium/v8/src/bootstrapper.h
index 30978003905..bac9f40372d 100644
--- a/chromium/v8/src/bootstrapper.h
+++ b/chromium/v8/src/bootstrapper.h
@@ -44,8 +44,8 @@ class SourceCodeCache BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
- void Initialize(bool create_heap_objects) {
- cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
+ void Initialize(Isolate* isolate, bool create_heap_objects) {
+ cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
}
void Iterate(ObjectVisitor* v) {
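This signature change is the pattern repeated throughout the patch: code that used to reach for the process-global HEAP / Isolate::Current() shortcuts now receives the Isolate explicitly from its caller, a prerequisite for running several isolates in one process. Reduced to a hypothetical minimal cache:

    // Before: the callee guessed its heap via a process-wide global.
    //   cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
    // After: the caller, which already knows its isolate, passes it down,
    // and the callee derives the heap from it.
    void Initialize(Isolate* isolate, bool create_heap_objects) {
      cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array()
                                   : NULL;
    }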
diff --git a/chromium/v8/src/builtins.cc b/chromium/v8/src/builtins.cc
index 4a5cd03b6f9..9290852dc95 100644
--- a/chromium/v8/src/builtins.cc
+++ b/chromium/v8/src/builtins.cc
@@ -132,7 +132,6 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
MUST_USE_RESULT static MaybeObject* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
- ASSERT(isolate == Isolate::Current()); \
args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
} \
@@ -304,11 +303,11 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
} else {
entry_size = kDoubleSize;
}
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
- ASSERT(!HEAP->lo_space()->Contains(elms));
+ ASSERT(!heap->lo_space()->Contains(elms));
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
@@ -448,7 +447,8 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
argv[i] = args.at<Object>(i + 1);
}
bool pending_exception;
- Handle<Object> result = Execution::Call(function,
+ Handle<Object> result = Execution::Call(isolate,
+ function,
args.receiver(),
argc,
argv.start(),
@@ -594,7 +594,7 @@ BUILTIN(ArrayPop) {
if (accessor->HasElement(array, array, new_length, elms_obj)) {
maybe_result = accessor->Get(array, array, new_length, elms_obj);
} else {
- maybe_result = array->GetPrototype()->GetElement(len - 1);
+ maybe_result = array->GetPrototype()->GetElement(isolate, len - 1);
}
if (maybe_result->IsFailure()) return maybe_result;
MaybeObject* maybe_failure =
@@ -1253,8 +1253,8 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
if (!raw_call_data->IsUndefined()) {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
Object* data_obj = call_data->data();
Object* result;
@@ -1322,8 +1322,8 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
ASSERT(!handler->IsUndefined());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();
- v8::InvocationCallback callback =
- v8::ToCData<v8::InvocationCallback>(callback_obj);
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
// Get the data for the call and perform the callback.
Object* result;
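Both hunks above swap v8::InvocationCallback for v8::FunctionCallback, following the new-style external API: the old callback returned its result as a Handle, the new one writes it through the callback info. A sketch of a callback under the new typedef, assuming the v8.h of this branch:

    // Old shape: v8::Handle<v8::Value> Echo(const v8::Arguments& args);
    // New shape: the result travels through the ReturnValue slot.
    void Echo(const v8::FunctionCallbackInfo<v8::Value>& info) {
      if (info.Length() > 0) info.GetReturnValue().Set(info[0]);
    }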
@@ -1461,6 +1461,16 @@ static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
}
+static void Generate_StoreIC_PreMonomorphic(MacroAssembler* masm) {
+ StoreIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_StoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
+ StoreIC::GeneratePreMonomorphic(masm);
+}
+
+
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@@ -1546,6 +1556,16 @@ static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
}
+static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
+ KeyedStoreIC::GeneratePreMonomorphic(masm);
+}
+
+
+static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
+ KeyedStoreIC::GeneratePreMonomorphic(masm);
+}
+
+
static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
KeyedStoreIC::GenerateNonStrictArguments(masm);
}
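The four PreMonomorphic generators added above give (keyed) store ICs an explicit warm-up state: the first hit merely advances the site's state, and a map-specialized handler is only compiled on the second hit, so one-shot call sites stay on the cheap path. Illustrative sketch of the progression (state names as in ic.h; not the complete enum, which also includes polymorphic states):

    enum InlineCacheState {
      UNINITIALIZED,   // first hit: record nothing, advance state
      PREMONOMORPHIC,  // second hit: specialize for the receiver map
      MONOMORPHIC,     // one receiver map seen
      MEGAMORPHIC,     // too many maps: use the generic stub
      GENERIC
    };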
@@ -1717,9 +1737,8 @@ void Builtins::InitBuiltinFunctionTable() {
}
-void Builtins::SetUp(bool create_heap_objects) {
+void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
ASSERT(!initialized_);
- Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
// Create a scope for the handles in the builtins.
@@ -1813,6 +1832,16 @@ const char* Builtins::Lookup(byte* pc) {
}
+void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
Handle<Code> Builtins::name() { \
Code** code_address = \
diff --git a/chromium/v8/src/builtins.h b/chromium/v8/src/builtins.h
index 73a2e964590..c712f1ee02d 100644
--- a/chromium/v8/src/builtins.h
+++ b/chromium/v8/src/builtins.h
@@ -87,8 +87,6 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(InstallRecompiledCode, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@@ -103,7 +101,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
+ V(ConcurrentRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -122,7 +120,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED, \
+ V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -144,7 +142,7 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
- V(LoadIC_Slow, LOAD_IC, GENERIC, \
+ V(LoadIC_Slow, STUB, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
@@ -162,6 +160,8 @@ enum BuiltinExtraArguments {
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
@@ -174,6 +174,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
+ V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \
+ kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
@@ -185,11 +187,15 @@ enum BuiltinExtraArguments {
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
+ kStrictMode) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
kStrictMode) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
@@ -211,6 +217,10 @@ enum BuiltinExtraArguments {
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -259,8 +269,6 @@ enum BuiltinExtraArguments {
V(BIT_OR, 1) \
V(BIT_AND, 1) \
V(BIT_XOR, 1) \
- V(UNARY_MINUS, 0) \
- V(BIT_NOT, 0) \
V(SHL, 1) \
V(SAR, 1) \
V(SHR, 1) \
@@ -290,7 +298,7 @@ class Builtins {
// Generate all builtin code objects. Should be called once during
// isolate initialization.
- void SetUp(bool create_heap_objects);
+ void SetUp(Isolate* isolate, bool create_heap_objects);
void TearDown();
// Garbage collection support.
@@ -372,8 +380,7 @@ class Builtins {
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_InRecompileQueue(MacroAssembler* masm);
- static void Generate_InstallRecompiledCode(MacroAssembler* masm);
- static void Generate_ParallelRecompile(MacroAssembler* masm);
+ static void Generate_ConcurrentRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
@@ -397,6 +404,9 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
+ static void Generate_InterruptCheck(MacroAssembler* masm);
+ static void Generate_StackCheck(MacroAssembler* masm);
+
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
diff --git a/chromium/v8/src/checks.cc b/chromium/v8/src/checks.cc
index 82086824dd2..7108d18892e 100644
--- a/chromium/v8/src/checks.cc
+++ b/chromium/v8/src/checks.cc
@@ -31,33 +31,19 @@
#include "platform.h"
-// TODO(isolates): is it necessary to lift this?
-static int fatal_error_handler_nesting_depth = 0;
-
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::AllowHandleDereference allow_deref;
i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
- fatal_error_handler_nesting_depth++;
- // First time we try to print an error message
- if (fatal_error_handler_nesting_depth < 2) {
- i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
- va_list arguments;
- va_start(arguments, format);
- i::OS::VPrintError(format, arguments);
- va_end(arguments);
- i::OS::PrintError("\n#\n");
- i::OS::DumpBacktrace();
- }
- // First two times we may try to print a stack dump.
- if (fatal_error_handler_nesting_depth < 3) {
- if (i::FLAG_stack_trace_on_abort) {
- // Call this one twice on double fault
- i::Isolate::Current()->PrintStack(stderr);
- }
- }
+ i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
+ va_list arguments;
+ va_start(arguments, format);
+ i::OS::VPrintError(format, arguments);
+ va_end(arguments);
+ i::OS::PrintError("\n#\n");
+ i::OS::DumpBacktrace();
i::OS::Abort();
}
diff --git a/chromium/v8/src/checks.h b/chromium/v8/src/checks.h
index b309e2c42c7..f5c5f232bd5 100644
--- a/chromium/v8/src/checks.h
+++ b/chromium/v8/src/checks.h
@@ -31,6 +31,7 @@
#include <string.h>
#include "../include/v8stdint.h"
+
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -196,6 +197,20 @@ inline void CheckEqualsHelper(const char* file,
inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ int64_t expected,
+ const char* value_source,
+ int64_t value) {
+ if (expected == value) {
+ V8_Fatal(file, line,
+             "CHECK_NE(%s, %s) failed\n#   Value: %lld",
+             expected_source, value_source, static_cast<long long>(expected));
+ }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
double expected,
@@ -232,7 +247,7 @@ inline void CheckNonEqualsHelper(const char* file,
// Use C++11 static_assert if possible, which gives error
// messages that are easier to understand on first sight.
-#if __cplusplus >= 201103L
+#if V8_HAS_CXX11_STATIC_ASSERT
#define STATIC_CHECK(test) static_assert(test, #test)
#else
// This is inspired by the static assertion facility in boost. This
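Gating on V8_HAS_CXX11_STATIC_ASSERT (defined in the v8config.h introduced by this commit) instead of a raw __cplusplus comparison matters because several toolchains ship static_assert without advertising full C++11. The int64_t overload added further up pairs with the usual macro inside V8, roughly:

    int64_t last_id = 41;
    int64_t id = last_id + 1;
    CHECK_NE(last_id, id);  // now resolves to the int64_t CheckNonEqualsHelper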
diff --git a/chromium/v8/src/circular-queue-inl.h b/chromium/v8/src/circular-queue-inl.h
index b48070ab5d2..dfb70315781 100644
--- a/chromium/v8/src/circular-queue-inl.h
+++ b/chromium/v8/src/circular-queue-inl.h
@@ -33,30 +33,60 @@
namespace v8 {
namespace internal {
+template<typename T, unsigned L>
+SamplingCircularQueue<T, L>::SamplingCircularQueue()
+ : enqueue_pos_(buffer_),
+ dequeue_pos_(buffer_) {
+}
+
+
+template<typename T, unsigned L>
+SamplingCircularQueue<T, L>::~SamplingCircularQueue() {
+}
+
+
+template<typename T, unsigned L>
+T* SamplingCircularQueue<T, L>::Peek() {
+ MemoryBarrier();
+ if (Acquire_Load(&dequeue_pos_->marker) == kFull) {
+ return &dequeue_pos_->record;
+ }
+ return NULL;
+}
+
+
+template<typename T, unsigned L>
+void SamplingCircularQueue<T, L>::Remove() {
+ Release_Store(&dequeue_pos_->marker, kEmpty);
+ dequeue_pos_ = Next(dequeue_pos_);
+}
-void* SamplingCircularQueue::Enqueue() {
- if (producer_pos_->enqueue_pos == producer_pos_->next_chunk_pos) {
- if (producer_pos_->enqueue_pos == buffer_ + buffer_size_) {
- producer_pos_->next_chunk_pos = buffer_;
- producer_pos_->enqueue_pos = buffer_;
- }
- Acquire_Store(producer_pos_->next_chunk_pos, kEnqueueStarted);
- // Skip marker.
- producer_pos_->enqueue_pos += 1;
- producer_pos_->next_chunk_pos += chunk_size_;
+
+template<typename T, unsigned L>
+T* SamplingCircularQueue<T, L>::StartEnqueue() {
+ MemoryBarrier();
+ if (Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
+ return &enqueue_pos_->record;
}
- void* result = producer_pos_->enqueue_pos;
- producer_pos_->enqueue_pos += record_size_;
- return result;
+ return NULL;
}
-void SamplingCircularQueue::WrapPositionIfNeeded(
- SamplingCircularQueue::Cell** pos) {
- if (*pos == buffer_ + buffer_size_) *pos = buffer_;
+template<typename T, unsigned L>
+void SamplingCircularQueue<T, L>::FinishEnqueue() {
+ Release_Store(&enqueue_pos_->marker, kFull);
+ enqueue_pos_ = Next(enqueue_pos_);
}
+template<typename T, unsigned L>
+typename SamplingCircularQueue<T, L>::Entry* SamplingCircularQueue<T, L>::Next(
+ Entry* entry) {
+ Entry* next = entry + 1;
+ if (next == &buffer_[L]) return buffer_;
+ return next;
+}
+
} } // namespace v8::internal
#endif // V8_CIRCULAR_QUEUE_INL_H_
diff --git a/chromium/v8/src/circular-queue.cc b/chromium/v8/src/circular-queue.cc
deleted file mode 100644
index 0aea3435927..00000000000
--- a/chromium/v8/src/circular-queue.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "circular-queue-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes,
- size_t desired_chunk_size_in_bytes,
- unsigned buffer_size_in_chunks)
- : record_size_(record_size_in_bytes / sizeof(Cell)),
- chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
- record_size_in_bytes + sizeof(Cell)),
- chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
- buffer_size_(chunk_size_ * buffer_size_in_chunks),
- buffer_(NewArray<Cell>(buffer_size_)) {
- ASSERT(record_size_ * sizeof(Cell) == record_size_in_bytes);
- ASSERT(chunk_size_ * sizeof(Cell) == chunk_size_in_bytes_);
- ASSERT(buffer_size_in_chunks > 2);
- // Mark all chunks as clear.
- for (size_t i = 0; i < buffer_size_; i += chunk_size_) {
- buffer_[i] = kClear;
- }
-
- // Layout producer and consumer position pointers each on their own
- // cache lines to avoid cache lines thrashing due to simultaneous
- // updates of positions by different processor cores.
- const int positions_size =
- RoundUp(1, kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ProducerPosition)),
- kProcessorCacheLineSize) +
- RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
- kProcessorCacheLineSize);
- positions_ = NewArray<byte>(positions_size);
-
- producer_pos_ = reinterpret_cast<ProducerPosition*>(
- RoundUp(positions_, kProcessorCacheLineSize));
- producer_pos_->next_chunk_pos = buffer_;
- producer_pos_->enqueue_pos = buffer_;
-
- consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
- reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
- ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
- positions_ + positions_size);
- consumer_pos_->dequeue_chunk_pos = buffer_;
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- const size_t producer_consumer_distance = (2 * chunk_size_);
- consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance;
- consumer_pos_->dequeue_pos = NULL;
-}
-
-
-SamplingCircularQueue::~SamplingCircularQueue() {
- DeleteArray(positions_);
- DeleteArray(buffer_);
-}
-
-
-void* SamplingCircularQueue::StartDequeue() {
- if (consumer_pos_->dequeue_pos != NULL) {
- return consumer_pos_->dequeue_pos;
- } else {
- if (Acquire_Load(consumer_pos_->dequeue_chunk_poll_pos) != kClear) {
- // Skip marker.
- consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos + 1;
- consumer_pos_->dequeue_end_pos =
- consumer_pos_->dequeue_chunk_pos + chunk_size_;
- return consumer_pos_->dequeue_pos;
- } else {
- return NULL;
- }
- }
-}
-
-
-void SamplingCircularQueue::FinishDequeue() {
- consumer_pos_->dequeue_pos += record_size_;
- if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
- // Move to next chunk.
- consumer_pos_->dequeue_pos = NULL;
- *consumer_pos_->dequeue_chunk_pos = kClear;
- consumer_pos_->dequeue_chunk_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
- consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
- WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
-}
-
-
-void SamplingCircularQueue::FlushResidualRecords() {
- // Eliminate producer / consumer distance.
- consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
-}
-
-
-} } // namespace v8::internal
diff --git a/chromium/v8/src/circular-queue.h b/chromium/v8/src/circular-queue.h
index 4ad4f4b5505..94bc89e7dfb 100644
--- a/chromium/v8/src/circular-queue.h
+++ b/chromium/v8/src/circular-queue.h
@@ -28,6 +28,8 @@
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
+#include "v8globals.h"
+
namespace v8 {
namespace internal {
@@ -35,67 +37,49 @@ namespace internal {
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
-// previous unread records are overwritten. The queue is designed with
+// StartEnqueue will return NULL. The queue is designed with
// a goal in mind to evade cache-line thrashing by preventing
// simultaneous reads and writes to adjacent memory locations.
-//
-// IMPORTANT: as a producer never checks for chunks cleanness, it is
-// possible that it can catch up and overwrite a chunk that a consumer
-// is currently reading, resulting in a corrupt record being read.
+template<typename T, unsigned Length>
class SamplingCircularQueue {
public:
// Executed on the application thread.
- SamplingCircularQueue(size_t record_size_in_bytes,
- size_t desired_chunk_size_in_bytes,
- unsigned buffer_size_in_chunks);
+ SamplingCircularQueue();
~SamplingCircularQueue();
- // Enqueue returns a pointer to a memory location for storing the next
- // record.
- INLINE(void* Enqueue());
+ // StartEnqueue returns a pointer to a memory location for storing the next
+ // record or NULL if all entries are full at the moment.
+ T* StartEnqueue();
+  // Notifies the queue that the producer has completed writing data into the
+ // memory returned by StartEnqueue and it can be passed to the consumer.
+ void FinishEnqueue();
// Executed on the consumer (analyzer) thread.
- // StartDequeue returns a pointer to a memory location for retrieving
- // the next record. After the record had been read by a consumer,
- // FinishDequeue must be called. Until that moment, subsequent calls
- // to StartDequeue will return the same pointer.
- void* StartDequeue();
- void FinishDequeue();
- // Due to a presence of slipping between the producer and the consumer,
- // the queue must be notified whether producing has been finished in order
- // to process remaining records from the buffer.
- void FlushResidualRecords();
-
- typedef AtomicWord Cell;
+ // Retrieves, but does not remove, the head of this queue, returning NULL
+  // if this queue is empty. After the record has been read by a consumer,
+ // Remove must be called.
+ T* Peek();
+ void Remove();
private:
- // Reserved values for the chunk marker (first Cell in each chunk).
+ // Reserved values for the entry marker.
enum {
- kClear, // Marks clean (processed) chunks.
- kEnqueueStarted // Marks chunks where enqueue started.
+ kEmpty, // Marks clean (processed) entries.
+ kFull // Marks entries already filled by the producer but not yet
+ // completely processed by the consumer.
};
- struct ProducerPosition {
- Cell* next_chunk_pos;
- Cell* enqueue_pos;
- };
- struct ConsumerPosition {
- Cell* dequeue_chunk_pos;
- Cell* dequeue_chunk_poll_pos;
- Cell* dequeue_pos;
- Cell* dequeue_end_pos;
+ struct V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry {
+ Entry() : marker(kEmpty) {}
+ T record;
+ Atomic32 marker;
};
- INLINE(void WrapPositionIfNeeded(Cell** pos));
+ Entry* Next(Entry* entry);
- const size_t record_size_;
- const size_t chunk_size_in_bytes_;
- const size_t chunk_size_;
- const size_t buffer_size_;
- Cell* buffer_;
- byte* positions_;
- ProducerPosition* producer_pos_;
- ConsumerPosition* consumer_pos_;
+ Entry buffer_[Length];
+ V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* enqueue_pos_;
+ V8_ALIGNED(PROCESSOR_CACHE_LINE_SIZE) Entry* dequeue_pos_;
DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
};
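The rewrite turns the chunked, overwrite-on-overflow queue into a fixed array of cache-line-aligned entries with a per-entry marker, so a full queue now reports NULL from StartEnqueue instead of corrupting records a consumer may still be reading. Usage sketch under the new template API, with a stand-in record type and a hypothetical consumer-side handler:

    struct Sample { int data; };
    void UseSample(int data);  // hypothetical
    static SamplingCircularQueue<Sample, 64> queue;

    // Producer thread: reserve, fill, publish.
    void Produce(int value) {
      if (Sample* rec = queue.StartEnqueue()) {  // NULL while all entries full
        rec->data = value;
        queue.FinishEnqueue();  // Release_Store flips marker kEmpty -> kFull
      }  // else drop the sample; the profiler tolerates gaps
    }

    // Consumer thread: peek, process, remove.
    void Consume() {
      while (Sample* rec = queue.Peek()) {  // NULL once the queue is empty
        UseSample(rec->data);
        queue.Remove();  // marker back to kEmpty, entry reusable
      }
    }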
diff --git a/chromium/v8/src/code-stubs-hydrogen.cc b/chromium/v8/src/code-stubs-hydrogen.cc
index 9ca6a47f2ee..9130a731594 100644
--- a/chromium/v8/src/code-stubs-hydrogen.cc
+++ b/chromium/v8/src/code-stubs-hydrogen.cc
@@ -41,13 +41,13 @@ static LChunk* OptimizeGraph(HGraph* graph) {
DisallowHandleDereference no_deref;
ASSERT(graph != NULL);
- SmartArrayPointer<char> bailout_reason;
+ BailoutReason bailout_reason = kNoReason;
if (!graph->Optimize(&bailout_reason)) {
- FATAL(bailout_reason.is_empty() ? "unknown" : *bailout_reason);
+ FATAL(GetBailoutReason(bailout_reason));
}
LChunk* chunk = LChunk::NewChunk(graph);
if (chunk == NULL) {
- FATAL(graph->info()->bailout_reason());
+ FATAL(GetBailoutReason(graph->info()->bailout_reason()));
}
return chunk;
}
@@ -92,7 +92,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
}
~ArrayContextChecker() {
- checker_.ElseDeopt();
+ checker_.ElseDeopt("Array constructor called from different context");
checker_.End();
}
private:
@@ -112,6 +112,13 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
+ void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
+ HValue* code_object);
+ void BuildInstallCode(HValue* js_function, HValue* shared_info);
+ void BuildInstallFromOptimizedCodeMap(HValue* js_function,
+ HValue* shared_info,
+ HValue* native_context);
+
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -210,8 +217,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(Stub* stub)
- : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
+ explicit CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
+ : CodeStubGraphBuilderBase(isolate, stub) {}
protected:
virtual HValue* BuildCodeStub() {
@@ -233,7 +240,7 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
IfBuilder builder(this);
builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
builder.Then();
- builder.ElseDeopt();
+ builder.ElseDeopt("Forced deopt to runtime");
return undefined;
}
@@ -278,8 +285,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
template <class Stub>
-static Handle<Code> DoGenerateCode(Stub* stub) {
- Isolate* isolate = Isolate::Current();
+static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) {
CodeStub::Major major_key =
static_cast<HydrogenCodeStub*>(stub)->MajorKey();
CodeStubInterfaceDescriptor* descriptor =
@@ -295,7 +301,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
ASSERT(descriptor->stack_parameter_count_ == NULL);
return stub->GenerateLightweightMissCode(isolate);
}
- CodeStubGraphBuilder<Stub> builder(stub);
+ CodeStubGraphBuilder<Stub> builder(isolate, stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen();
}
@@ -327,8 +333,8 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
}
-Handle<Code> ToNumberStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -352,7 +358,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
- HValue* elements = AddLoadElements(boilerplate, NULL);
+ HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
@@ -387,15 +393,15 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
length));
}
- checker.ElseDeopt();
+ checker.ElseDeopt("Uninitialized boilerplate literals");
checker.End();
return environment()->Pop();
}
-Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -434,15 +440,15 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
}
environment()->Push(object);
- checker.ElseDeopt();
+ checker.ElseDeopt("Uninitialized boilerplate in fast clone");
checker.End();
return environment()->Pop();
}
-Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> FastCloneShallowObjectStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -487,23 +493,23 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
}
-Handle<Code> CreateAllocationSiteStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> CreateAllocationSiteStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
- GetParameter(0), GetParameter(1), NULL, NULL,
+ GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
false, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
-Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -513,12 +519,12 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
-Handle<Code> LoadFieldStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -528,19 +534,19 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep);
- return AddInstruction(BuildLoadNamedField(GetParameter(0), access, NULL));
+ return AddInstruction(BuildLoadNamedField(GetParameter(0), access));
}
-Handle<Code> KeyedLoadFieldStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> KeyedLoadFieldStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
- GetParameter(0), GetParameter(1), GetParameter(2), NULL,
+ GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
@@ -548,8 +554,8 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
}
-Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> KeyedStoreFastElementStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -567,8 +573,8 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
}
-Handle<Code> TransitionElementsKindStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> TransitionElementsKindStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
@@ -702,8 +708,8 @@ HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
}
-Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -717,8 +723,9 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
}
-Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -731,8 +738,8 @@ HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
}
-Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -744,8 +751,9 @@ HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
}
-Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -757,8 +765,9 @@ HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
}
-Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -770,8 +779,9 @@ HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
}
-Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode(
+ Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -796,46 +806,8 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
}
-Handle<Code> CompareNilICStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<UnaryOpStub>::BuildCodeInitializedStub() {
- UnaryOpStub* stub = casted_stub();
- Handle<Type> type = stub->GetType(graph()->isolate());
- HValue* input = GetParameter(0);
-
- // Prevent unwanted HChange being inserted to ensure that the stub
- // deopts on newly encountered types.
- if (!type->Maybe(Type::Double())) {
- input = Add<HForceRepresentation>(input, Representation::Smi());
- }
-
- if (!type->Is(Type::Number())) {
- // If we expect to see other things than Numbers, we will create a generic
- // stub, which handles all numbers and calls into the runtime for the rest.
- IfBuilder if_number(this);
- if_number.If<HIsNumberAndBranch>(input);
- if_number.Then();
- HInstruction* res = BuildUnaryMathOp(input, type, stub->operation());
- if_number.Return(AddInstruction(res));
- if_number.Else();
- HValue* function = AddLoadJSBuiltin(stub->ToJSBuiltin());
- Add<HPushArgument>(GetParameter(0));
- HValue* result = Add<HInvokeFunction>(function, 1);
- if_number.Return(result);
- if_number.End();
- return graph()->GetConstantUndefined();
- }
-
- return AddInstruction(BuildUnaryMathOp(input, type, stub->operation()));
-}
-
-
-Handle<Code> UnaryOpStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> CompareNilICStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -853,8 +825,8 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
}
-Handle<Code> ToBooleanStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ToBooleanStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -882,7 +854,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
IfBuilder builder(this);
builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
builder.Then();
- builder.ElseDeopt();
+ builder.ElseDeopt("Unexpected cell contents in constant global store");
builder.End();
} else {
// Load the payload of the global parameter cell. A hole indicates that the
@@ -892,7 +864,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
HValue* hole_value = Add<HConstant>(hole);
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
- builder.Deopt();
+ builder.Deopt("Unexpected cell contents in global store");
builder.Else();
Add<HStoreNamedField>(cell, access, value);
builder.End();
@@ -902,8 +874,8 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
}
-Handle<Code> StoreGlobalStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> StoreGlobalStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
@@ -916,7 +888,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- Add<HDeoptimize>(Deoptimizer::EAGER);
+ Add<HDeoptimize>("Tracing elements transitions", Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
@@ -925,19 +897,207 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
casted_stub()->to_kind(),
casted_stub()->is_jsarray());
- BuildUncheckedMonomorphicElementAccess(object, key, value, NULL,
- casted_stub()->is_jsarray(),
- casted_stub()->to_kind(),
- true, ALLOW_RETURN_HOLE,
- casted_stub()->store_mode());
+ BuildUncheckedMonomorphicElementAccess(object, key, value,
+ casted_stub()->is_jsarray(),
+ casted_stub()->to_kind(),
+ true, ALLOW_RETURN_HOLE,
+ casted_stub()->store_mode());
}
return value;
}
-Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
- return DoGenerateCode(this);
+Handle<Code> ElementsTransitionAndStoreStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
+ HValue* js_function,
+ HValue* native_context,
+ HValue* code_object) {
+ Counters* counters = isolate()->counters();
+ AddIncrementCounter(counters->fast_new_closure_install_optimized(),
+ context());
+
+ // TODO(fschneider): Idea: store proper code pointers in the optimized code
+ // map and either unmangle them on marking or do nothing as the whole map is
+ // discarded on major GC anyway.
+ Add<HStoreCodeEntry>(js_function, code_object);
+
+ // Now link a function into a list of optimized functions.
+ HValue* optimized_functions_list = Add<HLoadNamedField>(native_context,
+ HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForNextFunctionLinkPointer(),
+ optimized_functions_list);
+
+ // This store is the only one that should have a write barrier.
+ Add<HStoreNamedField>(native_context,
+ HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
+ js_function);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
+ HValue* shared_info) {
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForNextFunctionLinkPointer(),
+ graph()->GetConstantUndefined());
+ HValue* code_object = Add<HLoadNamedField>(shared_info,
+ HObjectAccess::ForCodeOffset());
+ Add<HStoreCodeEntry>(js_function, code_object);
+}
+
+
+void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
+ HValue* js_function,
+ HValue* shared_info,
+ HValue* native_context) {
+ Counters* counters = isolate()->counters();
+ IfBuilder is_optimized(this);
+ HInstruction* optimized_map = Add<HLoadNamedField>(shared_info,
+ HObjectAccess::ForOptimizedCodeMap());
+ HValue* null_constant = Add<HConstant>(0);
+ is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
+ is_optimized.Then();
+ {
+ BuildInstallCode(js_function, shared_info);
+ }
+ is_optimized.Else();
+ {
+ AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
+    // optimized_map points to a fixed array of 3-element entries
+    // (native context, optimized code, literals).
+    // The map must never be empty, so check the first entry.
+ Label install_optimized;
+ HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFirstContextSlot());
+ IfBuilder already_in(this);
+ already_in.If<HCompareObjectEqAndBranch>(native_context,
+ first_context_slot);
+ already_in.Then();
+ {
+ HValue* code_object = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFirstCodeSlot());
+ BuildInstallOptimizedCode(js_function, native_context, code_object);
+ }
+ already_in.Else();
+ {
+ HValue* shared_function_entry_length =
+ Add<HConstant>(SharedFunctionInfo::kEntryLength);
+ LoopBuilder loop_builder(this,
+ context(),
+ LoopBuilder::kPostDecrement,
+ shared_function_entry_length);
+ HValue* array_length = Add<HLoadNamedField>(optimized_map,
+ HObjectAccess::ForFixedArrayLength());
+ HValue* key = loop_builder.BeginBody(array_length,
+ graph()->GetConstant0(),
+ Token::GT);
+ {
+        // Iterate through the rest of the map backwards.
+        // Do not double-check the first entry.
+ HValue* second_entry_index =
+ Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
+ IfBuilder restore_check(this);
+ restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
+ Token::EQ);
+ restore_check.Then();
+ {
+ // Store the unoptimized code
+ BuildInstallCode(js_function, shared_info);
+ loop_builder.Break();
+ }
+ restore_check.Else();
+ {
+ HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key,
+ shared_function_entry_length));
+ HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
+ keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ IfBuilder done_check(this);
+ done_check.If<HCompareObjectEqAndBranch>(native_context,
+ keyed_lookup);
+ done_check.Then();
+ {
+ // Hit: fetch the optimized code.
+ HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(),
+ keyed_minus, graph()->GetConstant1()));
+ HValue* code_object = Add<HLoadKeyed>(optimized_map,
+ keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+ BuildInstallOptimizedCode(js_function, native_context, code_object);
+
+ // Fall out of the loop
+ loop_builder.Break();
+ }
+ done_check.Else();
+ done_check.End();
+ }
+ restore_check.End();
+ }
+ loop_builder.EndBody();
+ }
+ already_in.End();
+ }
+ is_optimized.End();
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
+ Counters* counters = isolate()->counters();
+ Factory* factory = isolate()->factory();
+ HInstruction* empty_fixed_array =
+ Add<HConstant>(factory->empty_fixed_array());
+ HValue* shared_info = GetParameter(0);
+
+ // Create a new closure from the given function info in new space
+ HValue* size = Add<HConstant>(JSFunction::kSize);
+ HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
+ NOT_TENURED, JS_FUNCTION_TYPE);
+ AddIncrementCounter(counters->fast_new_closure_total(), context());
+
+ int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
+ casted_stub()->is_generator());
+
+ // Compute the function map in the current native context and set that
+ // as the map of the allocated object.
+ HInstruction* native_context = BuildGetNativeContext();
+ HInstruction* map_slot_value = Add<HLoadNamedField>(native_context,
+ HObjectAccess::ForContextSlot(map_index));
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
+
+ // Initialize the rest of the function.
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForPropertiesPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
+ graph()->GetConstantHole());
+ Add<HStoreNamedField>(js_function,
+ HObjectAccess::ForSharedFunctionInfoPointer(),
+ shared_info);
+ Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
+ context());
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ // But first check if there is an optimized version for our context.
+ if (FLAG_cache_optimized_code) {
+ BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
+ } else {
+ BuildInstallCode(js_function, shared_info);
+ }
+
+ return js_function;
+}
+
+
+Handle<Code> FastNewClosureStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
}
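BuildInstallFromOptimizedCodeMap above ports the hand-written FastNewClosure assembly into Hydrogen: it walks the shared function info's optimized code map, a FixedArray of (native context, code, literals) triples, backwards, and installs cached optimized code on a context hit. The control flow, reduced to a standalone stand-in over a plain vector (kEntryLength is assumed to be 3):

    #include <cstddef>
    #include <vector>

    struct Entry { void* context; void* code; void* literals; };

    // Returns cached optimized code for this context, or nullptr so the
    // caller falls back to the shared, unoptimized code.
    void* FindOptimizedCode(const std::vector<Entry>& map,
                            void* native_context) {
      for (size_t i = map.size(); i-- > 0; ) {  // scan backwards, as above
        if (map[i].context == native_context) return map[i].code;
      }
      return nullptr;
    }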
diff --git a/chromium/v8/src/code-stubs.cc b/chromium/v8/src/code-stubs.cc
index 5f6616ea07a..ace4af42a9e 100644
--- a/chromium/v8/src/code-stubs.cc
+++ b/chromium/v8/src/code-stubs.cc
@@ -46,7 +46,7 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
deoptimization_handler_(NULL),
- miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()),
+ miss_handler_(),
has_miss_handler_(false) { }
@@ -93,8 +93,7 @@ Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
}
-Handle<Code> PlatformCodeStub::GenerateCode() {
- Isolate* isolate = Isolate::Current();
+Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
Factory* factory = isolate->factory();
// Generate the new code.
@@ -137,14 +136,14 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) {
if (UseSpecialCache()
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
- ASSERT(IsPregenerated() == code->is_pregenerated());
+ ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
return Handle<Code>(code);
}
{
HandleScope scope(isolate);
- Handle<Code> new_object = GenerateCode();
+ Handle<Code> new_object = GenerateCode(isolate);
new_object->set_major_key(MajorKey());
FinishCode(new_object);
RecordCodeGeneration(*new_object, isolate);
@@ -204,71 +203,6 @@ void CodeStub::PrintName(StringStream* stream) {
}
-Builtins::JavaScript UnaryOpStub::ToJSBuiltin() {
- switch (operation_) {
- default:
- UNREACHABLE();
- case Token::SUB:
- return Builtins::UNARY_MINUS;
- case Token::BIT_NOT:
- return Builtins::BIT_NOT;
- }
-}
-
-
-Handle<JSFunction> UnaryOpStub::ToJSFunction(Isolate* isolate) {
- Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
- Object* builtin = builtins->javascript_builtin(ToJSBuiltin());
- return Handle<JSFunction>(JSFunction::cast(builtin), isolate);
-}
-
-
-MaybeObject* UnaryOpStub::Result(Handle<Object> object, Isolate* isolate) {
- Handle<JSFunction> builtin_function = ToJSFunction(isolate);
- bool caught_exception;
- Handle<Object> result = Execution::Call(builtin_function, object,
- 0, NULL, &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
-}
-
-
-void UnaryOpStub::UpdateStatus(Handle<Object> object) {
- State old_state(state_);
- if (object->IsSmi()) {
- state_.Add(SMI);
- if (operation_ == Token::SUB && *object == 0) {
- // The result (-0) has to be represented as double.
- state_.Add(HEAP_NUMBER);
- }
- } else if (object->IsHeapNumber()) {
- state_.Add(HEAP_NUMBER);
- } else {
- state_.Add(GENERIC);
- }
- TraceTransition(old_state, state_);
-}
-
-
-Handle<Type> UnaryOpStub::GetType(Isolate* isolate) {
- if (state_.Contains(GENERIC)) {
- return handle(Type::Any(), isolate);
- }
- Handle<Type> type = handle(Type::None(), isolate);
- if (state_.Contains(SMI)) {
- type = handle(
- Type::Union(type, handle(Type::Smi(), isolate)), isolate);
- }
- if (state_.Contains(HEAP_NUMBER)) {
- type = handle(
- Type::Union(type, handle(Type::Double(), isolate)), isolate);
- }
- return type;
-}
-
-
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
@@ -354,29 +288,6 @@ void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
#undef __
-void UnaryOpStub::PrintBaseName(StringStream* stream) {
- CodeStub::PrintBaseName(stream);
- if (operation_ == Token::SUB) stream->Add("Minus");
- if (operation_ == Token::BIT_NOT) stream->Add("Not");
-}
-
-
-void UnaryOpStub::PrintState(StringStream* stream) {
- state_.Print(stream);
-}
-
-
-void UnaryOpStub::State::Print(StringStream* stream) const {
- stream->Add("(");
- SimpleListPrinter printer(stream);
- if (IsEmpty()) printer.Add("None");
- if (Contains(GENERIC)) printer.Add("Generic");
- if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber");
- if (Contains(SMI)) printer.Add("Smi");
- stream->Add(")");
-}
-
-
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@@ -557,6 +468,9 @@ void CompareNilICStub::UpdateStatus(Handle<Object> object) {
template<class StateType>
void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
+ // Note: Although a no-op transition is semantically OK, it is hinting at a
+ // bug somewhere in our state transition machinery.
+ ASSERT(from != to);
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
@@ -681,19 +595,9 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS: {
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
- is_js_array_,
- elements_kind_,
- store_mode_);
- }
- break;
+ case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
- is_js_array_,
- store_mode_);
- break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -703,7 +607,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
- KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+ UNREACHABLE();
break;
case DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
@@ -827,8 +731,9 @@ void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer) {
- FunctionEntryHook entry_hook = Isolate::Current()->function_entry_hook();
+ intptr_t stack_pointer,
+ Isolate* isolate) {
+ FunctionEntryHook entry_hook = isolate->function_entry_hook();
ASSERT(entry_hook != NULL);
entry_hook(function, stack_pointer);
}
@@ -854,6 +759,12 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
}
+void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
+ FastNewClosureStub stub(STRICT_MODE, false);
+ InstallDescriptor(isolate, &stub);
+}
+
+
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: argument_count_(ANY) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
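The TraceTransition hunk above asserts that a stub's type-feedback state never "transitions" to itself. A minimal stand-alone sketch of the pattern (plain C++ with a bitmask, not V8's EnumSet or HydrogenCodeStub types) showing why UpdateStatus-style callers satisfy the new ASSERT: each update adds at least one bit before tracing.

#include <cassert>
#include <cstdio>

enum StateBit { kSmi = 1 << 0, kHeapNumber = 1 << 1, kGeneric = 1 << 2 };

struct State {
  unsigned bits;
  State() : bits(0) {}
  bool operator!=(const State& other) const { return bits != other.bits; }
  void Add(StateBit bit) { bits |= bit; }
};

// Mirrors the new ASSERT(from != to): a no-op transition is treated as a bug
// in the caller's state machinery, not as normal operation.
static void TraceTransition(State from, State to) {
  assert(from != to);
  std::printf("transition: %u -> %u\n", from.bits, to.bits);
}

static void UpdateStatus(State* state, bool is_smi) {
  State old_state = *state;
  if (is_smi) {
    state->Add(kSmi);
  } else {
    state->Add(kGeneric);
  }
  TraceTransition(old_state, *state);  // Would assert if nothing changed.
}

int main() {
  State state;
  UpdateStatus(&state, true);   // {} -> {Smi}
  UpdateStatus(&state, false);  // {Smi} -> {Smi, Generic}
  return 0;
}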
diff --git a/chromium/v8/src/code-stubs.h b/chromium/v8/src/code-stubs.h
index 84d9b023b3a..946eb76962d 100644
--- a/chromium/v8/src/code-stubs.h
+++ b/chromium/v8/src/code-stubs.h
@@ -40,7 +40,6 @@ namespace internal {
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(CallConstruct) \
- V(UnaryOp) \
V(BinaryOp) \
V(StringAdd) \
V(SubString) \
@@ -103,7 +102,6 @@ namespace internal {
V(GetProperty) \
V(SetProperty) \
V(InvokeBuiltin) \
- V(RegExpCEntry) \
V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
@@ -160,14 +158,14 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
bool CompilingCallsToThisStubIsGCSafe(Isolate* isolate) {
- bool is_pregenerated = IsPregenerated();
+ bool is_pregenerated = IsPregenerated(isolate);
Code* code = NULL;
CHECK(!is_pregenerated || FindCodeInCache(&code, isolate));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
- virtual bool IsPregenerated() { return false; }
+ virtual bool IsPregenerated(Isolate* isolate) { return false; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void GenerateFPStubs(Isolate* isolate);
@@ -206,7 +204,7 @@ class CodeStub BASE_EMBEDDED {
static bool CanUseFPRegisters();
// Generates the assembler code for the stub.
- virtual Handle<Code> GenerateCode() = 0;
+ virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
// Returns whether the code generated for this stub needs to be allocated as
@@ -264,7 +262,7 @@ class CodeStub BASE_EMBEDDED {
class PlatformCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@@ -354,7 +352,7 @@ class HydrogenCodeStub : public CodeStub {
CodeStubInterfaceDescriptor* descriptor) = 0;
// Retrieve the code for the stub. Generate the code if needed.
- virtual Handle<Code> GenerateCode() = 0;
+ virtual Handle<Code> GenerateCode(Isolate* isolate) = 0;
virtual int NotMissMinorKey() = 0;
@@ -450,35 +448,11 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
-class StackCheckStub : public PlatformCodeStub {
- public:
- StackCheckStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
-};
-
-
-class InterruptStub : public PlatformCodeStub {
- public:
- InterruptStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Interrupt; }
- int MinorKey() { return 0; }
-};
-
-
class ToNumberStub: public HydrogenCodeStub {
public:
ToNumberStub() { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -490,20 +464,29 @@ class ToNumberStub: public HydrogenCodeStub {
};
-class FastNewClosureStub : public PlatformCodeStub {
+class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
: language_mode_(language_mode),
is_generator_(is_generator) { }
- void Generate(MacroAssembler* masm);
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ static void InstallDescriptors(Isolate* isolate);
+
+ LanguageMode language_mode() const { return language_mode_; }
+ bool is_generator() const { return is_generator_; }
private:
class StrictModeBits: public BitField<bool, 0, 1> {};
class IsGeneratorBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return FastNewClosure; }
- int MinorKey() {
+ int NotMissMinorKey() {
return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
IsGeneratorBits::encode(is_generator_);
}
@@ -555,7 +538,7 @@ class StoreGlobalStub : public HydrogenCodeStub {
IsConstantBits::encode(is_constant);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -593,73 +576,6 @@ class StoreGlobalStub : public HydrogenCodeStub {
};
-class UnaryOpStub : public HydrogenCodeStub {
- public:
- // Stub without type info available -> construct uninitialized
- explicit UnaryOpStub(Token::Value operation)
- : HydrogenCodeStub(UNINITIALIZED), operation_(operation) { }
- explicit UnaryOpStub(Code::ExtraICState ic_state) :
- state_(StateBits::decode(ic_state)),
- operation_(OperatorBits::decode(ic_state)) { }
-
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor);
-
- virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
- virtual InlineCacheState GetICState() {
- if (state_.Contains(GENERIC)) {
- return MEGAMORPHIC;
- } else if (state_.IsEmpty()) {
- return PREMONOMORPHIC;
- } else {
- return MONOMORPHIC;
- }
- }
- virtual Code::ExtraICState GetExtraICState() {
- return OperatorBits::encode(operation_) |
- StateBits::encode(state_.ToIntegral());
- }
-
- Token::Value operation() { return operation_; }
- Handle<JSFunction> ToJSFunction(Isolate* isolate);
- Builtins::JavaScript ToJSBuiltin();
-
- void UpdateStatus(Handle<Object> object);
- MaybeObject* Result(Handle<Object> object, Isolate* isolate);
- Handle<Code> GenerateCode();
- Handle<Type> GetType(Isolate* isolate);
-
- protected:
- void PrintState(StringStream* stream);
- void PrintBaseName(StringStream* stream);
-
- private:
- enum UnaryOpType {
- SMI,
- HEAP_NUMBER,
- GENERIC,
- NUMBER_OF_TYPES
- };
-
- class State : public EnumSet<UnaryOpType, byte> {
- public:
- State() : EnumSet<UnaryOpType, byte>() { }
- explicit State(byte bits) : EnumSet<UnaryOpType, byte>(bits) { }
- void Print(StringStream* stream) const;
- };
-
- class StateBits : public BitField<int, 0, NUMBER_OF_TYPES> { };
- class OperatorBits : public BitField<Token::Value, NUMBER_OF_TYPES, 8> { };
-
- State state_;
- Token::Value operation_;
-
- virtual CodeStub::Major MajorKey() { return UnaryOp; }
- virtual int NotMissMinorKey() { return GetExtraICState(); }
-};
-
-
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
// Maximum length of copied elements array.
@@ -704,7 +620,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
return LAST_ELEMENTS_KIND;
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -744,7 +660,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
int length() const { return length_; }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -764,9 +680,9 @@ class CreateAllocationSiteStub : public HydrogenCodeStub {
public:
explicit CreateAllocationSiteStub() { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
@@ -820,6 +736,13 @@ class InstanceofStub: public PlatformCodeStub {
};
+enum AllocationSiteOverrideMode {
+ DONT_OVERRIDE,
+ DISABLE_ALLOCATION_SITES,
+ LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
+};
+
+
class ArrayConstructorStub: public PlatformCodeStub {
public:
enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
@@ -829,6 +752,9 @@ class ArrayConstructorStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
private:
+ void GenerateDispatchToArrayStub(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode);
+
virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
virtual int MinorKey() { return argument_count_; }
@@ -981,7 +907,7 @@ class LoadFieldStub: public HandlerStub {
Initialize(Code::LOAD_IC, inobject, index, representation);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1050,7 +976,7 @@ class KeyedLoadFieldStub: public LoadFieldStub {
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
private:
virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
@@ -1321,7 +1247,7 @@ class CompareNilICStub : public HydrogenCodeStub {
virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
- Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual Code::ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
@@ -1389,7 +1315,7 @@ class CEntryStub : public PlatformCodeStub {
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateAheadOfTime(Isolate* isolate);
private:
@@ -1401,6 +1327,7 @@ class CEntryStub : public PlatformCodeStub {
bool always_allocate_scope);
// Number of pointers/values returned.
+ Isolate* isolate_;
const int result_size_;
SaveFPRegsMode save_doubles_;
@@ -1772,11 +1699,13 @@ class DoubleToIStub : public PlatformCodeStub {
DoubleToIStub(Register source,
Register destination,
int offset,
- bool is_truncating) : bit_field_(0) {
+ bool is_truncating,
+ bool skip_fastpath = false) : bit_field_(0) {
bit_field_ = SourceRegisterBits::encode(source.code_) |
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
- IsTruncatingBits::encode(is_truncating);
+ IsTruncatingBits::encode(is_truncating) |
+ SkipFastPathBits::encode(skip_fastpath);
}
Register source() {
@@ -1793,12 +1722,18 @@ class DoubleToIStub : public PlatformCodeStub {
return IsTruncatingBits::decode(bit_field_);
}
+ bool skip_fastpath() {
+ return SkipFastPathBits::decode(bit_field_);
+ }
+
int offset() {
return OffsetBits::decode(bit_field_);
}
void Generate(MacroAssembler* masm);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
@@ -1811,6 +1746,8 @@ class DoubleToIStub : public PlatformCodeStub {
public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
class OffsetBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
+ class SkipFastPathBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
@@ -1836,7 +1773,7 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub {
return ElementsKindBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1876,7 +1813,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
return StoreModeBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1911,7 +1848,7 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
return ToKindBits::decode(bit_field_);
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -1936,13 +1873,6 @@ enum ContextCheckMode {
};
-enum AllocationSiteOverrideMode {
- DONT_OVERRIDE,
- DISABLE_ALLOCATION_SITES,
- LAST_ALLOCATION_SITE_OVERRIDE_MODE = DISABLE_ALLOCATION_SITES
-};
-
-
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(ElementsKind kind, ContextCheckMode context_mode,
@@ -1950,7 +1880,8 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
- ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
+ ASSERT(!(FLAG_track_allocation_sites &&
+ override_mode == DISABLE_ALLOCATION_SITES) ||
AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
AllocationSiteOverrideModeBits::encode(override_mode) |
@@ -1969,7 +1900,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
return ContextCheckModeBits::decode(bit_field_);
}
- virtual bool IsPregenerated() {
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE {
// We only pre-generate stubs that verify correct context
return context_mode() == CONTEXT_CHECK_REQUIRED;
}
@@ -2007,7 +1938,7 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2029,7 +1960,7 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2051,7 +1982,7 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
: ArrayConstructorStubBase(kind, context_mode, override_mode) {
}
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2070,7 +2001,7 @@ class InternalArrayConstructorStubBase : public HydrogenCodeStub {
kind_ = kind;
}
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
@@ -2094,7 +2025,7 @@ class InternalArrayNoArgumentConstructorStub : public
explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2113,7 +2044,7 @@ class InternalArraySingleArgumentConstructorStub : public
explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2132,7 +2063,7 @@ class InternalArrayNArgumentsConstructorStub : public
explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2221,7 +2152,7 @@ class ToBooleanStub: public HydrogenCodeStub {
bool UpdateStatus(Handle<Object> object);
Types GetTypes() { return types_; }
- virtual Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
@@ -2281,7 +2212,7 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
bool is_jsarray() const { return is_jsarray_; }
KeyedAccessStoreMode store_mode() const { return store_mode_; }
- Handle<Code> GenerateCode();
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
void InitializeInterfaceDescriptor(
Isolate* isolate,
@@ -2334,7 +2265,7 @@ class StubFailureTrampolineStub : public PlatformCodeStub {
explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
: fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
@@ -2369,7 +2300,8 @@ class ProfileEntryHookStub : public PlatformCodeStub {
private:
static void EntryHookTrampoline(intptr_t function,
- intptr_t stack_pointer);
+ intptr_t stack_pointer,
+ Isolate* isolate);
Major MajorKey() { return ProfileEntryHook; }
int MinorKey() { return 0; }
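The DoubleToIStub change above packs two register numbers, a frame offset, and two boolean flags into a single bit_field_ minor key. A simplified BitField (loosely modeled on the template in v8/src/utils.h; the shifts and widths copy the stub's layout, with kBitsPerRegisterNumber = 6) illustrating the encode/decode round trip, including the new SkipFastPathBits:

#include <cassert>

template <class T, int shift, int size>
struct BitField {
  static const unsigned kMask = ((1u << size) - 1u) << shift;
  static unsigned encode(T value) {
    return (static_cast<unsigned>(value) << shift) & kMask;
  }
  static T decode(unsigned field) {
    return static_cast<T>((field & kMask) >> shift);
  }
};

const int kBitsPerRegisterNumber = 6;
typedef BitField<int, 0, kBitsPerRegisterNumber> SourceRegisterBits;
typedef BitField<int, kBitsPerRegisterNumber,
                 kBitsPerRegisterNumber> DestinationRegisterBits;
typedef BitField<bool, 2 * kBitsPerRegisterNumber, 1> IsTruncatingBits;
typedef BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> OffsetBits;
typedef BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> SkipFastPathBits;

int main() {
  unsigned bit_field = SourceRegisterBits::encode(3) |
                       DestinationRegisterBits::encode(7) |
                       IsTruncatingBits::encode(true) |
                       OffsetBits::encode(5) |
                       SkipFastPathBits::encode(1);
  // Each field decodes independently of the others.
  assert(SourceRegisterBits::decode(bit_field) == 3);
  assert(DestinationRegisterBits::decode(bit_field) == 7);
  assert(IsTruncatingBits::decode(bit_field));
  assert(OffsetBits::decode(bit_field) == 5);
  assert(SkipFastPathBits::decode(bit_field) == 1);
  return 0;
}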
diff --git a/chromium/v8/src/codegen.cc b/chromium/v8/src/codegen.cc
index 2031b321a4b..d33c7f06bd4 100644
--- a/chromium/v8/src/codegen.cc
+++ b/chromium/v8/src/codegen.cc
@@ -89,12 +89,12 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter().PrintProgram(info->function()));
+ PrettyPrinter(info->isolate()).PrintProgram(info->function()));
}
if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
- AstPrinter().PrintProgram(info->function()));
+ AstPrinter(info->isolate()).PrintProgram(info->function()));
}
#endif // DEBUG
}
@@ -114,11 +114,9 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
false, is_crankshafted);
- if (!code.is_null()) {
- isolate->counters()->total_compiled_code_size()->Increment(
- code->instruction_size());
- code->set_prologue_offset(info->prologue_offset());
- }
+ isolate->counters()->total_compiled_code_size()->Increment(
+ code->instruction_size());
+ code->set_prologue_offset(info->prologue_offset());
return code;
}
@@ -126,7 +124,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
AllowDeferredHandleDereference allow_deference_for_print_code;
- bool print_code = Isolate::Current()->bootstrapper()->IsActive()
+ bool print_code = info->isolate()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code ||
(info->IsStub() && FLAG_print_code_stubs) ||
@@ -173,9 +171,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
}
-bool CodeGenerator::ShouldGenerateLog(Expression* type) {
+bool CodeGenerator::ShouldGenerateLog(Isolate* isolate, Expression* type) {
ASSERT(type != NULL);
- Isolate* isolate = Isolate::Current();
if (!isolate->logger()->is_logging() &&
!isolate->cpu_profiler()->is_profiling()) {
return false;
diff --git a/chromium/v8/src/compiler.cc b/chromium/v8/src/compiler.cc
index 3c51baa30e9..1aa3776bde9 100644
--- a/chromium/v8/src/compiler.cc
+++ b/chromium/v8/src/compiler.cc
@@ -58,7 +58,8 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(script->GetIsolate(), BASE, zone);
}
@@ -68,7 +69,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
: flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -80,7 +82,8 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
@@ -90,7 +93,8 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
- osr_ast_id_(BailoutId::None()) {
+ osr_ast_id_(BailoutId::None()),
+ osr_pc_offset_(0) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
@@ -119,7 +123,8 @@ void CompilationInfo::Initialize(Isolate* isolate,
mode_ = STUB;
return;
}
- mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+ mode_ = isolate->use_crankshaft() ? mode : NONOPT;
+ abort_due_to_dependency_ = false;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
@@ -127,7 +132,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
ASSERT(language_mode() == CLASSIC_MODE);
SetLanguageMode(shared_info_->language_mode());
}
- set_bailout_reason("unknown");
+ set_bailout_reason(kUnknown);
}
@@ -225,18 +230,12 @@ bool CompilationInfo::ShouldSelfOptimize() {
return FLAG_self_optimization &&
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
- !function()->flags()->Contains(kDontOptimize) &&
+ !function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
-void CompilationInfo::AbortOptimization() {
- Handle<Code> code(shared_info()->code());
- SetCode(code);
-}
-
-
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
@@ -247,7 +246,7 @@ void CompilationInfo::AbortOptimization() {
// break points has actually been set.
static bool IsDebuggerActive(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- return V8::UseCrankshaft() ?
+ return isolate->use_crankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
#else
@@ -263,12 +262,14 @@ static bool AlwaysFullCompiler(Isolate* isolate) {
void OptimizingCompiler::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
- int opt_count = function->shared()->opt_count();
- function->shared()->set_opt_count(opt_count + 1);
- double ms_creategraph =
- static_cast<double>(time_taken_to_create_graph_) / 1000;
- double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
- double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
+ if (!function->IsOptimized()) {
+ // Concurrent recompilation and OSR may race. Increment only once.
+ int opt_count = function->shared()->opt_count();
+ function->shared()->set_opt_count(opt_count + 1);
+ }
+ double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
+ double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
+ double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
@@ -316,14 +317,13 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
- ASSERT(V8::UseCrankshaft());
+ ASSERT(isolate()->use_crankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
// We should never arrive here if there is no code object on the
// shared function object.
- Handle<Code> code(info()->shared_info()->code());
- ASSERT(code->kind() == Code::FUNCTION);
+ ASSERT(info()->shared_info()->code()->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
@@ -333,7 +333,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
if (AlwaysFullCompiler(isolate())) {
- info()->SetCode(code);
+ info()->AbortOptimization();
return SetLastStatus(BAILED_OUT);
}
@@ -342,7 +342,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
if (info()->opt_count() > kMaxOptCount) {
- info()->set_bailout_reason("optimized too many times");
+ info()->set_bailout_reason(kOptimizedTooManyTimes);
return AbortOptimization();
}
@@ -356,21 +356,21 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
- info()->set_bailout_reason("too many parameters");
+ info()->set_bailout_reason(kTooManyParameters);
return AbortOptimization();
}
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
- if (!info()->osr_ast_id().IsNone() &&
+ if (info()->is_osr() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
- info()->set_bailout_reason("too many parameters/locals");
+ info()->set_bailout_reason(kTooManyParametersLocals);
return AbortOptimization();
}
// Take --hydrogen-filter into account.
- if (!info()->closure()->PassesHydrogenFilter()) {
- info()->SetCode(code);
- return SetLastStatus(BAILED_OUT);
+ if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
+ info()->AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
}
// Recompile the unoptimized version of the code if the current version
@@ -379,9 +379,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// performance of the hydrogen-based compiler.
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
- int64_t start_ticks = 0;
+ ElapsedTimer timer;
if (FLAG_hydrogen_stats) {
- start_ticks = OS::Ticks();
+ timer.Start();
}
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
@@ -400,8 +400,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
if (FLAG_hydrogen_stats) {
- int64_t ticks = OS::Ticks() - start_ticks;
- isolate()->GetHStatistics()->IncrementFullCodeGen(ticks);
+ isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
}
}
@@ -410,7 +409,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// optimizable marker in the code object and optimize anyway. This
// is safe as long as the unoptimized code has deoptimization
// support.
- ASSERT(FLAG_always_opt || code->optimizable());
+ ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable());
ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
@@ -446,6 +445,12 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
}
+ if (info()->HasAbortedDueToDependencyChange()) {
+ info_->set_bailout_reason(kBailedOutDueToDependencyChange);
+ info_->AbortOptimization();
+ return SetLastStatus(BAILED_OUT);
+ }
+
return SetLastStatus(SUCCEEDED);
}
@@ -454,13 +459,14 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
ASSERT(graph_ != NULL);
- SmartArrayPointer<char> bailout_reason;
+ BailoutReason bailout_reason = kNoReason;
if (!graph_->Optimize(&bailout_reason)) {
- if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
+ if (bailout_reason != kNoReason) graph_builder_->Bailout(bailout_reason);
return SetLastStatus(BAILED_OUT);
} else {
chunk_ = LChunk::NewChunk(graph_);
@@ -474,6 +480,8 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
+ ASSERT(!info()->HasAbortedDueToDependencyChange());
+ DisallowCodeDependencyChange no_dependency_change;
{ // Scope for timer.
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
@@ -485,18 +493,22 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
DisallowDeferredHandleDereference no_deferred_handle_deref;
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
- info()->set_bailout_reason("code generation failed");
+ if (info()->bailout_reason() == kNoReason) {
+ info()->set_bailout_reason(kCodeGenerationFailed);
+ }
return AbortOptimization();
}
info()->SetCode(optimized_code);
}
RecordOptimizationStats();
+ // Add to the weak list of optimized code objects.
+ info()->context()->native_context()->AddOptimizedCode(*info()->code());
return SetLastStatus(SUCCEEDED);
}
static bool GenerateCode(CompilationInfo* info) {
- bool is_optimizing = V8::UseCrankshaft() &&
+ bool is_optimizing = info->isolate()->use_crankshaft() &&
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
@@ -716,7 +728,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
- script->set_data(script_data.is_null() ? HEAP->undefined_value()
+ script->set_data(script_data.is_null() ? isolate->heap()->undefined_value()
: *script_data);
// Compile the function and add it to the cache.
@@ -733,8 +745,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
compilation_cache->PutScript(source, context, result);
}
} else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
+ if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@@ -780,7 +792,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
if (!result.is_null()) {
// Explicitly disable optimization for eval code. We're not yet prepared
// to handle eval-code in the optimizing compiler.
- result->DisableOptimization("eval");
+ result->DisableOptimization(kEval);
// If caller is strict mode, the result must be in strict mode or
// extended mode as well, but not the other way around. Consider:
@@ -796,8 +808,8 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
}
}
} else {
- if (result->ic_age() != HEAP->global_ic_age()) {
- result->ResetForNewContext(HEAP->global_ic_age());
+ if (result->ic_age() != isolate->heap()->global_ic_age()) {
+ result->ResetForNewContext(isolate->heap()->global_ic_age());
}
}
@@ -813,6 +825,7 @@ static bool InstallFullCode(CompilationInfo* info) {
// was flushed. By setting the code object last we avoid this.
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Code> code = info->code();
+ CHECK(code->kind() == Code::FUNCTION);
Handle<JSFunction> function = info->closure();
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
@@ -830,18 +843,18 @@ static bool InstallFullCode(CompilationInfo* info) {
// Check the function has compiled code.
ASSERT(shared->is_compiled());
- shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ shared->set_dont_optimize_reason(lit->dont_optimize_reason());
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
- if (V8::UseCrankshaft() &&
+ if (info->isolate()->use_crankshaft() &&
!function.is_null() &&
!shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt &&
- !Isolate::Current()->DebuggerHasBreakPoints()) {
+ !info->isolate()->DebuggerHasBreakPoints()) {
CompilationInfoWithZone optimized(function);
optimized.SetOptimizing(BailoutId::None());
return Compiler::CompileLazy(&optimized);
@@ -871,9 +884,10 @@ static void InstallCodeCommon(CompilationInfo* info) {
static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- code->kind() == Code::OPTIMIZED_FUNCTION) {
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
+
+ // Cache non-OSR optimized code.
+ if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
@@ -885,9 +899,10 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
- if (FLAG_cache_optimized_code &&
- info->osr_ast_id().IsNone() &&
- info->IsOptimizing()) {
+ if (!info->IsOptimizing()) return false; // Nothing to look up.
+
+ // Lookup non-OSR optimized code.
+ if (FLAG_cache_optimized_code && !info->is_osr()) {
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<JSFunction> function = info->closure();
ASSERT(!function.is_null());
@@ -943,12 +958,15 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
InstallCodeCommon(info);
if (info->IsOptimizing()) {
+ // Optimized code successfully created.
Handle<Code> code = info->code();
ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
+ // TODO(titzer): Only replace the code if it was not an OSR compile.
info->closure()->ReplaceCode(*code);
InsertCodeIntoOptimizedCodeMap(info);
return true;
- } else {
+ } else if (!info->is_osr()) {
+ // Compilation failed. Replace with full code if not OSR compile.
return InstallFullCode(info);
}
}
@@ -959,36 +977,55 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
-void Compiler::RecompileParallel(Handle<JSFunction> closure) {
- ASSERT(closure->IsMarkedForParallelRecompilation());
+bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
+ uint32_t osr_pc_offset) {
+ bool compiling_for_osr = (osr_pc_offset != 0);
Isolate* isolate = closure->GetIsolate();
- // Here we prepare compile data for the parallel recompilation thread, but
+ // Here we prepare compile data for the concurrent recompilation thread, but
// this still happens synchronously and interrupts execution.
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** Compilation queue, will retry opting on next run.\n");
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Compilation queue full, will retry optimizing ");
+ closure->PrintName();
+ PrintF(" on next run.\n");
}
- return;
+ return false;
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+
+ if (compiling_for_osr) {
+ BailoutId osr_ast_id =
+ shared->code()->TranslatePcOffsetToAstId(osr_pc_offset);
+ ASSERT(!osr_ast_id.IsNone());
+ info->SetOptimizing(osr_ast_id);
+ info->set_osr_pc_offset(osr_pc_offset);
+
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - attempt to queue ");
+ closure->PrintName();
+ PrintF(" at AST id %d]\n", osr_ast_id.ToInt());
+ }
+ } else {
+ info->SetOptimizing(BailoutId::None());
+ }
+
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
- info->SetOptimizing(BailoutId::None());
{
CompilationHandleScope handle_scope(*info);
- if (InstallCodeFromOptimizedCodeMap(*info)) {
- return;
+ if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(*info)) {
+ return true;
}
if (Parser::Parse(*info)) {
@@ -1005,6 +1042,8 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
info.Detach();
shared->code()->set_profiler_ticks(0);
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ ASSERT(!isolate->has_pending_exception());
+ return true;
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
@@ -1013,38 +1052,26 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
}
- if (shared->code()->back_edges_patched_for_osr()) {
- // At this point we either put the function on recompilation queue or
- // aborted optimization. In either case we want to continue executing
- // the unoptimized code without running into OSR. If the unoptimized
- // code has been patched for OSR, unpatch it.
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code =
- isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(shared->code(),
- *interrupt_code,
- *replacement_code);
- }
-
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+ return false;
}
-void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
+Handle<Code> Compiler::InstallOptimizedCode(
+ OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
// The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
if (info->shared_info()->optimization_disabled()) {
info->AbortOptimization();
InstallFullCode(*info);
- if (FLAG_trace_parallel_recompilation) {
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** aborting optimization for ");
info->closure()->PrintName();
PrintF(" as it has been disabled.\n");
}
- ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
- return;
+ ASSERT(!info->closure()->IsInRecompileQueue());
+ return Handle<Code>::null();
}
Isolate* isolate = info->isolate();
@@ -1055,13 +1082,13 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
if (info->HasAbortedDueToDependencyChange()) {
- info->set_bailout_reason("bailed out due to dependent map");
+ info->set_bailout_reason(kBailedOutDueToDependencyChange);
status = optimizing_compiler->AbortOptimization();
} else if (status != OptimizingCompiler::SUCCEEDED) {
- info->set_bailout_reason("failed/bailed out last time");
+ info->set_bailout_reason(kFailedBailedOutLastTime);
status = optimizing_compiler->AbortOptimization();
} else if (isolate->DebuggerHasBreakPoints()) {
- info->set_bailout_reason("debugger is active");
+ info->set_bailout_reason(kDebuggerIsActive);
status = optimizing_compiler->AbortOptimization();
} else {
status = optimizing_compiler->GenerateAndInstallCode();
@@ -1078,19 +1105,21 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
info->closure()->context()->native_context()) == -1) {
InsertCodeIntoOptimizedCodeMap(*info);
}
- if (FLAG_trace_parallel_recompilation) {
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Optimized code for ");
info->closure()->PrintName();
PrintF(" installed.\n");
}
} else {
- info->SetCode(Handle<Code>(info->shared_info()->code()));
+ info->AbortOptimization();
InstallFullCode(*info);
}
// Optimized code is finally replacing unoptimized code. Reset the latter's
// profiler ticks to prevent too soon re-opt after a deopt.
info->shared_info()->code()->set_profiler_ticks(0);
- ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode());
+ ASSERT(!info->closure()->IsInRecompileQueue());
+ return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
+ : Handle<Code>::null();
}
@@ -1178,7 +1207,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
function_info->set_ast_node_count(lit->ast_node_count());
function_info->set_is_function(lit->is_function());
- function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
+ function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
function_info->set_is_generator(lit->is_generator());
@@ -1201,6 +1230,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
return;
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ GetScriptColumnNumber(script, shared->start_position()) + 1;
USE(line_num);
if (script->name()->IsString()) {
PROFILE(info->isolate(),
@@ -1209,7 +1240,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*shared,
info,
String::cast(script->name()),
- line_num));
+ line_num,
+ column_num));
} else {
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
@@ -1217,7 +1249,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*shared,
info,
info->isolate()->heap()->empty_string(),
- line_num));
+ line_num,
+ column_num));
}
}
@@ -1232,7 +1265,7 @@ CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
: name_(name), info_(info), zone_(info->isolate()) {
if (FLAG_hydrogen_stats) {
info_zone_start_allocation_size_ = info->zone()->allocation_size();
- start_ticks_ = OS::Ticks();
+ timer_.Start();
}
}
@@ -1241,8 +1274,7 @@ CompilationPhase::~CompilationPhase() {
if (FLAG_hydrogen_stats) {
unsigned size = zone()->allocation_size();
size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
- int64_t ticks = OS::Ticks() - start_ticks_;
- isolate()->GetHStatistics()->SaveTiming(name_, ticks, size);
+ isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
}
}
@@ -1250,9 +1282,11 @@ CompilationPhase::~CompilationPhase() {
bool CompilationPhase::ShouldProduceTraceOutput() const {
// Trace if the appropriate trace flag is set and the phase name's first
// character is in the FLAG_trace_phase command line parameter.
- bool tracing_on = info()->IsStub() ?
- FLAG_trace_hydrogen_stubs :
- FLAG_trace_hydrogen;
+ AllowHandleDereference allow_deref;
+ bool tracing_on = info()->IsStub()
+ ? FLAG_trace_hydrogen_stubs
+ : (FLAG_trace_hydrogen &&
+ info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
return (tracing_on &&
OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
}
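RecompileConcurrent's OSR path above caches osr_pc_offset and translates it back to a BailoutId before queueing the compile. A hypothetical stand-in for that lookup (the real mapping lives in the unoptimized Code object's back edge table, queried via TranslatePcOffsetToAstId; the map here is only illustrative):

#include <cassert>
#include <stdint.h>
#include <map>

class BailoutId {
 public:
  explicit BailoutId(int id) : id_(id) {}
  static BailoutId None() { return BailoutId(-1); }
  bool IsNone() const { return id_ < 0; }
  int ToInt() const { return id_; }
 private:
  int id_;
};

class BackEdgeTableStub {
 public:
  void Add(uint32_t pc_offset, int ast_id) { table_[pc_offset] = ast_id; }
  BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset) const {
    std::map<uint32_t, int>::const_iterator it = table_.find(pc_offset);
    return it == table_.end() ? BailoutId::None() : BailoutId(it->second);
  }
 private:
  std::map<uint32_t, int> table_;
};

int main() {
  BackEdgeTableStub table;
  table.Add(0x40, 7);  // Loop back edge at unoptimized pc offset 0x40.
  BailoutId osr_ast_id = table.TranslatePcOffsetToAstId(0x40);
  assert(!osr_ast_id.IsNone() && osr_ast_id.ToInt() == 7);
  assert(table.TranslatePcOffsetToAstId(0x99).IsNone());  // Not a back edge.
  return 0;
}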
diff --git a/chromium/v8/src/compiler.h b/chromium/v8/src/compiler.h
index d36e488b174..8ceb61db9ce 100644
--- a/chromium/v8/src/compiler.h
+++ b/chromium/v8/src/compiler.h
@@ -60,11 +60,11 @@ class CompilationInfo {
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
virtual ~CompilationInfo();
- Isolate* isolate() {
- ASSERT(Isolate::Current() == isolate_);
+ Isolate* isolate() const {
return isolate_;
}
Zone* zone() { return zone_; }
+ bool is_osr() const { return !osr_ast_id_.IsNone(); }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@@ -199,6 +199,11 @@ class CompilationInfo {
return IsCompilingForDebugging::decode(flags_);
}
+ bool ShouldTrapOnDeopt() const {
+ return (FLAG_trap_on_deopt && IsOptimizing()) ||
+ (FLAG_trap_on_stub_deopt && IsStub());
+ }
+
bool has_global_object() const {
return !closure().is_null() &&
(closure()->context()->global_object() != NULL);
@@ -230,9 +235,10 @@ class CompilationInfo {
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
- // Disable all optimization attempts of this info for the rest of the
- // current compilation pipeline.
- void AbortOptimization();
+ // Reset code to the unoptimized version when optimization is aborted.
+ void AbortOptimization() {
+ SetCode(handle(shared_info()->code()));
+ }
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
@@ -258,8 +264,8 @@ class CompilationInfo {
SaveHandle(&script_);
}
- const char* bailout_reason() const { return bailout_reason_; }
- void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
+ BailoutReason bailout_reason() const { return bailout_reason_; }
+ void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
int prologue_offset() const {
ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
@@ -293,11 +299,21 @@ class CompilationInfo {
}
void AbortDueToDependencyChange() {
- mode_ = DEPENDENCY_CHANGE_ABORT;
+ ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ abort_due_to_dependency_ = true;
}
bool HasAbortedDueToDependencyChange() {
- return mode_ == DEPENDENCY_CHANGE_ABORT;
+ ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ return abort_due_to_dependency_;
+ }
+
+ void set_osr_pc_offset(uint32_t pc_offset) {
+ osr_pc_offset_ = pc_offset;
+ }
+
+ bool HasSameOsrEntry(Handle<JSFunction> function, uint32_t pc_offset) {
+ return osr_pc_offset_ == pc_offset && function.is_identical_to(closure_);
}
protected:
@@ -321,14 +337,13 @@ class CompilationInfo {
BASE,
OPTIMIZE,
NONOPT,
- STUB,
- DEPENDENCY_CHANGE_ABORT
+ STUB
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
- ASSERT(V8::UseCrankshaft());
+ ASSERT(isolate()->use_crankshaft());
mode_ = mode;
}
@@ -395,6 +410,12 @@ class CompilationInfo {
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
+ // The pc_offset corresponding to osr_ast_id_ in unoptimized code.
+ // We can look this up in the back edge table, but cache it for quick access.
+ uint32_t osr_pc_offset_;
+
+ // Flag whether compilation needs to be aborted due to dependency change.
+ bool abort_due_to_dependency_;
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
@@ -412,7 +433,7 @@ class CompilationInfo {
}
}
- const char* bailout_reason_;
+ BailoutReason bailout_reason_;
int prologue_offset_;
@@ -491,9 +512,6 @@ class OptimizingCompiler: public ZoneObject {
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
- time_taken_to_create_graph_(0),
- time_taken_to_optimize_(0),
- time_taken_to_codegen_(0),
last_status_(FAILED) { }
enum Status {
@@ -519,9 +537,9 @@ class OptimizingCompiler: public ZoneObject {
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
- int64_t time_taken_to_create_graph_;
- int64_t time_taken_to_optimize_;
- int64_t time_taken_to_codegen_;
+ TimeDelta time_taken_to_create_graph_;
+ TimeDelta time_taken_to_optimize_;
+ TimeDelta time_taken_to_codegen_;
Status last_status_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
@@ -531,18 +549,20 @@ class OptimizingCompiler: public ZoneObject {
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizingCompiler* compiler, int64_t* location)
+ Timer(OptimizingCompiler* compiler, TimeDelta* location)
: compiler_(compiler),
- start_(OS::Ticks()),
- location_(location) { }
+ location_(location) {
+ ASSERT(location_ != NULL);
+ timer_.Start();
+ }
~Timer() {
- *location_ += (OS::Ticks() - start_);
+ *location_ += timer_.Elapsed();
}
OptimizingCompiler* compiler_;
- int64_t start_;
- int64_t* location_;
+ ElapsedTimer timer_;
+ TimeDelta* location_;
};
};
@@ -591,7 +611,8 @@ class Compiler : public AllStatic {
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
- static void RecompileParallel(Handle<JSFunction> function);
+ static bool RecompileConcurrent(Handle<JSFunction> function,
+ uint32_t osr_pc_offset = 0);
// Compile a shared function info object (the function is possibly lazily
// compiled).
@@ -604,7 +625,7 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
- static void InstallOptimizedCode(OptimizingCompiler* info);
+ static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
@@ -634,7 +655,7 @@ class CompilationPhase BASE_EMBEDDED {
CompilationInfo* info_;
Zone zone_;
unsigned info_zone_start_allocation_size_;
- int64_t start_ticks_;
+ ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
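The Timer struct above replaces raw OS::Ticks() arithmetic with an ElapsedTimer that adds its measurement to a TimeDelta* when it goes out of scope, so each phase is credited even on early returns. A minimal sketch of the same RAII pattern, assuming std::chrono in place of V8's platform time classes:

#include <chrono>
#include <cstdio>

typedef std::chrono::steady_clock Clock;
typedef Clock::duration TimeDelta;

struct ScopedPhaseTimer {
  explicit ScopedPhaseTimer(TimeDelta* location)
      : location_(location), start_(Clock::now()) {}
  ~ScopedPhaseTimer() { *location_ += Clock::now() - start_; }
  TimeDelta* location_;
  Clock::time_point start_;
};

int main() {
  TimeDelta time_taken_to_optimize = TimeDelta::zero();
  {
    ScopedPhaseTimer timer(&time_taken_to_optimize);  // Destructor records.
    volatile long sink = 0;
    for (long i = 0; i < 1000000; ++i) sink += i;     // Simulated work.
  }
  long long ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
      time_taken_to_optimize).count();
  std::printf("optimize phase: %lld ns\n", ns);
  return 0;
}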
diff --git a/chromium/v8/src/contexts.cc b/chromium/v8/src/contexts.cc
index 0fddfdf5058..441ef9d9c32 100644
--- a/chromium/v8/src/contexts.cc
+++ b/chromium/v8/src/contexts.cc
@@ -74,7 +74,7 @@ Context* Context::native_context() {
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the native context.
- ASSERT(Isolate::Current()->bootstrapper()->IsActive());
+ ASSERT(this->GetIsolate()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsNativeContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
@@ -319,14 +319,48 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
}
+void Context::SetOptimizedFunctionsListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(OPTIMIZED_FUNCTIONS_LIST, head);
+}
+
+
Object* Context::OptimizedFunctionsListHead() {
ASSERT(IsNativeContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
-void Context::ClearOptimizedFunctions() {
- set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
+void Context::AddOptimizedCode(Code* code) {
+ ASSERT(IsNativeContext());
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(code->next_code_link()->IsUndefined());
+ code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
+ set(OPTIMIZED_CODE_LIST, code);
+}
+
+
+void Context::SetOptimizedCodeListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(OPTIMIZED_CODE_LIST, head);
+}
+
+
+Object* Context::OptimizedCodeListHead() {
+ ASSERT(IsNativeContext());
+ return get(OPTIMIZED_CODE_LIST);
+}
+
+
+void Context::SetDeoptimizedCodeListHead(Object* head) {
+ ASSERT(IsNativeContext());
+ set(DEOPTIMIZED_CODE_LIST, head);
+}
+
+
+Object* Context::DeoptimizedCodeListHead() {
+ ASSERT(IsNativeContext());
+ return get(DEOPTIMIZED_CODE_LIST);
}
@@ -352,10 +386,9 @@ bool Context::IsBootstrappingOrValidParentContext(
}
-bool Context::IsBootstrappingOrGlobalObject(Object* object) {
+bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
- Isolate* isolate = Isolate::Current();
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
isolate->bootstrapper()->IsActive() ||
object->IsGlobalObject();
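Context::AddOptimizedCode above threads optimized Code objects into an intrusive singly-linked list whose head sits in the native context's OPTIMIZED_CODE_LIST slot. An illustration with plain pointers (ordinary C++ objects, not V8 heap objects, and without the weak-reference treatment the GC applies):

#include <cassert>
#include <cstddef>

struct Code {
  explicit Code(int code_id) : id(code_id), next_code_link(NULL) {}
  int id;
  Code* next_code_link;  // Link slot carried by every Code object.
};

struct NativeContext {
  NativeContext() : optimized_code_list(NULL) {}
  Code* optimized_code_list;  // Stand-in for the OPTIMIZED_CODE_LIST slot.

  void AddOptimizedCode(Code* code) {
    assert(code->next_code_link == NULL);  // Mirrors the ASSERT above.
    code->next_code_link = optimized_code_list;
    optimized_code_list = code;  // O(1) prepend, no extra allocation.
  }
};

int main() {
  NativeContext context;
  Code a(1), b(2);
  context.AddOptimizedCode(&a);
  context.AddOptimizedCode(&b);
  assert(context.optimized_code_list == &b);  // Newest entry first.
  assert(b.next_code_link == &a);
  assert(a.next_code_link == NULL);
  return 0;
}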
diff --git a/chromium/v8/src/contexts.h b/chromium/v8/src/contexts.h
index fdf6d27ef55..189c215e639 100644
--- a/chromium/v8/src/contexts.h
+++ b/chromium/v8/src/contexts.h
@@ -337,8 +337,10 @@ class Context: public FixedArray {
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
- MAP_CACHE_INDEX, // Weak.
- NEXT_CONTEXT_LINK, // Weak.
+ OPTIMIZED_CODE_LIST, // Weak.
+ DEOPTIMIZED_CODE_LIST, // Weak.
+ MAP_CACHE_INDEX, // Weak.
+ NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
@@ -370,7 +372,7 @@ class Context: public FixedArray {
GlobalObject* global_object() {
Object* result = get(GLOBAL_OBJECT_INDEX);
- ASSERT(IsBootstrappingOrGlobalObject(result));
+ ASSERT(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global_object(GlobalObject* object) {
@@ -428,11 +430,19 @@ class Context: public FixedArray {
// Mark the native context with out of memory.
inline void mark_out_of_memory();
- // A native context hold a list of all functions which have been optimized.
+ // A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
+ void SetOptimizedFunctionsListHead(Object* head);
Object* OptimizedFunctionsListHead();
- void ClearOptimizedFunctions();
+
+ // The native context also stores a list of all optimized code and a
+ // list of all deoptimized code, which are needed by the deoptimizer.
+ void AddOptimizedCode(Code* code);
+ void SetOptimizedCodeListHead(Object* head);
+ Object* OptimizedCodeListHead();
+ void SetDeoptimizedCodeListHead(Object* head);
+ Object* DeoptimizedCodeListHead();
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
@@ -508,7 +518,7 @@ class Context: public FixedArray {
#ifdef DEBUG
// Bootstrapping-aware type checks.
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
- static bool IsBootstrappingOrGlobalObject(Object* object);
+ static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object);
#endif
STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
diff --git a/chromium/v8/src/counters.cc b/chromium/v8/src/counters.cc
index 183941206e9..e0a6a60a0a4 100644
--- a/chromium/v8/src/counters.cc
+++ b/chromium/v8/src/counters.cc
@@ -41,7 +41,7 @@ StatsTable::StatsTable()
int* StatsCounter::FindLocationInStatsTable() const {
- return Isolate::Current()->stats_table()->FindLocation(name_);
+ return isolate_->stats_table()->FindLocation(name_);
}
@@ -60,8 +60,7 @@ void* Histogram::CreateHistogram() const {
// Start the timer.
void HistogramTimer::Start() {
if (Enabled()) {
- stop_time_ = 0;
- start_time_ = OS::Ticks();
+ timer_.Start();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::START, name()));
@@ -72,10 +71,9 @@ void HistogramTimer::Start() {
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (Enabled()) {
- stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- AddSample(milliseconds);
+ AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
+ timer_.Stop();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::END, name()));
diff --git a/chromium/v8/src/counters.h b/chromium/v8/src/counters.h
index a633fea7798..93911d72161 100644
--- a/chromium/v8/src/counters.h
+++ b/chromium/v8/src/counters.h
@@ -116,8 +116,8 @@ class StatsTable {
class StatsCounter {
public:
StatsCounter() { }
- explicit StatsCounter(const char* name)
- : name_(name), ptr_(NULL), lookup_done_(false) { }
+ explicit StatsCounter(Isolate* isolate, const char* name)
+ : isolate_(isolate), name_(name), ptr_(NULL), lookup_done_(false) { }
// Sets the counter to a specific value.
void Set(int value) {
@@ -175,6 +175,7 @@ class StatsCounter {
private:
int* FindLocationInStatsTable() const;
+ Isolate* isolate_;
const char* name_;
int* ptr_;
bool lookup_done_;
@@ -245,9 +246,7 @@ class HistogramTimer : public Histogram {
int max,
int num_buckets,
Isolate* isolate)
- : Histogram(name, min, max, num_buckets, isolate),
- start_time_(0),
- stop_time_(0) { }
+ : Histogram(name, min, max, num_buckets, isolate) {}
// Start the timer.
void Start();
@@ -257,12 +256,11 @@ class HistogramTimer : public Histogram {
// Returns true if the timer is running.
bool Running() {
- return Enabled() && (start_time_ != 0) && (stop_time_ == 0);
+ return Enabled() && timer_.IsStarted();
}
private:
- int64_t start_time_;
- int64_t stop_time_;
+ ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
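StatsCounter now carries its Isolate so FindLocationInStatsTable no longer reaches for Isolate::Current(); around that lookup the class keeps the ptr_/lookup_done_ cache seen in the constructor above. A sketch of that lazy, cached resolution, with StatsTableStub as a hypothetical stand-in for the embedder-provided stats table:

#include <cstddef>
#include <map>
#include <string>

class StatsTableStub {
 public:
  int* FindLocation(const char* name) { return &counters_[name]; }
 private:
  std::map<std::string, int> counters_;
};

class StatsCounter {
 public:
  StatsCounter(StatsTableStub* table, const char* name)
      : table_(table), name_(name), ptr_(NULL), lookup_done_(false) {}

  void Increment() {
    if (int* loc = GetPtr()) ++(*loc);
  }

 private:
  // Resolve the external location once; every later access reuses ptr_.
  int* GetPtr() {
    if (!lookup_done_) {
      ptr_ = table_->FindLocation(name_);
      lookup_done_ = true;
    }
    return ptr_;
  }

  StatsTableStub* table_;  // Plays the role of the per-isolate stats table.
  const char* name_;
  int* ptr_;
  bool lookup_done_;
};

int main() {
  StatsTableStub table;
  StatsCounter counter(&table, "c:V8.Example");
  counter.Increment();  // First call performs the lookup.
  counter.Increment();  // Subsequent calls hit the cached pointer.
  return 0;
}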
diff --git a/chromium/v8/src/cpu-profiler-inl.h b/chromium/v8/src/cpu-profiler-inl.h
index 868ec64fd61..7bfbf5c57cb 100644
--- a/chromium/v8/src/cpu-profiler-inl.h
+++ b/chromium/v8/src/cpu-profiler-inl.h
@@ -67,13 +67,30 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
-TickSample* ProfilerEventsProcessor::TickSampleEvent() {
+TickSample* CpuProfiler::StartTickSample() {
+ if (is_profiling_) return processor_->StartTickSample();
+ return NULL;
+}
+
+
+void CpuProfiler::FinishTickSample() {
+ processor_->FinishTickSample();
+}
+
+
+TickSample* ProfilerEventsProcessor::StartTickSample() {
+ void* address = ticks_buffer_.StartEnqueue();
+ if (address == NULL) return NULL;
TickSampleEventRecord* evt =
- new(ticks_buffer_.Enqueue()) TickSampleEventRecord(last_code_event_id_);
+ new(address) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;
}
+void ProfilerEventsProcessor::FinishTickSample() {
+ ticks_buffer_.FinishEnqueue();
+}
+
} } // namespace v8::internal
#endif // V8_CPU_PROFILER_INL_H_
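Note: StartTickSample/FinishTickSample split the old single-shot Enqueue into a reserve-then-commit pair: the sampler first claims a slot, fills the TickSampleEventRecord in place, and only then publishes it to the consumer. A rough sketch of such a two-phase enqueue on a single-producer/single-consumer ring buffer (names and memory-ordering choices are illustrative, not V8's actual SamplingCircularQueue):

    #include <atomic>
    #include <cstddef>

    template <typename T, size_t N>
    class SpscRingSketch {
     public:
      // Phase 1: reserve the next slot, or NULL when the ring is full.
      T* StartEnqueue() {
        size_t next = (head_ + 1) % N;
        if (next == tail_pub_.load(std::memory_order_acquire)) return NULL;
        return &buffer_[head_];
      }
      // Phase 2: publish the slot the producer just filled.
      void FinishEnqueue() {
        head_ = (head_ + 1) % N;
        head_pub_.store(head_, std::memory_order_release);
      }
      // Consumer side: Peek() then Remove(), mirroring the diff below.
      T* Peek() {
        if (tail_ == head_pub_.load(std::memory_order_acquire)) return NULL;
        return &buffer_[tail_];
      }
      void Remove() {
        tail_ = (tail_ + 1) % N;
        tail_pub_.store(tail_, std::memory_order_release);
      }
     private:
      T buffer_[N];
      size_t head_ = 0, tail_ = 0;  // owned by producer / consumer respectively
      std::atomic<size_t> head_pub_{0}, tail_pub_{0};  // published indices
    };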
diff --git a/chromium/v8/src/cpu-profiler.cc b/chromium/v8/src/cpu-profiler.cc
index 0a83b85f505..e0f7aea18a8 100644
--- a/chromium/v8/src/cpu-profiler.cc
+++ b/chromium/v8/src/cpu-profiler.cc
@@ -40,18 +40,18 @@
namespace v8 {
namespace internal {
-static const int kTickSamplesBufferChunkSize = 64 * KB;
-static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ProfilerEventsProcessor::ProfilerEventsProcessor(
+ ProfileGenerator* generator,
+ Sampler* sampler,
+ TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
+ sampler_(sampler),
running_(true),
- ticks_buffer_(sizeof(TickSampleEventRecord),
- kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount),
+ period_(period),
last_code_event_id_(0), last_processed_code_event_id_(0) {
}
@@ -103,54 +103,54 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
return false;
}
-
-bool ProfilerEventsProcessor::ProcessTicks() {
- while (true) {
- if (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order ==
- last_processed_code_event_id_) {
- TickSampleEventRecord record;
- ticks_from_vm_buffer_.Dequeue(&record);
- generator_->RecordTickSample(record.sample);
- }
-
- const TickSampleEventRecord* rec =
- TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
- // Make a local copy of tick sample record to ensure that it won't
- // be modified as we are processing it. This is possible as the
- // sampler writes w/o any sync to the queue, so if the processor
- // will get far behind, a record may be modified right under its
- // feet.
- TickSampleEventRecord record = *rec;
- if (record.order != last_processed_code_event_id_) return true;
-
- // A paranoid check to make sure that we don't get a memory overrun
- // in case of frames_count having a wild value.
- if (record.sample.frames_count < 0
- || record.sample.frames_count > TickSample::kMaxFramesCount)
- record.sample.frames_count = 0;
+ProfilerEventsProcessor::SampleProcessingResult
+ ProfilerEventsProcessor::ProcessOneSample() {
+ if (!ticks_from_vm_buffer_.IsEmpty()
+ && ticks_from_vm_buffer_.Peek()->order ==
+ last_processed_code_event_id_) {
+ TickSampleEventRecord record;
+ ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
- ticks_buffer_.FinishDequeue();
+ return OneSampleProcessed;
+ }
+
+ const TickSampleEventRecord* record = ticks_buffer_.Peek();
+ if (record == NULL) {
+ if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
+ return FoundSampleForNextCodeEvent;
+ }
+ if (record->order != last_processed_code_event_id_) {
+ return FoundSampleForNextCodeEvent;
}
+ generator_->RecordTickSample(record->sample);
+ ticks_buffer_.Remove();
+ return OneSampleProcessed;
}
void ProfilerEventsProcessor::Run() {
while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks()) {
- // All ticks of the current last_processed_code_event_id_ are processed,
- // proceed to the next code event.
- ProcessCodeEvent();
- }
- YieldCPU();
+ ElapsedTimer timer;
+ timer.Start();
+ // Keep processing existing events until it is time to take the next sample.

+ do {
+ if (FoundSampleForNextCodeEvent == ProcessOneSample()) {
+ // All ticks of the current last_processed_code_event_id_ are
+ // processed, proceed to the next code event.
+ ProcessCodeEvent();
+ }
+ } while (!timer.HasExpired(period_));
+
+ // Schedule next sample. sampler_ is NULL in tests.
+ if (sampler_) sampler_->DoSample();
}
// Process remaining tick events.
- ticks_buffer_.FlushResidualRecords();
do {
- ProcessTicks();
+ SampleProcessingResult result;
+ do {
+ result = ProcessOneSample();
+ } while (result == OneSampleProcessed);
} while (ProcessCodeEvent());
}
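Note: the rewritten Run() loop replaces yield-based polling with paced sampling: drain queued samples and code events for one sampling period, then ask the sampler for the next tick. A condensed, self-contained sketch of that control flow, assuming a steady clock (the diff uses the new ElapsedTimer/TimeDelta types for the same purpose; type names here are hypothetical):

    #include <atomic>
    #include <chrono>

    template <typename Processor, typename Sampler>
    void RunProcessorLoopSketch(Processor& proc, Sampler* sampler,
                                std::chrono::microseconds period,
                                const std::atomic<bool>& running) {
      using Clock = std::chrono::steady_clock;
      while (running.load()) {
        auto deadline = Clock::now() + period;
        // Drain work for one period; a sample belonging to a newer code
        // event means the current code event is fully processed.
        do {
          if (proc.ProcessOneSample() ==
              Processor::FoundSampleForNextCodeEvent) {
            proc.ProcessCodeEvent();
          }
        } while (Clock::now() < deadline);
        if (sampler != NULL) sampler->DoSample();  // NULL in V8's tests
      }
    }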
@@ -166,12 +166,6 @@ CpuProfile* CpuProfiler::GetProfile(int index) {
}
-TickSample* CpuProfiler::TickSampleEvent() {
- if (is_profiling_) return processor_->TickSampleEvent();
- return NULL;
-}
-
-
void CpuProfiler::DeleteAllProfiles() {
if (is_profiling_) StopProcessor();
ResetProfiles();
@@ -253,6 +247,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
ASSERT(Script::cast(shared->script()));
Script* script = Script::cast(shared->script());
rec->entry->set_script_id(script->id()->value());
+ rec->entry->set_bailout_reason(
+ GetBailoutReason(shared->DisableOptimizationReason()));
}
rec->size = code->ExecutableSize();
rec->shared = shared->address();
@@ -283,6 +279,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->entry->set_script_id(script->id()->value());
rec->size = code->ExecutableSize();
rec->shared = shared->address();
+ rec->entry->set_bailout_reason(
+ GetBailoutReason(shared->DisableOptimizationReason()));
processor_->Enqueue(evt_rec);
}
@@ -373,11 +371,12 @@ void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
- profiles_(new CpuProfilesCollection()),
+ sampling_interval_(TimeDelta::FromMicroseconds(
+ FLAG_cpu_profiler_sampling_interval)),
+ profiles_(new CpuProfilesCollection(isolate->heap())),
next_profile_uid_(1),
generator_(NULL),
processor_(NULL),
- need_to_stop_sampler_(false),
is_profiling_(false) {
}
@@ -387,11 +386,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
+ sampling_interval_(TimeDelta::FromMicroseconds(
+ FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
next_profile_uid_(1),
generator_(test_generator),
processor_(test_processor),
- need_to_stop_sampler_(false),
is_profiling_(false) {
}
@@ -402,9 +402,15 @@ CpuProfiler::~CpuProfiler() {
}
+void CpuProfiler::set_sampling_interval(TimeDelta value) {
+ ASSERT(!is_profiling_);
+ sampling_interval_ = value;
+}
+
+
void CpuProfiler::ResetProfiles() {
delete profiles_;
- profiles_ = new CpuProfilesCollection();
+ profiles_ = new CpuProfilesCollection(isolate()->heap());
}
@@ -425,12 +431,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
- saved_logging_nesting_ = logger->logging_nesting_;
- logger->logging_nesting_ = 0;
+ saved_is_logging_ = logger->is_logging_;
+ logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
+ Sampler* sampler = logger->sampler();
+ processor_ = new ProfilerEventsProcessor(
+ generator_, sampler, sampling_interval_);
is_profiling_ = true;
- processor_->StartSynchronously();
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
if (!FLAG_prof_browser_mode) {
@@ -440,12 +447,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->LogAccessorCallbacks();
LogBuiltins();
// Enable stack sampling.
- Sampler* sampler = logger->sampler();
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
- if (!sampler->IsActive()) {
- sampler->Start();
- need_to_stop_sampler_ = true;
- }
+ processor_->StartSynchronously();
}
}
@@ -477,18 +481,15 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
- sampler->DecreaseProfilingDepth();
- if (need_to_stop_sampler_) {
- sampler->Stop();
- need_to_stop_sampler_ = false;
- }
is_profiling_ = false;
processor_->StopSynchronously();
delete processor_;
delete generator_;
processor_ = NULL;
generator_ = NULL;
- logger->logging_nesting_ = saved_logging_nesting_;
+ sampler->SetHasProcessingThread(false);
+ sampler->DecreaseProfilingDepth();
+ logger->is_logging_ = saved_is_logging_;
}
diff --git a/chromium/v8/src/cpu-profiler.h b/chromium/v8/src/cpu-profiler.h
index cbe3e3cf81d..8aba5426d5a 100644
--- a/chromium/v8/src/cpu-profiler.h
+++ b/chromium/v8/src/cpu-profiler.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
+#include "platform/time.h"
#include "sampler.h"
#include "unbound-queue.h"
@@ -114,10 +115,6 @@ class TickSampleEventRecord {
unsigned order;
TickSample sample;
-
- static TickSampleEventRecord* cast(void* value) {
- return reinterpret_cast<TickSampleEventRecord*>(value);
- }
};
@@ -140,7 +137,9 @@ class CodeEventsContainer {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ TimeDelta period);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -156,17 +155,31 @@ class ProfilerEventsProcessor : public Thread {
// queue (because the structure is of fixed width, but usually not all
// stack frame entries are filled.) This method returns a pointer to the
// next record of the buffer.
- INLINE(TickSample* TickSampleEvent());
+ inline TickSample* StartTickSample();
+ inline void FinishTickSample();
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
- bool ProcessTicks();
+
+ enum SampleProcessingResult {
+ OneSampleProcessed,
+ FoundSampleForNextCodeEvent,
+ NoSamplesInQueue
+ };
+ SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
+ Sampler* sampler_;
bool running_;
+ // Sampling period in microseconds.
+ const TimeDelta period_;
UnboundQueue<CodeEventsContainer> events_buffer_;
- SamplingCircularQueue ticks_buffer_;
+ static const size_t kTickSampleBufferSize = 1 * MB;
+ static const size_t kTickSampleQueueLength =
+ kTickSampleBufferSize / sizeof(TickSampleEventRecord);
+ SamplingCircularQueue<TickSampleEventRecord,
+ kTickSampleQueueLength> ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned last_code_event_id_;
unsigned last_processed_code_event_id_;
@@ -195,6 +208,7 @@ class CpuProfiler : public CodeEventListener {
virtual ~CpuProfiler();
+ void set_sampling_interval(TimeDelta value);
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
@@ -205,7 +219,8 @@ class CpuProfiler : public CodeEventListener {
void DeleteProfile(CpuProfile* profile);
// Invoked from stack sampler (thread or signal handler.)
- TickSample* TickSampleEvent();
+ inline TickSample* StartTickSample();
+ inline void FinishTickSample();
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
@@ -241,6 +256,7 @@ class CpuProfiler : public CodeEventListener {
ProfileGenerator* generator() const { return generator_; }
ProfilerEventsProcessor* processor() const { return processor_; }
+ Isolate* isolate() const { return isolate_; }
private:
void StartProcessorIfNotStarted();
@@ -250,15 +266,14 @@ class CpuProfiler : public CodeEventListener {
void LogBuiltins();
Isolate* isolate_;
+ TimeDelta sampling_interval_;
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
- int saved_logging_nesting_;
- bool need_to_stop_sampler_;
+ bool saved_is_logging_;
bool is_profiling_;
- private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
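Note: the replacement ticks_buffer_ is statically sized, with a fixed 1 MB backing store divided by the record size to give the queue capacity at compile time, replacing the old chunked 64 KB x 16 allocation. A worked example with a purely hypothetical record size (the real sizeof(TickSampleEventRecord) depends on TickSample::kMaxFramesCount and pointer width):

    #include <cstddef>

    struct TickSampleEventRecord { char payload[256]; };  // assume 256 bytes

    static const size_t kTickSampleBufferSize = 1 * 1024 * 1024;  // 1 MB
    static const size_t kTickSampleQueueLength =
        kTickSampleBufferSize / sizeof(TickSampleEventRecord);  // 4096 slots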
diff --git a/chromium/v8/src/cpu.cc b/chromium/v8/src/cpu.cc
new file mode 100644
index 00000000000..2bf51a7f6c0
--- /dev/null
+++ b/chromium/v8/src/cpu.cc
@@ -0,0 +1,466 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "cpu.h"
+
+#if V8_CC_MSVC
+#include <intrin.h> // __cpuid()
+#endif
+#if V8_OS_POSIX
+#include <unistd.h> // sysconf()
+#endif
+
+#include <algorithm>
+#include <cctype>
+#include <climits>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "checks.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+// Define __cpuid() for non-MSVC compilers.
+#if !V8_CC_MSVC
+
+static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
+#if defined(__i386__) && defined(__pic__)
+ // Make sure to preserve ebx, which contains the pointer
+ // to the GOT in case we're generating PIC.
+ __asm__ volatile (
+ "mov %%ebx, %%edi\n\t"
+ "cpuid\n\t"
+ "xchg %%edi, %%ebx\n\t"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#else
+ __asm__ volatile (
+ "cpuid \n\t"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+#endif // defined(__i386__) && defined(__pic__)
+}
+
+#endif // !V8_CC_MSVC
+
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+
+#if V8_HOST_ARCH_ARM
+
+// See <uapi/asm/hwcap.h> kernel header.
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3) /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP (1 << 7)
+#define HWCAP_JAVA (1 << 8)
+#define HWCAP_IWMMXT (1 << 9)
+#define HWCAP_CRUNCH (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
+#define HWCAP_LPAE (1 << 20)
+
+#define AT_HWCAP 16
+
+// Read the ELF HWCAP flags by parsing /proc/self/auxv.
+static uint32_t ReadELFHWCaps() {
+ uint32_t result = 0;
+ FILE* fp = fopen("/proc/self/auxv", "r");
+ if (fp != NULL) {
+ struct { uint32_t tag; uint32_t value; } entry;
+ for (;;) {
+ size_t n = fread(&entry, sizeof(entry), 1, fp);
+ if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
+ break;
+ }
+ if (entry.tag == AT_HWCAP) {
+ result = entry.value;
+ break;
+ }
+ }
+ fclose(fp);
+ }
+ return result;
+}
+
+#endif // V8_HOST_ARCH_ARM
+
+// Extract the information exposed by the kernel via /proc/cpuinfo.
+class CPUInfo V8_FINAL BASE_EMBEDDED {
+ public:
+ CPUInfo() : datalen_(0) {
+ // Get the size of the cpuinfo file by reading it until the end. This is
+ // required because files under /proc do not always return a valid size
+ // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
+ static const char PATHNAME[] = "/proc/cpuinfo";
+ FILE* fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (;;) {
+ char buffer[256];
+ size_t n = fread(buffer, 1, sizeof(buffer), fp);
+ if (n == 0) {
+ break;
+ }
+ datalen_ += n;
+ }
+ fclose(fp);
+ }
+
+ // Read the contents of the cpuinfo file.
+ data_ = new char[datalen_ + 1];
+ fp = fopen(PATHNAME, "r");
+ if (fp != NULL) {
+ for (size_t offset = 0; offset < datalen_; ) {
+ size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
+ if (n == 0) {
+ break;
+ }
+ offset += n;
+ }
+ fclose(fp);
+ }
+
+ // Zero-terminate the data.
+ data_[datalen_] = '\0';
+ }
+
+ ~CPUInfo() {
+ delete[] data_;
+ }
+
+ // Extract the content of the first occurrence of a given field in
+ // the content of the cpuinfo file and return it as a heap-allocated
+ // string that must be freed by the caller using delete[].
+ // Return NULL if not found.
+ char* ExtractField(const char* field) const {
+ ASSERT(field != NULL);
+
+ // Look for the first field occurrence, and ensure it starts the line.
+ size_t fieldlen = strlen(field);
+ char* p = data_;
+ for (;;) {
+ p = strstr(p, field);
+ if (p == NULL) {
+ return NULL;
+ }
+ if (p == data_ || p[-1] == '\n') {
+ break;
+ }
+ p += fieldlen;
+ }
+
+ // Skip to the first colon followed by a space.
+ p = strchr(p + fieldlen, ':');
+ if (p == NULL || !isspace(p[1])) {
+ return NULL;
+ }
+ p += 2;
+
+ // Find the end of the line.
+ char* q = strchr(p, '\n');
+ if (q == NULL) {
+ q = data_ + datalen_;
+ }
+
+ // Copy the line into a heap-allocated buffer.
+ size_t len = q - p;
+ char* result = new char[len + 1];
+ if (result != NULL) {
+ memcpy(result, p, len);
+ result[len] = '\0';
+ }
+ return result;
+ }
+
+ private:
+ char* data_;
+ size_t datalen_;
+};
+
+
+// Checks that a space-separated list of items contains one given 'item'.
+static bool HasListItem(const char* list, const char* item) {
+ ssize_t item_len = strlen(item);
+ const char* p = list;
+ if (p != NULL) {
+ while (*p != '\0') {
+ // Skip whitespace.
+ while (isspace(*p)) ++p;
+
+ // Find end of current list item.
+ const char* q = p;
+ while (*q != '\0' && !isspace(*q)) ++q;
+
+ if (item_len == q - p && memcmp(p, item, item_len) == 0) {
+ return true;
+ }
+
+ // Skip to next item.
+ p = q;
+ }
+ }
+ return false;
+}
+
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+CPU::CPU() : stepping_(0),
+ model_(0),
+ ext_model_(0),
+ family_(0),
+ ext_family_(0),
+ type_(0),
+ implementer_(0),
+ architecture_(0),
+ part_(0),
+ has_fpu_(false),
+ has_cmov_(false),
+ has_sahf_(false),
+ has_mmx_(false),
+ has_sse_(false),
+ has_sse2_(false),
+ has_sse3_(false),
+ has_ssse3_(false),
+ has_sse41_(false),
+ has_sse42_(false),
+ has_idiva_(false),
+ has_neon_(false),
+ has_thumbee_(false),
+ has_vfp_(false),
+ has_vfp3_(false),
+ has_vfp3_d32_(false) {
+ memcpy(vendor_, "Unknown", 8);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ int cpu_info[4];
+
+ // __cpuid with an InfoType argument of 0 returns the number of
+ // valid Ids in CPUInfo[0] and the CPU identification string in
+ // the other three array elements. The CPU identification string is
+ // not in linear order. The code below arranges the information
+ // in a human readable form. The human readable order is CPUInfo[1] |
+ // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+ // before using memcpy to copy these three array elements to cpu_string.
+ __cpuid(cpu_info, 0);
+ unsigned num_ids = cpu_info[0];
+ std::swap(cpu_info[2], cpu_info[3]);
+ memcpy(vendor_, cpu_info + 1, 12);
+ vendor_[12] = '\0';
+
+ // Interpret CPU feature information.
+ if (num_ids > 0) {
+ __cpuid(cpu_info, 1);
+ stepping_ = cpu_info[0] & 0xf;
+ model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+ family_ = (cpu_info[0] >> 8) & 0xf;
+ type_ = (cpu_info[0] >> 12) & 0x3;
+ ext_model_ = (cpu_info[0] >> 16) & 0xf;
+ ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
+ has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
+ has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+ has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+ has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+ has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+ has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+ has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+ has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+ }
+
+ // Query extended IDs.
+ __cpuid(cpu_info, 0x80000000);
+ unsigned num_ext_ids = cpu_info[0];
+
+ // Interpret extended CPU feature information.
+ if (num_ext_ids > 0x80000000) {
+ __cpuid(cpu_info, 0x80000001);
+ // SAHF is always available in compat/legacy mode,
+ // but must be probed in long mode.
+#if V8_HOST_ARCH_IA32
+ has_sahf_ = true;
+#else
+ has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
+#endif
+ }
+#elif V8_HOST_ARCH_ARM
+ CPUInfo cpu_info;
+
+ // Extract the implementer from the "CPU implementer" field.
+ char* implementer = cpu_info.ExtractField("CPU implementer");
+ if (implementer != NULL) {
+ char* end;
+ implementer_ = strtol(implementer, &end, 0);
+ if (end == implementer) {
+ implementer_ = 0;
+ }
+ delete[] implementer;
+ }
+
+ // Extract part number from the "CPU part" field.
+ char* part = cpu_info.ExtractField("CPU part");
+ if (part != NULL) {
+ char* end;
+ part_ = strtol(part, &end, 0);
+ if (end == part) {
+ part_ = 0;
+ }
+ delete[] part;
+ }
+
+ // Extract architecture from the "CPU architecture" field.
+ // The list is well-known, unlike the output of
+ // the 'Processor' field, which can vary greatly.
+ // See the definition of the 'proc_arch' array in
+ // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+ // the same file.
+ char* architecture = cpu_info.ExtractField("CPU architecture");
+ if (architecture != NULL) {
+ char* end;
+ architecture_ = strtol(architecture, &end, 10);
+ if (end == architecture) {
+ architecture_ = 0;
+ }
+ delete[] architecture;
+
+ // Unfortunately, it seems that certain ARMv6-based CPUs
+ // report an incorrect architecture number of 7!
+ //
+ // See http://code.google.com/p/android/issues/detail?id=10812
+ //
+ // We try to correct this by looking at the 'elf_format'
+ // field reported by the 'Processor' field, which is of the
+ // form "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
+ // an ARMv6 one. For example, the Raspberry Pi is one popular
+ // ARMv6 device that reports architecture 7.
+ if (architecture_ == 7) {
+ char* processor = cpu_info.ExtractField("Processor");
+ if (HasListItem(processor, "(v6l)")) {
+ architecture_ = 6;
+ }
+ delete[] processor;
+ }
+ }
+
+ // Try to extract the list of CPU features from ELF hwcaps.
+ uint32_t hwcaps = ReadELFHWCaps();
+ if (hwcaps != 0) {
+ has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
+ has_neon_ = (hwcaps & HWCAP_NEON) != 0;
+ has_thumbee_ = (hwcaps & HWCAP_THUMBEE) != 0;
+ has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
+ has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
+ has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
+ (hwcaps & HWCAP_VFPD32) != 0));
+ } else {
+ // Try to fall back to the "Features" CPUInfo field.
+ char* features = cpu_info.ExtractField("Features");
+ has_idiva_ = HasListItem(features, "idiva");
+ has_neon_ = HasListItem(features, "neon");
+ has_thumbee_ = HasListItem(features, "thumbee");
+ has_vfp_ = HasListItem(features, "vfp");
+ if (HasListItem(features, "vfpv3")) {
+ has_vfp3_ = true;
+ has_vfp3_d32_ = true;
+ } else if (HasListItem(features, "vfpv3d16")) {
+ has_vfp3_ = true;
+ }
+ delete[] features;
+ }
+
+ // Some old kernels will report vfp, not vfpv3. Here we make an attempt
+ // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+ // available on architectures with vfpv3. Checking neon on its own is
+ // not enough as it is possible to have neon without vfp.
+ if (has_vfp_ && has_neon_) {
+ has_vfp3_ = true;
+ }
+
+ // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+ if (architecture_ < 7 && has_vfp3_) {
+ architecture_ = 7;
+ }
+
+ // ARMv7 implies ThumbEE.
+ if (architecture_ >= 7) {
+ has_thumbee_ = true;
+ }
+
+ // The earliest architecture with ThumbEE is ARMv6T2.
+ if (has_thumbee_ && architecture_ < 6) {
+ architecture_ = 6;
+ }
+
+ // We don't support any FPUs other than VFP.
+ has_fpu_ = has_vfp_;
+#elif V8_HOST_ARCH_MIPS
+ // Simple detection of FPU at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to MIPS (early 2010), no similar
+ // facility is universally available on the MIPS architectures,
+ // so it's up to individual OSes to provide such a facility.
+ CPUInfo cpu_info;
+ char* cpu_model = cpu_info.ExtractField("cpu model");
+ has_fpu_ = HasListItem(cpu_model, "FPU");
+ delete[] cpu_model;
+#endif
+}
+
+
+// static
+int CPU::NumberOfProcessorsOnline() {
+#if V8_OS_WIN
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+#else
+ return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
+#endif
+}
+
+} } // namespace v8::internal
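Note: ReadELFHWCaps() above walks /proc/self/auxv by hand because glibc's getauxval() was not yet a safe baseline when this code was written. On current Linux with glibc 2.16 or later, the same HWCAP word is one call away; a sketch of the modern equivalent:

    #include <stdint.h>
    #include <sys/auxv.h>  // getauxval(), glibc 2.16+

    static uint32_t ReadELFHWCapsViaGetauxval() {
      // Returns 0 when AT_HWCAP is absent, matching the fallback above.
      return static_cast<uint32_t>(getauxval(AT_HWCAP));
    }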
diff --git a/chromium/v8/src/cpu.h b/chromium/v8/src/cpu.h
index 247af71aa35..b2e9f7da7ee 100644
--- a/chromium/v8/src/cpu.h
+++ b/chromium/v8/src/cpu.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,14 +44,64 @@ namespace internal {
// ----------------------------------------------------------------------------
// CPU
//
-// This class has static methods for the architecture specific functions. Add
-// methods here to cope with differences between the supported architectures.
+// Query information about the processor.
//
-// For each architecture the file cpu_<arch>.cc contains the implementation of
-// these functions.
+// This class also has static methods for the architecture specific functions.
+// Add methods here to cope with differences between the supported
+// architectures. For each architecture the file cpu_<arch>.cc contains the
+// implementation of these static functions.
-class CPU : public AllStatic {
+class CPU V8_FINAL BASE_EMBEDDED {
public:
+ CPU();
+
+ // x86 CPUID information
+ const char* vendor() const { return vendor_; }
+ int stepping() const { return stepping_; }
+ int model() const { return model_; }
+ int ext_model() const { return ext_model_; }
+ int family() const { return family_; }
+ int ext_family() const { return ext_family_; }
+ int type() const { return type_; }
+
+ // arm implementer/part information
+ int implementer() const { return implementer_; }
+ static const int ARM = 0x41;
+ static const int QUALCOMM = 0x51;
+ int architecture() const { return architecture_; }
+ int part() const { return part_; }
+ static const int ARM_CORTEX_A5 = 0xc05;
+ static const int ARM_CORTEX_A7 = 0xc07;
+ static const int ARM_CORTEX_A8 = 0xc08;
+ static const int ARM_CORTEX_A9 = 0xc09;
+ static const int ARM_CORTEX_A12 = 0xc0c;
+ static const int ARM_CORTEX_A15 = 0xc0f;
+
+ // General features
+ bool has_fpu() const { return has_fpu_; }
+
+ // x86 features
+ bool has_cmov() const { return has_cmov_; }
+ bool has_sahf() const { return has_sahf_; }
+ bool has_mmx() const { return has_mmx_; }
+ bool has_sse() const { return has_sse_; }
+ bool has_sse2() const { return has_sse2_; }
+ bool has_sse3() const { return has_sse3_; }
+ bool has_ssse3() const { return has_ssse3_; }
+ bool has_sse41() const { return has_sse41_; }
+ bool has_sse42() const { return has_sse42_; }
+
+ // arm features
+ bool has_idiva() const { return has_idiva_; }
+ bool has_neon() const { return has_neon_; }
+ bool has_thumbee() const { return has_thumbee_; }
+ bool has_vfp() const { return has_vfp_; }
+ bool has_vfp3() const { return has_vfp3_; }
+ bool has_vfp3_d32() const { return has_vfp3_d32_; }
+
+ // Returns the number of processors online.
+ static int NumberOfProcessorsOnline();
+
// Initializes the cpu architecture support. Called once at VM startup.
static void SetUp();
@@ -60,8 +110,33 @@ class CPU : public AllStatic {
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
- // Try to activate a system level debugger.
- static void DebugBreak();
+ private:
+ char vendor_[13];
+ int stepping_;
+ int model_;
+ int ext_model_;
+ int family_;
+ int ext_family_;
+ int type_;
+ int implementer_;
+ int architecture_;
+ int part_;
+ bool has_fpu_;
+ bool has_cmov_;
+ bool has_sahf_;
+ bool has_mmx_;
+ bool has_sse_;
+ bool has_sse2_;
+ bool has_sse3_;
+ bool has_ssse3_;
+ bool has_sse41_;
+ bool has_sse42_;
+ bool has_idiva_;
+ bool has_neon_;
+ bool has_thumbee_;
+ bool has_vfp_;
+ bool has_vfp3_;
+ bool has_vfp3_d32_;
};
} } // namespace v8::internal
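Note: with this change CPU is no longer an all-static helper class. Constructing an instance runs the detection once (cpuid on x86, /proc parsing on ARM and MIPS), and the getters just read cached fields. A usage sketch against the interface above:

    #include "cpu.h"

    void ConfigureCodegen() {
      v8::internal::CPU cpu;  // probes the host CPU in the constructor
      if (cpu.has_sse2()) {
        // Safe to emit SSE2 instructions on this host.
      }
      if (cpu.implementer() == v8::internal::CPU::ARM &&
          cpu.part() == v8::internal::CPU::ARM_CORTEX_A9) {
        // Apply Cortex-A9 specific tuning.
      }
    }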
diff --git a/chromium/v8/src/d8-debug.cc b/chromium/v8/src/d8-debug.cc
index 9a72518f4cb..602ae166bea 100644
--- a/chromium/v8/src/d8-debug.cc
+++ b/chromium/v8/src/d8-debug.cc
@@ -29,8 +29,9 @@
#include "d8.h"
#include "d8-debug.h"
-#include "platform.h"
#include "debug-agent.h"
+#include "platform.h"
+#include "platform/socket.h"
namespace v8 {
@@ -171,21 +172,14 @@ void RunRemoteDebugger(Isolate* isolate, int port) {
void RemoteDebugger::Run() {
bool ok;
- // Make sure that socket support is initialized.
- ok = i::Socket::SetUp();
- if (!ok) {
- printf("Unable to initialize socket support %d\n", i::Socket::LastError());
- return;
- }
-
// Connect to the debugger agent.
- conn_ = i::OS::CreateSocket();
+ conn_ = new i::Socket;
static const int kPortStrSize = 6;
char port_str[kPortStrSize];
i::OS::SNPrintF(i::Vector<char>(port_str, kPortStrSize), "%d", port_);
ok = conn_->Connect("localhost", port_str);
if (!ok) {
- printf("Unable to connect to debug agent %d\n", i::Socket::LastError());
+ printf("Unable to connect to debug agent %d\n", i::Socket::GetLastError());
return;
}
@@ -201,7 +195,7 @@ void RemoteDebugger::Run() {
// Process events received from debugged VM and from the keyboard.
bool terminate = false;
while (!terminate) {
- event_available_->Wait();
+ event_available_.Wait();
RemoteDebuggerEvent* event = GetEvent();
switch (event->type()) {
case RemoteDebuggerEvent::kMessage:
@@ -248,7 +242,7 @@ void RemoteDebugger::ConnectionClosed() {
void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
if (head_ == NULL) {
ASSERT(tail_ == NULL);
head_ = event;
@@ -258,12 +252,12 @@ void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
tail_->set_next(event);
tail_ = event;
}
- event_available_->Signal();
+ event_available_.Signal();
}
RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
ASSERT(head_ != NULL);
RemoteDebuggerEvent* result = head_;
head_ = head_->next();
diff --git a/chromium/v8/src/d8-debug.h b/chromium/v8/src/d8-debug.h
index 2386b6bd6c4..55876229a32 100644
--- a/chromium/v8/src/d8-debug.h
+++ b/chromium/v8/src/d8-debug.h
@@ -53,8 +53,7 @@ class RemoteDebugger {
explicit RemoteDebugger(Isolate* isolate, int port)
: isolate_(isolate),
port_(port),
- event_access_(i::OS::CreateMutex()),
- event_available_(i::OS::CreateSemaphore(0)),
+ event_available_(0),
head_(NULL), tail_(NULL) {}
void Run();
@@ -84,8 +83,8 @@ class RemoteDebugger {
// Linked list of events from debugged V8 and from keyboard input. Access to
// the list is guarded by a mutex and a semaphore signals new items in the
// list.
- i::Mutex* event_access_;
- i::Semaphore* event_available_;
+ i::Mutex event_access_;
+ i::Semaphore event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
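Note: the d8 changes above follow the pattern used throughout this update: heap-allocated Mutex*/Semaphore* members created via OS::CreateMutex()/CreateSemaphore() become plain value members, and ScopedLock gives way to LockGuard<Mutex>. A minimal sketch of the RAII guard, assuming a mutex type with Lock()/Unlock():

    // Sketch of a LockGuard in the spirit of the one used above.
    template <typename Mutex>
    class LockGuardSketch {
     public:
      explicit LockGuardSketch(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
      ~LockGuardSketch() { mutex_->Unlock(); }
     private:
      Mutex* mutex_;
      // Non-copyable.
      LockGuardSketch(const LockGuardSketch&);
      void operator=(const LockGuardSketch&);
    };

Holding the lock for exactly the guard's scope replaces the manual pattern, and making event_access_ a value member removes the matching delete in the destructor.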
diff --git a/chromium/v8/src/d8.cc b/chromium/v8/src/d8.cc
index 6e7beebf549..da3d14de528 100644
--- a/chromium/v8/src/d8.cc
+++ b/chromium/v8/src/d8.cc
@@ -157,7 +157,7 @@ CounterMap* Shell::counter_map_;
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
+i::Mutex Shell::context_mutex_;
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
@@ -271,10 +271,10 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive.
for (int i = 0; i < data_->realm_count_; ++i)
- data_->realms_[i].Dispose(data_->isolate_);
+ data_->realms_[i].Dispose();
delete[] data_->realms_;
if (!data_->realm_shared_.IsEmpty())
- data_->realm_shared_.Dispose(data_->isolate_);
+ data_->realm_shared_.Dispose();
}
@@ -361,7 +361,7 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw("Invalid realm index");
return;
}
- data->realms_[index].Dispose(isolate);
+ data->realms_[index].Dispose();
data->realms_[index].Clear();
}
@@ -420,7 +420,7 @@ void Shell::RealmSharedSet(Local<String> property,
const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose(isolate);
+ if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose();
data->realm_shared_.Reset(isolate, value);
}
@@ -766,13 +766,14 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
- i::Debug* debug = i::Isolate::Current()->debug();
+ i::Debug* debug = reinterpret_cast<i::Isolate*>(isolate)->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
utility_context->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
- debug->debug_context()->set_security_token(HEAP->undefined_value());
+ debug->debug_context()->set_security_token(
+ reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
// Run the d8 shell utility script in the utility context
@@ -925,7 +926,7 @@ void Shell::InitializeDebugger(Isolate* isolate) {
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
- i::ScopedLock lock(context_mutex_);
+ i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
@@ -935,7 +936,7 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Context::Scope scope(context);
#ifndef V8_SHARED
- i::Factory* factory = i::Isolate::Current()->factory();
+ i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
factory->NewFixedArray(js_args.argc());
@@ -1011,7 +1012,6 @@ void Shell::OnExit() {
"-------------+\n");
delete [] counters;
}
- delete context_mutex_;
delete counters_file_;
delete counter_map_;
#endif // V8_SHARED
@@ -1221,10 +1221,6 @@ void ShellThread::Run() {
SourceGroup::~SourceGroup() {
#ifndef V8_SHARED
- delete next_semaphore_;
- next_semaphore_ = NULL;
- delete done_semaphore_;
- done_semaphore_ = NULL;
delete thread_;
thread_ = NULL;
#endif // V8_SHARED
@@ -1285,7 +1281,7 @@ i::Thread::Options SourceGroup::GetThreadOptions() {
void SourceGroup::ExecuteInThread() {
Isolate* isolate = Isolate::New();
do {
- if (next_semaphore_ != NULL) next_semaphore_->Wait();
+ next_semaphore_.Wait();
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
@@ -1305,7 +1301,7 @@ void SourceGroup::ExecuteInThread() {
V8::IdleNotification(kLongIdlePauseInMs);
}
}
- if (done_semaphore_ != NULL) done_semaphore_->Signal();
+ done_semaphore_.Signal();
} while (!Shell::options.last_run);
isolate->Dispose();
}
@@ -1316,7 +1312,7 @@ void SourceGroup::StartExecuteInThread() {
thread_ = new IsolateThread(this);
thread_->Start();
}
- next_semaphore_->Signal();
+ next_semaphore_.Signal();
}
@@ -1325,7 +1321,7 @@ void SourceGroup::WaitForThread() {
if (Shell::options.last_run) {
thread_->Join();
} else {
- done_semaphore_->Wait();
+ done_semaphore_.Wait();
}
}
#endif // V8_SHARED
@@ -1336,6 +1332,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
if (strcmp(argv[i], "--stress-opt") == 0) {
options.stress_opt = true;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--nostress-opt") == 0) {
+ options.stress_opt = false;
+ argv[i] = NULL;
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
options.stress_deopt = true;
argv[i] = NULL;
@@ -1406,6 +1405,14 @@ bool Shell::SetOptions(int argc, char* argv[]) {
#else
options.num_parallel_files++;
#endif // V8_SHARED
+ } else if (strcmp(argv[i], "--dump-heap-constants") == 0) {
+#ifdef V8_SHARED
+ printf("D8 with shared library does not support constant dumping\n");
+ return false;
+#else
+ options.dump_heap_constants = true;
+ argv[i] = NULL;
+#endif
}
#ifdef V8_SHARED
else if (strcmp(argv[i], "--dump-counters") == 0) {
@@ -1548,11 +1555,10 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifdef V8_SHARED
static void SetStandaloneFlagsViaCommandLine() {
- int fake_argc = 3;
- char **fake_argv = new char*[3];
+ int fake_argc = 2;
+ char **fake_argv = new char*[2];
fake_argv[0] = NULL;
- fake_argv[1] = strdup("--harmony-typed-arrays");
- fake_argv[2] = strdup("--trace-hydrogen-file=hydrogen.cfg");
+ fake_argv[1] = strdup("--trace-hydrogen-file=hydrogen.cfg");
v8::V8::SetFlagsFromCommandLine(&fake_argc, fake_argv, false);
free(fake_argv[1]);
delete[] fake_argv;
@@ -1560,6 +1566,63 @@ static void SetStandaloneFlagsViaCommandLine() {
#endif
+#ifndef V8_SHARED
+static void DumpHeapConstants(i::Isolate* isolate) {
+ i::Heap* heap = isolate->heap();
+
+ // Dump the INSTANCE_TYPES table to the console.
+ printf("# List of known V8 instance types.\n");
+#define DUMP_TYPE(T) printf(" %d: \"%s\",\n", i::T, #T);
+ printf("INSTANCE_TYPES = {\n");
+ INSTANCE_TYPE_LIST(DUMP_TYPE)
+ printf("}\n");
+#undef DUMP_TYPE
+
+ // Dump the KNOWN_MAP table to the console.
+ printf("\n# List of known V8 maps.\n");
+#define ROOT_LIST_CASE(type, name, camel_name) \
+ if (n == NULL && o == heap->name()) n = #camel_name;
+#define STRUCT_LIST_CASE(upper_name, camel_name, name) \
+ if (n == NULL && o == heap->name##_map()) n = #camel_name "Map";
+ i::HeapObjectIterator it(heap->map_space());
+ printf("KNOWN_MAPS = {\n");
+ for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ i::Map* m = i::Map::cast(o);
+ const char* n = NULL;
+ intptr_t p = reinterpret_cast<intptr_t>(m) & 0xfffff;
+ int t = m->instance_type();
+ ROOT_LIST(ROOT_LIST_CASE)
+ STRUCT_LIST(STRUCT_LIST_CASE)
+ if (n == NULL) continue;
+ printf(" 0x%05" V8PRIxPTR ": (%d, \"%s\"),\n", p, t, n);
+ }
+ printf("}\n");
+#undef STRUCT_LIST_CASE
+#undef ROOT_LIST_CASE
+
+ // Dump the KNOWN_OBJECTS table to the console.
+ printf("\n# List of known V8 objects.\n");
+#define ROOT_LIST_CASE(type, name, camel_name) \
+ if (n == NULL && o == heap->name()) n = #camel_name;
+ i::OldSpaces spit(heap);
+ printf("KNOWN_OBJECTS = {\n");
+ for (i::PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
+ i::HeapObjectIterator it(s);
+ const char* sname = AllocationSpaceName(s->identity());
+ for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ const char* n = NULL;
+ intptr_t p = reinterpret_cast<intptr_t>(o) & 0xfffff;
+ ROOT_LIST(ROOT_LIST_CASE)
+ if (n == NULL) continue;
+ printf(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", sname, p, n);
+ }
+ }
+ printf("}\n");
+#undef ROOT_LIST_CASE
+}
+#endif // V8_SHARED
+
+
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
@@ -1584,8 +1647,6 @@ int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU();
#ifndef V8_SHARED
- i::FLAG_harmony_array_buffer = true;
- i::FLAG_harmony_typed_arrays = true;
i::FLAG_trace_hydrogen_file = "hydrogen.cfg";
#else
SetStandaloneFlagsViaCommandLine();
@@ -1603,6 +1664,13 @@ int Shell::Main(int argc, char* argv[]) {
PerIsolateData data(isolate);
InitializeDebugger(isolate);
+#ifndef V8_SHARED
+ if (options.dump_heap_constants) {
+ DumpHeapConstants(reinterpret_cast<i::Isolate*>(isolate));
+ return 0;
+ }
+#endif
+
if (options.stress_opt || options.stress_deopt) {
Testing::SetStressRunType(options.stress_opt
? Testing::kStressTypeOpt
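Note: the new --dump-heap-constants mode prints Python dict literals (INSTANCE_TYPES, KNOWN_MAPS, KNOWN_OBJECTS) so that heap-inspection tooling can be regenerated from a fresh build. The shape of the output, with purely hypothetical values:

    # List of known V8 instance types.
    INSTANCE_TYPES = {
      136: "MAP_TYPE",            # numeric id is hypothetical
    }
    # List of known V8 maps.
    KNOWN_MAPS = {
      0x08081: (136, "MetaMap"),  # low 20 address bits: (type, name)
    }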
diff --git a/chromium/v8/src/d8.h b/chromium/v8/src/d8.h
index 4f04342cf4d..1ae1bcfe6e7 100644
--- a/chromium/v8/src/d8.h
+++ b/chromium/v8/src/d8.h
@@ -140,8 +140,8 @@ class SourceGroup {
public:
SourceGroup() :
#ifndef V8_SHARED
- next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
- done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
+ next_semaphore_(0),
+ done_semaphore_(0),
thread_(NULL),
#endif // V8_SHARED
argv_(NULL),
@@ -180,8 +180,8 @@ class SourceGroup {
static i::Thread::Options GetThreadOptions();
void ExecuteInThread();
- i::Semaphore* next_semaphore_;
- i::Semaphore* done_semaphore_;
+ i::Semaphore next_semaphore_;
+ i::Semaphore done_semaphore_;
i::Thread* thread_;
#endif // V8_SHARED
@@ -231,6 +231,7 @@ class ShellOptions {
stress_deopt(false),
interactive_shell(false),
test_shell(false),
+ dump_heap_constants(false),
num_isolates(1),
isolate_sources(NULL) { }
@@ -254,6 +255,7 @@ class ShellOptions {
bool stress_deopt;
bool interactive_shell;
bool test_shell;
+ bool dump_heap_constants;
int num_isolates;
SourceGroup* isolate_sources;
};
@@ -388,7 +390,7 @@ class Shell : public i::AllStatic {
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
- static i::Mutex* context_mutex_;
+ static i::Mutex context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
@@ -398,8 +400,8 @@ class Shell : public i::AllStatic {
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
- static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
- static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
+ static Handle<FunctionTemplate> CreateArrayBufferTemplate(FunctionCallback);
+ static Handle<FunctionTemplate> CreateArrayTemplate(FunctionCallback);
static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
Handle<Object> buffer,
int32_t size);
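Note: the CreateArrayBufferTemplate/CreateArrayTemplate signatures switch from InvocationCallback to FunctionCallback, tracking the v8.h API change in this update: callbacks no longer return a Handle<Value> but write results through the callback info. A before/after sketch (function bodies are hypothetical, and whether the old typedef still compiles against this v8.h is version-dependent; it is shown for contrast only):

    #include "v8.h"

    // Old style: InvocationCallback.
    static v8::Handle<v8::Value> Answer(const v8::Arguments& args) {
      return v8::Integer::New(42);
    }

    // New style: FunctionCallback.
    static void Answer2(const v8::FunctionCallbackInfo<v8::Value>& args) {
      args.GetReturnValue().Set(v8::Integer::New(42));
    }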
diff --git a/chromium/v8/src/debug-agent.cc b/chromium/v8/src/debug-agent.cc
index 811c00e0cc6..51823aaf24c 100644
--- a/chromium/v8/src/debug-agent.cc
+++ b/chromium/v8/src/debug-agent.cc
@@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_DEBUGGER_SUPPORT
#include "v8.h"
#include "debug.h"
#include "debug-agent.h"
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
+#include "platform/socket.h"
namespace v8 {
namespace internal {
@@ -38,16 +38,36 @@ namespace internal {
// Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
- DebuggerAgent* agent = Isolate::Current()->debugger_agent_instance();
+ Isolate* isolate = reinterpret_cast<Isolate*>(message.GetIsolate());
+ DebuggerAgent* agent = isolate->debugger_agent_instance();
ASSERT(agent != NULL);
agent->DebuggerMessage(message);
}
+DebuggerAgent::DebuggerAgent(Isolate* isolate, const char* name, int port)
+ : Thread(name),
+ isolate_(isolate),
+ name_(StrDup(name)),
+ port_(port),
+ server_(new Socket),
+ terminate_(false),
+ session_(NULL),
+ terminate_now_(0),
+ listening_(0) {
+ ASSERT(isolate_->debugger_agent_instance() == NULL);
+ isolate_->set_debugger_agent_instance(this);
+}
+
+
+DebuggerAgent::~DebuggerAgent() {
+ isolate_->set_debugger_agent_instance(NULL);
+ delete server_;
+}
+
+
// Debugger agent main thread.
void DebuggerAgent::Run() {
- const int kOneSecondInMicros = 1000000;
-
// Allow this socket to reuse port even if still in TIME_WAIT.
server_->SetReuseAddress(true);
@@ -60,16 +80,20 @@ void DebuggerAgent::Run() {
// would be that the port is already in use so this avoids a busy loop and
// makes the agent take over the port when it becomes free.
if (!bound) {
+ const TimeDelta kTimeout = TimeDelta::FromSeconds(1);
PrintF("Failed to open socket on port %d, "
- "waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
- terminate_now_->Wait(kOneSecondInMicros);
+ "waiting %d ms before retrying\n", port_,
+ static_cast<int>(kTimeout.InMilliseconds()));
+ if (!terminate_now_.WaitFor(kTimeout)) {
+ if (terminate_) return;
+ }
}
}
// Accept connections on the bound port.
while (!terminate_) {
bool ok = server_->Listen(1);
- listening_->Signal();
+ listening_.Signal();
if (ok) {
// Accept the new connection.
Socket* client = server_->Accept();
@@ -89,7 +113,7 @@ void DebuggerAgent::Shutdown() {
// Signal termination and make the server exit either its listen call or its
// binding loop. This makes sure that no new sessions can be established.
- terminate_now_->Signal();
+ terminate_now_.Signal();
server_->Shutdown();
Join();
@@ -99,19 +123,21 @@ void DebuggerAgent::Shutdown() {
void DebuggerAgent::WaitUntilListening() {
- listening_->Wait();
+ listening_.Wait();
}
static const char* kCreateSessionMessage =
"Remote debugging session already active\r\n";
void DebuggerAgent::CreateSession(Socket* client) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
- client->Send(kCreateSessionMessage, StrLength(kCreateSessionMessage));
+ int len = StrLength(kCreateSessionMessage);
+ int res = client->Send(kCreateSessionMessage, len);
delete client;
+ USE(res);
return;
}
@@ -123,7 +149,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
void DebuggerAgent::CloseSession() {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Terminate the session.
if (session_ != NULL) {
@@ -136,7 +162,7 @@ void DebuggerAgent::CloseSession() {
void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Forward the message handling to the session.
if (session_ != NULL) {
@@ -154,7 +180,7 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
}
// Terminate the session.
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
ASSERT(session == session_);
if (session == session_) {
session_->Shutdown();
@@ -226,7 +252,7 @@ void DebuggerAgentSession::Shutdown() {
const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
+SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(Socket* conn) {
int received;
// Read header.
@@ -243,7 +269,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
prev_c = c;
received = conn->Receive(&c, 1);
if (received == 0) {
- PrintF("Error %d\n", Socket::LastError());
+ PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
@@ -305,7 +331,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
char* buffer = NewArray<char>(content_length + 1);
received = ReceiveAll(conn, buffer, content_length);
if (received < content_length) {
- PrintF("Error %d\n", Socket::LastError());
+ PrintF("Error %d\n", Socket::GetLastError());
return SmartArrayPointer<char>();
}
buffer[content_length] = '\0';
@@ -314,7 +340,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
}
-bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendConnectMessage(Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
@@ -360,7 +386,7 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
}
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendMessage(Socket* conn,
const Vector<uint16_t> message) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@@ -375,14 +401,17 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
// Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "%s: %d\r\n", kContentLength, utf8_len);
- conn->Send(buffer, len);
+ int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+ "%s: %d\r\n", kContentLength, utf8_len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Send message body as UTF-8.
int buffer_position = 0; // Current buffer position.
@@ -402,13 +431,19 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
const int kEncodedSurrogateLength =
unibrow::Utf16::kUtf8BytesToCodeASurrogate;
ASSERT(buffer_position >= kEncodedSurrogateLength);
- conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
+ len = buffer_position - kEncodedSurrogateLength;
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
for (int i = 0; i < kEncodedSurrogateLength; i++) {
buffer[i] = buffer[buffer_position + i];
}
buffer_position = kEncodedSurrogateLength;
} else {
- conn->Send(buffer, buffer_position);
+ len = buffer_position;
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
buffer_position = 0;
}
}
@@ -419,7 +454,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
}
-bool DebuggerAgentUtil::SendMessage(const Socket* conn,
+bool DebuggerAgentUtil::SendMessage(Socket* conn,
const v8::Handle<v8::String> request) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer both for header and body.
@@ -428,24 +463,30 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
v8::String::Utf8Value utf8_request(request);
// Send the header.
- int len;
- len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
- "Content-Length: %d\r\n", utf8_request.length());
- conn->Send(buffer, len);
+ int len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
+ "Content-Length: %d\r\n", utf8_request.length());
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
- conn->Send(buffer, len);
+ if (conn->Send(buffer, len) < len) {
+ return false;
+ }
// Send message body as UTF-8.
- conn->Send(*utf8_request, utf8_request.length());
+ len = utf8_request.length();
+ if (conn->Send(*utf8_request, len) < len) {
+ return false;
+ }
return true;
}
// Receive the full buffer before returning unless an error occurs.
-int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
+int DebuggerAgentUtil::ReceiveAll(Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);
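Note: Socket::Send() now reports the number of bytes actually written, and every call site above checks for short writes instead of discarding the result. The diff treats any short write as a failure; a common stricter alternative is to retry until the buffer drains, sketched here under the same assumption that Send() returns bytes written and a value <= 0 on error:

    // Sketch: loop until the whole buffer is written or the socket fails.
    static bool SendAll(v8::internal::Socket* conn, const char* data, int len) {
      int sent = 0;
      while (sent < len) {
        int n = conn->Send(data + sent, len - sent);
        if (n <= 0) return false;  // connection closed or error
        sent += n;
      }
      return true;
    }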
diff --git a/chromium/v8/src/debug-agent.h b/chromium/v8/src/debug-agent.h
index 61151900f01..138e51acc60 100644
--- a/chromium/v8/src/debug-agent.h
+++ b/chromium/v8/src/debug-agent.h
@@ -37,27 +37,15 @@ namespace internal {
// Forward declarations.
class DebuggerAgentSession;
+class Socket;
// Debugger agent which starts a socket listener on the debugger port and
// handles connection from a remote debugger.
class DebuggerAgent: public Thread {
public:
- DebuggerAgent(const char* name, int port)
- : Thread(name),
- isolate_(Isolate::Current()),
- name_(StrDup(name)), port_(port),
- server_(OS::CreateSocket()), terminate_(false),
- session_access_(OS::CreateMutex()), session_(NULL),
- terminate_now_(OS::CreateSemaphore(0)),
- listening_(OS::CreateSemaphore(0)) {
- ASSERT(isolate_->debugger_agent_instance() == NULL);
- isolate_->set_debugger_agent_instance(this);
- }
- ~DebuggerAgent() {
- isolate_->set_debugger_agent_instance(NULL);
- delete server_;
- }
+ DebuggerAgent(Isolate* isolate, const char* name, int port);
+ ~DebuggerAgent();
void Shutdown();
void WaitUntilListening();
@@ -76,10 +64,10 @@ class DebuggerAgent: public Thread {
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
bool terminate_; // Termination flag.
- Mutex* session_access_; // Mutex guarging access to session_.
+ RecursiveMutex session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
- Semaphore* terminate_now_; // Semaphore to signal termination.
- Semaphore* listening_;
+ Semaphore terminate_now_; // Semaphore to signal termination.
+ Semaphore listening_;
friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
@@ -116,13 +104,11 @@ class DebuggerAgentUtil {
public:
static const char* const kContentLength;
- static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
- static bool SendConnectMessage(const Socket* conn,
- const char* embedding_host);
- static bool SendMessage(const Socket* conn, const Vector<uint16_t> message);
- static bool SendMessage(const Socket* conn,
- const v8::Handle<v8::String> message);
- static int ReceiveAll(const Socket* conn, char* data, int len);
+ static SmartArrayPointer<char> ReceiveMessage(Socket* conn);
+ static bool SendConnectMessage(Socket* conn, const char* embedding_host);
+ static bool SendMessage(Socket* conn, const Vector<uint16_t> message);
+ static bool SendMessage(Socket* conn, const v8::Handle<v8::String> message);
+ static int ReceiveAll(Socket* conn, char* data, int len);
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/debug-debugger.js b/chromium/v8/src/debug-debugger.js
index a588b4c21d7..19209d4b95d 100644
--- a/chromium/v8/src/debug-debugger.js
+++ b/chromium/v8/src/debug-debugger.js
@@ -957,12 +957,17 @@ function ExecutionState(break_id) {
this.selected_frame = 0;
}
-ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
+ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
+ opt_callframe) {
var action = Debug.StepAction.StepIn;
if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action);
var count = opt_count ? %ToNumber(opt_count) : 1;
+ var callFrameId = 0;
+ if (!IS_UNDEFINED(opt_callframe)) {
+ callFrameId = opt_callframe.details_.frameId();
+ }
- return %PrepareStep(this.break_id, action, count);
+ return %PrepareStep(this.break_id, action, count, callFrameId);
};
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
diff --git a/chromium/v8/src/debug.cc b/chromium/v8/src/debug.cc
index a0b9884410f..0496b8cb007 100644
--- a/chromium/v8/src/debug.cc
+++ b/chromium/v8/src/debug.cc
@@ -86,8 +86,9 @@ static void PrintLn(v8::Local<v8::Value> value) {
}
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
- Isolate* isolate = Isolate::Current();
+static Handle<Code> ComputeCallDebugPrepareStepIn(Isolate* isolate,
+ int argc,
+ Code::Kind kind) {
return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
}
@@ -159,7 +160,6 @@ void BreakLocationIterator::Next() {
Code* code = Code::GetCodeFromTargetAddress(target);
if ((code->is_inline_cache_stub() &&
!code->is_binary_op_stub() &&
- !code->is_unary_op_stub() &&
!code->is_compare_ic_stub() &&
!code->is_to_boolean_ic_stub()) ||
RelocInfo::IsConstructCall(rmode())) {
@@ -404,12 +404,15 @@ void BreakLocationIterator::ClearDebugBreak() {
bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
- if (RelocInfo::IsConstructCall(rmode())) {
+ if (RelocInfo::IsConstructCall(original_rmode())) {
return true;
} else if (RelocInfo::IsCodeTarget(rmode())) {
HandleScope scope(debug_info_->GetIsolate());
- Address target = rinfo()->target_address();
+ Address target = original_rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+ if (target_code->kind() == Code::STUB) {
+ return target_code->major_key() == CodeStub::CallFunction;
+ }
return target_code->is_call_stub() || target_code->is_keyed_call_stub();
} else {
return false;
@@ -431,7 +434,7 @@ void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn(
- target_code->arguments_count(), target_code->kind());
+ isolate, target_code->arguments_count(), target_code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@@ -631,7 +634,7 @@ const int Debug::kFrameDropperFrameSize = 4;
void ScriptCache::Add(Handle<Script> script) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = isolate_->global_handles();
// Create an entry in the hash map for the script.
int id = script->id()->value();
HashMap::Entry* entry =
@@ -653,7 +656,7 @@ void ScriptCache::Add(Handle<Script> script) {
Handle<FixedArray> ScriptCache::GetScripts() {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = isolate_->factory();
Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
@@ -668,7 +671,7 @@ Handle<FixedArray> ScriptCache::GetScripts() {
void ScriptCache::ProcessCollectedScripts() {
- Debugger* debugger = Isolate::Current()->debugger();
+ Debugger* debugger = isolate_->debugger();
for (int i = 0; i < collected_scripts_.length(); i++) {
debugger->OnScriptCollected(collected_scripts_[i]);
}
@@ -677,7 +680,7 @@ void ScriptCache::ProcessCollectedScripts() {
void ScriptCache::Clear() {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = isolate_->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
@@ -706,7 +709,7 @@ void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj->Dispose(isolate);
+ obj->Dispose();
}
@@ -748,7 +751,7 @@ void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
// Globalize the request debug info object and make it weak.
debug_info_ = Handle<DebugInfo>::cast(
(global_handles->Create(debug_info)));
@@ -759,13 +762,12 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
DebugInfoListNode::~DebugInfoListNode() {
- Isolate::Current()->global_handles()->Destroy(
+ debug_info_->GetIsolate()->global_handles()->Destroy(
reinterpret_cast<Object**>(debug_info_.location()));
}
-bool Debug::CompileDebuggerScript(int index) {
- Isolate* isolate = Isolate::Current();
+bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -822,7 +824,7 @@ bool Debug::CompileDebuggerScript(int index) {
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+ MessageHandler::ReportMessage(isolate, NULL, message);
isolate->clear_pending_exception();
}
return false;
@@ -850,7 +852,7 @@ bool Debug::Load() {
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
- DisableBreak disable(true);
+ DisableBreak disable(isolate_, true);
PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
@@ -884,12 +886,12 @@ bool Debug::Load() {
// Compile the JavaScript for the debugger in the debugger context.
debugger->set_compiling_natives(true);
bool caught_exception =
- !CompileDebuggerScript(Natives::GetIndex("mirror")) ||
- !CompileDebuggerScript(Natives::GetIndex("debug"));
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("mirror")) ||
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("debug"));
if (FLAG_enable_liveedit) {
caught_exception = caught_exception ||
- !CompileDebuggerScript(Natives::GetIndex("liveedit"));
+ !CompileDebuggerScript(isolate_, Natives::GetIndex("liveedit"));
}
debugger->set_compiling_natives(false);
@@ -956,7 +958,7 @@ Object* Debug::Break(Arguments args) {
}
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return heap->undefined_value();
}
@@ -1015,7 +1017,7 @@ Object* Debug::Break(Arguments args) {
// Clear queue
thread_local_.queued_step_count_ = 0;
- PrepareStep(StepNext, step_count);
+ PrepareStep(StepNext, step_count, StackFrame::NO_ID);
} else {
// Notify the debug event listeners.
isolate_->debugger()->OnDebugBreak(break_points_hit, false);
@@ -1053,7 +1055,7 @@ Object* Debug::Break(Arguments args) {
ClearStepping();
// Set up for the remaining steps.
- PrepareStep(step_action, step_count);
+ PrepareStep(step_action, step_count, StackFrame::NO_ID);
}
if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
@@ -1374,7 +1376,9 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
-void Debug::PrepareStep(StepAction step_action, int step_count) {
+void Debug::PrepareStep(StepAction step_action,
+ int step_count,
+ StackFrame::Id frame_id) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
@@ -1400,6 +1404,9 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// If there is no JavaScript stack don't do anything.
return;
}
+ if (frame_id != StackFrame::NO_ID) {
+ id = frame_id;
+ }
JavaScriptFrameIterator frames_it(isolate_, id);
JavaScriptFrame* frame = frames_it.frame();
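
PrepareStep now accepts a third argument that can retarget stepping at an explicit frame; StackFrame::NO_ID preserves the old behavior of stepping in the frame that triggered the break. A hypothetical call site, sketched under the assumption that the namespace alias and enum scoping below match the tree (they are not taken from this diff):

    namespace i = v8::internal;  // alias assumed for brevity

    // Step once into a caller-chosen frame; pass i::StackFrame::NO_ID to
    // keep the pre-existing "step in the break frame" behavior.
    void StepIntoFrame(i::Isolate* isolate, i::StackFrame::Id frame_id) {
      isolate->debug()->PrepareStep(i::StepIn, 1 /* step_count */, frame_id);
    }
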
@@ -1625,7 +1632,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// object.
bool Debug::IsDebugBreak(Address addr) {
Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->is_debug_break();
+ return code->is_debug_stub() && code->extra_ic_state() == DEBUG_BREAK;
}
@@ -1647,7 +1654,7 @@ bool Debug::IsBreakStub(Code* code) {
// Find the builtin to use for invoking the debug break
Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = code->GetIsolate();
// Find the builtin debug break function matching the calling convention
// used by the call site.
@@ -1702,7 +1709,7 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
BreakPositionAlignment position_alignment) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = shared->GetIsolate();
Heap* heap = isolate->heap();
if (!HasDebugInfo(shared)) {
return Handle<Object>(heap->undefined_value(), isolate);
@@ -1881,7 +1888,7 @@ static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
// Use compile lazy which will end up compiling the full code in the
// configuration configured above.
bool result = Compiler::CompileLazy(&info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
+ ASSERT(result != info.isolate()->has_pending_exception());
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
@@ -2045,6 +2052,10 @@ void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
+ if (FLAG_concurrent_recompilation) {
+ isolate_->optimizing_compiler_thread()->Flush();
+ }
+
Deoptimizer::DeoptimizeAll(isolate_);
Handle<Code> lazy_compile =
@@ -2099,10 +2110,9 @@ void Debug::PrepareForBreakPoints() {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
} else if (kind == Code::BUILTIN &&
- (function->IsMarkedForInstallingRecompiledCode() ||
- function->IsInRecompileQueue() ||
+ (function->IsInRecompileQueue() ||
function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation())) {
+ function->IsMarkedForConcurrentRecompilation())) {
// Abort in-flight compilation.
Code* shared_code = function->shared()->code();
if (shared_code->kind() == Code::FUNCTION &&
@@ -2531,7 +2541,7 @@ void Debug::CreateScriptCache() {
"Debug::CreateScriptCache");
ASSERT(script_cache_ == NULL);
- script_cache_ = new ScriptCache();
+ script_cache_ = new ScriptCache(isolate_);
// Scan heap for Script objects.
int count = 0;
@@ -2606,24 +2616,18 @@ Debugger::Debugger(Isolate* isolate)
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
- dispatch_handler_access_(OS::CreateMutex()),
debug_message_dispatch_handler_(NULL),
message_dispatch_helper_thread_(NULL),
- host_dispatch_micros_(100 * 1000),
+ host_dispatch_period_(TimeDelta::FromMilliseconds(100)),
agent_(NULL),
command_queue_(isolate->logger(), kQueueInitialSize),
- command_received_(OS::CreateSemaphore(0)),
+ command_received_(0),
event_command_queue_(isolate->logger(), kQueueInitialSize),
isolate_(isolate) {
}
-Debugger::~Debugger() {
- delete dispatch_handler_access_;
- dispatch_handler_access_ = 0;
- delete command_received_;
- command_received_ = 0;
-}
+Debugger::~Debugger() {}
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@@ -2754,7 +2758,7 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
}
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Clear all current stepping setup.
@@ -2820,7 +2824,7 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
if (!EventActive(v8::BeforeCompile)) return;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the event data object.
@@ -2857,7 +2861,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool in_debugger = debug->InDebugger();
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// If debugging there might be script break points registered for this
@@ -2885,7 +2889,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
bool caught_exception;
Handle<Object> argv[] = { wrapper };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- Isolate::Current()->js_builtins_object(),
+ isolate_->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
@@ -2920,7 +2924,7 @@ void Debugger::OnScriptCollected(int id) {
if (!Debugger::EventActive(v8::ScriptCollected)) return;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) return;
// Create the script collected state object.
@@ -3037,7 +3041,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<Context> Debugger::GetDebugContext() {
never_unload_debugger_ = true;
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
return isolate_->debug()->debug_context();
}
@@ -3146,14 +3150,14 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
// Wait for new command in the queue.
if (Debugger::host_dispatch_handler_) {
// In case there is a host dispatch - do periodic dispatches.
- if (!command_received_->Wait(host_dispatch_micros_)) {
+ if (!command_received_.WaitFor(host_dispatch_period_)) {
// Timeout expired, do the dispatch.
Debugger::host_dispatch_handler_();
continue;
}
} else {
// In case there is no host dispatch - just wait.
- command_received_->Wait();
+ command_received_.Wait();
}
// Get the command from the queue.
@@ -3266,7 +3270,7 @@ void Debugger::SetEventListener(Handle<Object> callback,
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
message_handler_ = handler;
ListenersChanged();
@@ -3295,15 +3299,15 @@ void Debugger::ListenersChanged() {
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period) {
+ TimeDelta period) {
host_dispatch_handler_ = handler;
- host_dispatch_micros_ = period * 1000;
+ host_dispatch_period_ = period;
}
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
@@ -3316,7 +3320,7 @@ void Debugger::SetDebugMessageDispatchHandler(
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
if (message_handler_ != NULL) {
message_handler_(message);
@@ -3337,7 +3341,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
client_data);
isolate_->logger()->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
- command_received_->Signal();
+ command_received_.Signal();
// Set the debug command break flag to have the command processed.
if (!isolate_->debug()->InDebugger()) {
@@ -3346,7 +3350,7 @@ void Debugger::ProcessCommand(Vector<const uint16_t> command,
MessageDispatchHelperThread* dispatch_thread;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
dispatch_thread = message_dispatch_helper_thread_;
}
@@ -3375,7 +3379,7 @@ void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
bool Debugger::IsDebuggerActive() {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
return message_handler_ != NULL ||
!event_listener_.is_null() ||
@@ -3390,7 +3394,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Debugger::never_unload_debugger_ = true;
// Enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate_);
if (debugger.FailedToEnter()) {
return isolate_->factory()->undefined_value();
}
@@ -3404,6 +3408,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
+ isolate_,
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
isolate_),
@@ -3421,7 +3426,6 @@ static void StubMessageHandler2(const v8::Debug::Message& message) {
bool Debugger::StartAgent(const char* name, int port,
bool wait_for_connection) {
- ASSERT(Isolate::Current() == isolate_);
if (wait_for_connection) {
// Suspend V8 if it is already running or set V8 to suspend whenever
// it starts.
@@ -3433,20 +3437,15 @@ bool Debugger::StartAgent(const char* name, int port,
v8::Debug::DebugBreak();
}
- if (Socket::SetUp()) {
- if (agent_ == NULL) {
- agent_ = new DebuggerAgent(name, port);
- agent_->Start();
- }
- return true;
+ if (agent_ == NULL) {
+ agent_ = new DebuggerAgent(isolate_, name, port);
+ agent_->Start();
}
-
- return false;
+ return true;
}
void Debugger::StopAgent() {
- ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL) {
agent_->Shutdown();
agent_->Join();
@@ -3457,7 +3456,6 @@ void Debugger::StopAgent() {
void Debugger::WaitForAgent() {
- ASSERT(Isolate::Current() == isolate_);
if (agent_ != NULL)
agent_->WaitUntilListening();
}
@@ -3466,7 +3464,7 @@ void Debugger::WaitForAgent() {
void Debugger::CallMessageDispatchHandler() {
v8::Debug::DebugMessageDispatchHandler handler;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
handler = Debugger::debug_message_dispatch_handler_;
}
if (handler != NULL) {
@@ -3475,8 +3473,8 @@ void Debugger::CallMessageDispatchHandler() {
}
-EnterDebugger::EnterDebugger()
- : isolate_(Isolate::Current()),
+EnterDebugger::EnterDebugger(Isolate* isolate)
+ : isolate_(isolate),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
@@ -3511,7 +3509,6 @@ EnterDebugger::EnterDebugger()
EnterDebugger::~EnterDebugger() {
- ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
@@ -3626,6 +3623,11 @@ v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
}
+v8::Isolate* MessageImpl::GetIsolate() const {
+ return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
+}
+
+
v8::Handle<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
@@ -3656,7 +3658,7 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = event_data_->GetIsolate();
v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
// Isolate::context() may be NULL when the "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
@@ -3697,7 +3699,7 @@ v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
- return GetDebugEventContext(Isolate::Current());
+ return GetDebugEventContext(exec_state_->GetIsolate());
}
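
The two GetEventContext hunks above are among many in this commit that replace the thread-local Isolate::Current() lookup with an isolate derived from data the code already holds. The shape of the substitution, as a sketch (some_heap_object is a placeholder, not a name from the diff):

    // Before: hidden dependency on thread-local state.
    Isolate* isolate_before = Isolate::Current();

    // After: derive the isolate from an object already in hand, or take it
    // as an explicit parameter (CompileDebuggerScript, EnterDebugger, ...).
    // This also lets the ASSERT(Isolate::Current() == isolate_) checks go.
    Isolate* isolate_after = some_heap_object->GetIsolate();
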
@@ -3787,24 +3789,17 @@ void CommandMessageQueue::Expand() {
LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
- : logger_(logger), queue_(size) {
- lock_ = OS::CreateMutex();
-}
-
-
-LockingCommandMessageQueue::~LockingCommandMessageQueue() {
- delete lock_;
-}
+ : logger_(logger), queue_(size) {}
bool LockingCommandMessageQueue::IsEmpty() const {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
CommandMessage LockingCommandMessageQueue::Get() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
logger_->DebugEvent("Get", result.text());
return result;
@@ -3812,48 +3807,42 @@ CommandMessage LockingCommandMessageQueue::Get() {
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
void LockingCommandMessageQueue::Clear() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Clear();
}
MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
: Thread("v8:MsgDispHelpr"),
- isolate_(isolate), sem_(OS::CreateSemaphore(0)),
- mutex_(OS::CreateMutex()), already_signalled_(false) {
-}
-
-
-MessageDispatchHelperThread::~MessageDispatchHelperThread() {
- delete mutex_;
- delete sem_;
+ isolate_(isolate), sem_(0),
+ already_signalled_(false) {
}
void MessageDispatchHelperThread::Schedule() {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
if (already_signalled_) {
return;
}
already_signalled_ = true;
}
- sem_->Signal();
+ sem_.Signal();
}
void MessageDispatchHelperThread::Run() {
while (true) {
- sem_->Wait();
+ sem_.Wait();
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
already_signalled_ = false;
}
{
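
Schedule() and Run() above coalesce bursts of wakeups: already_signalled_ collapses repeated Schedule() calls between two Run() iterations into a single semaphore signal. The same pattern in standalone standard C++ (a condition variable stands in for V8's Semaphore; the coalescing logic is identical):

    #include <condition_variable>
    #include <mutex>

    class Scheduler {
     public:
      void Schedule() {
        {
          std::lock_guard<std::mutex> guard(mutex_);
          if (already_signalled_) return;  // Coalesce bursts into one wakeup.
          already_signalled_ = true;
        }
        cv_.notify_one();
      }

      void Run() {
        for (;;) {
          std::unique_lock<std::mutex> lock(mutex_);
          cv_.wait(lock, [this] { return already_signalled_; });
          already_signalled_ = false;      // Consume the coalesced signal.
          lock.unlock();
          // ... dispatch debugger messages, as Run() does above ...
        }
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      bool already_signalled_ = false;
    };
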
diff --git a/chromium/v8/src/debug.h b/chromium/v8/src/debug.h
index 67debc7543c..2b5f43ab495 100644
--- a/chromium/v8/src/debug.h
+++ b/chromium/v8/src/debug.h
@@ -174,7 +174,8 @@ class BreakLocationIterator {
// the cache is the script id.
class ScriptCache : private HashMap {
public:
- ScriptCache() : HashMap(ScriptMatch), collected_scripts_(10) {}
+ explicit ScriptCache(Isolate* isolate)
+ : HashMap(ScriptMatch), isolate_(isolate), collected_scripts_(10) {}
virtual ~ScriptCache() { Clear(); }
// Add script to the cache.
@@ -203,6 +204,7 @@ class ScriptCache : private HashMap {
v8::Persistent<v8::Value>* obj,
void* data);
+ Isolate* isolate_;
// List used during GC to temporarily store id's of collected scripts.
List<int> collected_scripts_;
};
@@ -259,7 +261,9 @@ class Debug {
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
- void PrepareStep(StepAction step_action, int step_count);
+ void PrepareStep(StepAction step_action,
+ int step_count,
+ StackFrame::Id frame_id);
void ClearStepping();
void ClearStepOut();
bool IsStepping() { return thread_local_.step_count_ > 0; }
@@ -532,7 +536,7 @@ class Debug {
explicit Debug(Isolate* isolate);
~Debug();
- static bool CompileDebuggerScript(int index);
+ static bool CompileDebuggerScript(Isolate* isolate, int index);
void ClearOneShot();
void ActivateStepIn(StackFrame* frame);
void ClearStepIn();
@@ -664,6 +668,7 @@ class MessageImpl: public v8::Debug::Message {
virtual v8::Handle<v8::String> GetJSON() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Debug::ClientData* GetClientData() const;
+ virtual v8::Isolate* GetIsolate() const;
private:
MessageImpl(bool is_event,
@@ -762,7 +767,6 @@ class MessageDispatchHelperThread;
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
LockingCommandMessageQueue(Logger* logger, int size);
- ~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
@@ -770,7 +774,7 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
private:
Logger* logger_;
CommandMessageQueue queue_;
- Mutex* lock_;
+ mutable Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
@@ -821,7 +825,7 @@ class Debugger {
void SetEventListener(Handle<Object> callback, Handle<Object> data);
void SetMessageHandler(v8::Debug::MessageHandler2 handler);
void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
- int period);
+ TimeDelta period);
void SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler,
bool provide_locker);
@@ -863,7 +867,7 @@ class Debugger {
friend void ForceUnloadDebugger(); // In test-debug.cc
inline bool EventActive(v8::DebugEvent event) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access_);
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
@@ -918,7 +922,7 @@ class Debugger {
Handle<Object> event_data);
void ListenersChanged();
- Mutex* debugger_access_; // Mutex guarding debugger variables.
+ RecursiveMutex* debugger_access_; // Mutex guarding debugger variables.
Handle<Object> event_listener_; // Global handle to listener.
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
@@ -929,16 +933,16 @@ class Debugger {
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
+ Mutex dispatch_handler_access_; // Mutex guarding dispatch handler.
v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
MessageDispatchHelperThread* message_dispatch_helper_thread_;
- int host_dispatch_micros_;
+ TimeDelta host_dispatch_period_;
DebuggerAgent* agent_;
static const int kQueueInitialSize = 4;
LockingCommandMessageQueue command_queue_;
- Semaphore* command_received_; // Signaled for each command received.
+ Semaphore command_received_; // Signaled for each command received.
LockingCommandMessageQueue event_command_queue_;
Isolate* isolate_;
@@ -956,7 +960,7 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
- EnterDebugger();
+ explicit EnterDebugger(Isolate* isolate);
~EnterDebugger();
// Check whether the debugger could be entered.
@@ -983,12 +987,12 @@ class EnterDebugger BASE_EMBEDDED {
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(bool disable_break) : isolate_(Isolate::Current()) {
+ explicit DisableBreak(Isolate* isolate, bool disable_break)
+ : isolate_(isolate) {
prev_disable_break_ = isolate_->debug()->disable_break();
isolate_->debug()->set_disable_break(disable_break);
}
~DisableBreak() {
- ASSERT(Isolate::Current() == isolate_);
isolate_->debug()->set_disable_break(prev_disable_break_);
}
@@ -1047,7 +1051,7 @@ class Debug_Address {
class MessageDispatchHelperThread: public Thread {
public:
explicit MessageDispatchHelperThread(Isolate* isolate);
- ~MessageDispatchHelperThread();
+ ~MessageDispatchHelperThread() {}
void Schedule();
@@ -1055,8 +1059,8 @@ class MessageDispatchHelperThread: public Thread {
void Run();
Isolate* isolate_;
- Semaphore* const sem_;
- Mutex* const mutex_;
+ Semaphore sem_;
+ Mutex mutex_;
bool already_signalled_;
DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
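
Note also that SetHostDispatchHandler now takes a typed TimeDelta rather than a bare int of milliseconds (previously multiplied into host_dispatch_micros_ by hand), so the unit is named at the call site. A hypothetical call site; the handler name is assumed:

    debugger->SetHostDispatchHandler(MyDispatchHandler,
                                     TimeDelta::FromMilliseconds(100));
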
diff --git a/chromium/v8/src/deoptimizer.cc b/chromium/v8/src/deoptimizer.cc
index 53b9b76377f..c979a534d89 100644
--- a/chromium/v8/src/deoptimizer.cc
+++ b/chromium/v8/src/deoptimizer.cc
@@ -56,11 +56,10 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
- current_(NULL),
#ifdef ENABLE_DEBUGGER_SUPPORT
deoptimized_frame_info_(NULL),
#endif
- deoptimizing_code_list_(NULL) {
+ current_(NULL) {
for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
deopt_entry_code_entries_[i] = -1;
deopt_entry_code_[i] = AllocateCodeChunk(allocator);
@@ -73,14 +72,6 @@ DeoptimizerData::~DeoptimizerData() {
allocator_->Free(deopt_entry_code_[i]);
deopt_entry_code_[i] = NULL;
}
-
- DeoptimizingCodeListNode* current = deoptimizing_code_list_;
- while (current != NULL) {
- DeoptimizingCodeListNode* prev = current;
- current = current->next();
- delete prev;
- }
- deoptimizing_code_list_ = NULL;
}
@@ -93,33 +84,19 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
-Code* DeoptimizerData::FindDeoptimizingCode(Address addr) {
- for (DeoptimizingCodeListNode* node = deoptimizing_code_list_;
- node != NULL;
- node = node->next()) {
- if (node->code()->contains(addr)) return *node->code();
- }
- return NULL;
-}
-
-
-void DeoptimizerData::RemoveDeoptimizingCode(Code* code) {
- for (DeoptimizingCodeListNode *prev = NULL, *cur = deoptimizing_code_list_;
- cur != NULL;
- prev = cur, cur = cur->next()) {
- if (*cur->code() == code) {
- if (prev == NULL) {
- deoptimizing_code_list_ = cur->next();
- } else {
- prev->set_next(cur->next());
- }
- delete cur;
- return;
+Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
+ if (function_->IsHeapObject()) {
+ // Search all deoptimizing code in the native context of the function.
+ Context* native_context = function_->context()->native_context();
+ Object* element = native_context->DeoptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ if (code->contains(addr)) return code;
+ element = code->next_code_link();
}
}
- // Deoptimizing code is removed through weak callback. Each object is expected
- // to be removed once and only once.
- UNREACHABLE();
+ return NULL;
}
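
FindDeoptimizingCode moves off the global DeoptimizingCodeListNode list: deoptimized code is now an intrusive singly-linked list per native context, threaded through Code::next_code_link(). Walking it allocates nothing, so it is safe where heap allocation is forbidden. A sketch of the generic walk, with names from the hunks (the same loop reappears in GetDeoptimizedCodeCount further down):

    // Count deoptimized code objects in one native context, allocation-free.
    int CountDeoptimizedCode(Context* native_context) {
      int count = 0;
      Object* element = native_context->DeoptimizedCodeListHead();
      while (!element->IsUndefined()) {
        Code* code = Code::cast(element);
        count++;
        element = code->next_code_link();
      }
      return count;
    }
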
@@ -289,27 +266,42 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
- Isolate* isolate = context->GetIsolate();
- Zone zone(isolate);
DisallowHeapAllocation no_allocation;
ASSERT(context->IsNativeContext());
visitor->EnterContext(context);
- // Create a snapshot of the optimized functions list. This is needed because
- // visitors might remove more than one link from the list at once.
- ZoneList<JSFunction*> snapshot(1, &zone);
+ // Visit the list of optimized functions, removing elements that
+ // no longer refer to optimized code.
+ JSFunction* prev = NULL;
Object* element = context->OptimizedFunctionsListHead();
while (!element->IsUndefined()) {
- JSFunction* element_function = JSFunction::cast(element);
- snapshot.Add(element_function, &zone);
- element = element_function->next_function_link();
- }
-
- // Run through the snapshot of optimized functions and visit them.
- for (int i = 0; i < snapshot.length(); ++i) {
- visitor->VisitFunction(snapshot.at(i));
+ JSFunction* function = JSFunction::cast(element);
+ Object* next = function->next_function_link();
+ if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
+ (visitor->VisitFunction(function),
+ function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
+ // The function no longer refers to optimized code, or the visitor
+ // changed the code it refers to so that it is no longer optimized.
+ // Remove the function from this list.
+ if (prev != NULL) {
+ prev->set_next_function_link(next);
+ } else {
+ context->SetOptimizedFunctionsListHead(next);
+ }
+ // The visitor should not alter the link directly.
+ ASSERT(function->next_function_link() == next);
+ // Set the next function link to undefined to indicate it is no longer
+ // in the optimized functions list.
+ function->set_next_function_link(context->GetHeap()->undefined_value());
+ } else {
+ // The visitor should not alter the link directly.
+ ASSERT(function->next_function_link() == next);
+ // Preserve this element.
+ prev = function;
+ }
+ element = next;
}
visitor->LeaveContext(context);
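
The visit loop above is the classic single-pass unlink idiom for an intrusive singly-linked list: keep a prev pointer, capture next before visiting, and splice around removed nodes. A minimal standalone version of the idiom in plain C++ (not V8 types):

    struct Node { int value; Node* next; };

    // Remove every node failing `keep`, walking the list exactly once.
    Node* Filter(Node* head, bool (*keep)(const Node*)) {
      Node* prev = nullptr;
      for (Node* cur = head; cur != nullptr;) {
        Node* next = cur->next;                    // Capture before unlinking.
        if (!keep(cur)) {
          if (prev != nullptr) prev->next = next;  // Splice around cur.
          else head = next;                        // Removed the head.
          cur->next = nullptr;  // Detach, like set_next_function_link(undefined).
        } else {
          prev = cur;           // Keep: cur becomes the new prev.
        }
        cur = next;
      }
      return head;
    }
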
@@ -321,7 +313,7 @@ void Deoptimizer::VisitAllOptimizedFunctions(
OptimizedFunctionVisitor* visitor) {
DisallowHeapAllocation no_allocation;
- // Run through the list of all native contexts and deoptimize.
+ // Run through the list of all native contexts.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
@@ -330,217 +322,161 @@ void Deoptimizer::VisitAllOptimizedFunctions(
}
-// Removes the functions selected by the given filter from the optimized
-// function list of the given context and adds their code to the list of
-// code objects to be deoptimized.
-static void SelectCodeToDeoptimize(Context* context,
- OptimizedFunctionFilter* filter,
- ZoneList<Code*>* codes,
- Zone* zone,
- Object* undefined) {
+// Unlink functions referring to code marked for deoptimization, then move
+// marked code from the optimized code list to the deoptimized code list,
+// and patch code for lazy deopt.
+void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
DisallowHeapAllocation no_allocation;
- Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- Object* remainder_head = undefined;
- Object* remainder_tail = undefined;
-
- // TODO(titzer): rewrite to not modify unselected functions.
- while (current != undefined) {
- JSFunction* function = JSFunction::cast(current);
- current = function->next_function_link();
- if (filter->TakeFunction(function)) {
- // Extract this function from the context's list and remember the code.
+
+ // A "closure" that unlinks optimized code that is going to be
+ // deoptimized from the functions that refer to it.
+ class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
+ public:
+ virtual void EnterContext(Context* context) { } // Don't care.
+ virtual void LeaveContext(Context* context) { } // Don't care.
+ virtual void VisitFunction(JSFunction* function) {
Code* code = function->code();
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
- if (code->marked_for_deoptimization()) {
- ASSERT(codes->Contains(code));
- } else {
- code->set_marked_for_deoptimization(true);
- codes->Add(code, zone);
- }
+ if (!code->marked_for_deoptimization()) return;
+
+ // Unlink this function and evict from optimized code map.
SharedFunctionInfo* shared = function->shared();
- // Replace the function's code with the shared code.
function->set_code(shared->code());
- // Evict the code from the optimized code map.
shared->EvictFromOptimizedCodeMap(code, "deoptimized function");
- // Remove the function from the optimized functions list.
- function->set_next_function_link(undefined);
if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
+ PrintF("[deoptimizer unlinked: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
- } else {
- // Don't select this function; link it back into the list.
- if (remainder_head == undefined) {
- remainder_head = function;
- } else {
- JSFunction::cast(remainder_tail)->set_next_function_link(function);
- }
- remainder_tail = function;
}
- }
- if (remainder_tail != undefined) {
- JSFunction::cast(remainder_tail)->set_next_function_link(undefined);
- }
- context->set(Context::OPTIMIZED_FUNCTIONS_LIST, remainder_head);
-}
+ };
+ // Unlink all functions that refer to marked code.
+ SelectedCodeUnlinker unlinker;
+ VisitAllOptimizedFunctionsForContext(context, &unlinker);
-class DeoptimizeAllFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return true;
- }
-};
+ // Move marked code from the optimized code list to the deoptimized
+ // code list, collecting them into a ZoneList.
+ Isolate* isolate = context->GetHeap()->isolate();
+ Zone zone(isolate);
+ ZoneList<Code*> codes(10, &zone);
+ // Walk over all optimized code objects in this native context.
+ Code* prev = NULL;
+ Object* element = context->OptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ Object* next = code->next_code_link();
+ if (code->marked_for_deoptimization()) {
+ // Put the code into the list for later patching.
+ codes.Add(code, &zone);
+
+ if (prev != NULL) {
+ // Skip this code in the optimized code list.
+ prev->set_next_code_link(next);
+ } else {
+ // There was no previous node, the next node is the new head.
+ context->SetOptimizedCodeListHead(next);
+ }
-class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
- public:
- explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {}
- virtual bool TakeFunction(JSFunction* function) {
- return function->code() == code_;
+ // Move the code to the _deoptimized_ code list.
+ code->set_next_code_link(context->DeoptimizedCodeListHead());
+ context->SetDeoptimizedCodeListHead(code);
+ } else {
+ // Not marked; preserve this element.
+ prev = code;
+ }
+ element = next;
}
- private:
- Code* code_;
-};
+ // TODO(titzer): we need a handle scope only because of the macro assembler,
+ // which is only used in EnsureCodeForDeoptimizationEntry.
+ HandleScope scope(isolate);
+ // Now patch all the codes for deoptimization.
+ for (int i = 0; i < codes.length(); i++) {
+ // It is finally time to die, code object.
+ // Do platform-specific patching to force any activations to lazy deopt.
+ PatchCodeForDeoptimization(isolate, codes[i]);
-class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
- public:
- virtual bool TakeFunction(JSFunction* function) {
- return function->code()->marked_for_deoptimization();
+ // We might be in the middle of incremental marking with compaction.
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
}
-};
+}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
- DisallowHeapAllocation no_allocation;
-
if (FLAG_trace_deopt) {
- PrintF("[deoptimize all contexts]\n");
+ PrintF("[deoptimize all code in all contexts]\n");
}
-
- DeoptimizeAllFilter filter;
- DeoptimizeAllFunctionsWith(isolate, &filter);
-}
-
-
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
DisallowHeapAllocation no_allocation;
- DeoptimizeAllFilter filter;
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- ASSERT(proto->IsJSGlobalObject());
- DeoptimizeAllFunctionsForContext(
- GlobalObject::cast(proto)->native_context(), &filter);
- } else if (object->IsGlobalObject()) {
- DeoptimizeAllFunctionsForContext(
- GlobalObject::cast(object)->native_context(), &filter);
+ // For all contexts, mark all code, then deoptimize.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* native_context = Context::cast(context);
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ context = native_context->get(Context::NEXT_CONTEXT_LINK);
}
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- Code* code = function->code();
- if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
- DeoptimizeWithMatchingCodeFilter filter(code);
- DeoptimizeAllFunctionsForContext(
- function->context()->native_context(), &filter);
-}
-
-
-void Deoptimizer::DeoptimizeAllFunctionsForContext(
- Context* context, OptimizedFunctionFilter* filter) {
- ASSERT(context->IsNativeContext());
- Isolate* isolate = context->GetIsolate();
- Object* undefined = isolate->heap()->undefined_value();
- Zone zone(isolate);
- ZoneList<Code*> codes(4, &zone);
- SelectCodeToDeoptimize(context, filter, &codes, &zone, undefined);
- for (int i = 0; i < codes.length(); i++) {
- DeoptimizeCode(isolate, codes.at(i));
+void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize marked code in all contexts]\n");
}
-}
-
-
-void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
- OptimizedFunctionFilter* filter) {
DisallowHeapAllocation no_allocation;
-
- // Run through the list of all native contexts and deoptimize.
+ // For all contexts, deoptimize code already marked.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined()) {
- DeoptimizeAllFunctionsForContext(Context::cast(context), filter);
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ Context* native_context = Context::cast(context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ context = native_context->get(Context::NEXT_CONTEXT_LINK);
}
}
-void Deoptimizer::DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes) {
- if (codes->length() == 0) return; // Nothing to do.
-
- // Mark the code; any functions refering to this code will be selected.
- for (int i = 0; i < codes->length(); i++) {
- ASSERT(!codes->at(i)->marked_for_deoptimization());
- codes->at(i)->set_marked_for_deoptimization(true);
- }
-
- // For all contexts, remove optimized functions that refer to the selected
- // code from the optimized function lists.
- Object* undefined = isolate->heap()->undefined_value();
- Zone zone(isolate);
- Object* list = isolate->heap()->native_contexts_list();
- DeoptimizeMarkedCodeFilter filter;
- while (!list->IsUndefined()) {
- Context* context = Context::cast(list);
- // Note that selecting code unlinks the functions that refer to it.
- SelectCodeToDeoptimize(context, &filter, codes, &zone, undefined);
- list = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize global object @ 0x%08" V8PRIxPTR "]\n",
+ reinterpret_cast<intptr_t>(object));
}
-
- // Now deoptimize all the code.
- for (int i = 0; i < codes->length(); i++) {
- DeoptimizeCode(isolate, codes->at(i));
+ if (object->IsJSGlobalProxy()) {
+ Object* proto = object->GetPrototype();
+ ASSERT(proto->IsJSGlobalObject());
+ Context* native_context = GlobalObject::cast(proto)->native_context();
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
+ } else if (object->IsGlobalObject()) {
+ Context* native_context = GlobalObject::cast(object)->native_context();
+ MarkAllCodeForContext(native_context);
+ DeoptimizeMarkedCodeForContext(native_context);
}
}
-void Deoptimizer::DeoptimizeCode(Isolate* isolate, Code* code) {
- HandleScope scope(isolate);
- DisallowHeapAllocation nha;
-
- // Do platform-specific patching of the optimized code.
- PatchCodeForDeoptimization(isolate, code);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+void Deoptimizer::MarkAllCodeForContext(Context* context) {
+ Object* element = context->OptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ code->set_marked_for_deoptimization(true);
+ element = code->next_code_link();
+ }
}
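
Deoptimization is now uniformly two-phase: MarkAllCodeForContext() (or a caller setting marked_for_deoptimization on specific Code objects) selects, and DeoptimizeMarkedCodeForContext() unlinks functions, migrates the code to the deoptimized list, and patches it for lazy deopt. A hedged usage sketch for a single context, assuming both helpers are visible at the call site:

    // Phase 1: select. Mark every optimized Code in this context (or mark
    // individual Code objects, as DeoptimizeFunction does below).
    Deoptimizer::MarkAllCodeForContext(native_context);
    // Phase 2: process. Unlink referring functions, move marked code to the
    // deoptimized list, and patch it.
    Deoptimizer::DeoptimizeMarkedCodeForContext(native_context);
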
-void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* parameter) {
- DeoptimizingCodeListNode* node =
- reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
- DeoptimizerData* data =
- reinterpret_cast<Isolate*>(isolate)->deoptimizer_data();
- data->RemoveDeoptimizingCode(*node->code());
-#ifdef DEBUG
- for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
- current != NULL;
- current = current->next()) {
- ASSERT(current != node);
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ Code* code = function->code();
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ // Mark the code for deoptimization and unlink any functions that also
+ // refer to that code. The code cannot be shared across native contexts,
+ // so we only need to search one.
+ code->set_marked_for_deoptimization(true);
+ DeoptimizeMarkedCodeForContext(function->context()->native_context());
}
-#endif
}
@@ -559,8 +495,6 @@ bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
return (frame_type == StackFrame::STUB)
? FLAG_trace_stub_failures
: FLAG_trace_deopt;
- case OSR:
- return FLAG_trace_osr;
}
UNREACHABLE();
return false;
@@ -573,7 +507,6 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
case SOFT: return "soft";
case LAZY: return "lazy";
case DEBUGGER: return "debugger";
- case OSR: return "OSR";
}
UNREACHABLE();
return NULL;
@@ -602,6 +535,12 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
deferred_objects_double_values_(0),
deferred_objects_(0),
deferred_heap_numbers_(0),
+ jsframe_functions_(0),
+ jsframe_has_adapted_arguments_(0),
+ materialized_values_(NULL),
+ materialized_objects_(NULL),
+ materialization_value_index_(0),
+ materialization_object_index_(0),
trace_(false) {
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
// indicating an internal frame.
@@ -621,6 +560,14 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
}
}
compiled_code_ = FindOptimizedCode(function, optimized_code);
+
+#if DEBUG
+ ASSERT(compiled_code_ != NULL);
+ if (type == EAGER || type == SOFT || type == LAZY) {
+ ASSERT(compiled_code_->kind() != Code::FUNCTION);
+ }
+#endif
+
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
@@ -641,21 +588,11 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
case Deoptimizer::SOFT:
case Deoptimizer::EAGER:
case Deoptimizer::LAZY: {
- Code* compiled_code =
- isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
+ Code* compiled_code = FindDeoptimizingCode(from_);
return (compiled_code == NULL)
? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
- case Deoptimizer::OSR: {
- // The function has already been optimized and we're transitioning
- // from the unoptimized shared version to the optimized one in the
- // function. The return address (from_) points to unoptimized code.
- Code* compiled_code = function->code();
- ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!compiled_code->contains(from_));
- return compiled_code;
- }
case Deoptimizer::DEBUGGER:
ASSERT(optimized_code->contains(from_));
return optimized_code;
@@ -759,11 +696,18 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
- DeoptimizingCodeListNode* node =
- isolate->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- length++;
- node = node->next();
+ // Count all entries in the deoptimizing code list of every context.
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* native_context = Context::cast(context);
+ Object* element = native_context->DeoptimizedCodeListHead();
+ while (!element->IsUndefined()) {
+ Code* code = Code::cast(element);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ length++;
+ element = code->next_code_link();
+ }
+ context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
return length;
}
@@ -772,18 +716,14 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
- if (bailout_type_ == OSR) {
- DoComputeOsrOutputFrame();
- return;
- }
-
// Print some helpful diagnostic information.
- int64_t start = OS::Ticks();
if (FLAG_log_timer_events &&
compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
+ ElapsedTimer timer;
if (trace_) {
+ timer.Start();
PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
@@ -864,7 +804,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Print some helpful diagnostic information.
if (trace_) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
@@ -1208,7 +1148,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
+ int deferred_object_index = deferred_objects_.length();
DoTranslateCommand(iterator, frame_index, output_offset);
+ // The allocated receiver of a construct stub frame is passed as the
+ // receiver parameter through the translation. It might be encoding
+ // a captured object, patch the slot address for a captured object.
+ if (i == 0 && deferred_objects_.length() > deferred_object_index) {
+ ASSERT(!deferred_objects_[deferred_object_index].is_arguments());
+ deferred_objects_[deferred_object_index].patch_slot_address(top_address);
+ }
}
// Read caller's PC from the previous frame.
@@ -1633,9 +1581,105 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
+Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
+ int object_index = materialization_object_index_++;
+ ObjectMaterializationDescriptor desc = deferred_objects_[object_index];
+ const int length = desc.object_length();
+
+ if (desc.duplicate_object() >= 0) {
+ // Found a previously materialized object by de-duplication.
+ object_index = desc.duplicate_object();
+ materialized_objects_->Add(Handle<Object>());
+ } else if (desc.is_arguments() && ArgumentsObjectIsAdapted(object_index)) {
+ // Use the arguments adapter frame we just built to materialize the
+ // arguments object. FunctionGetArguments can't throw an exception.
+ Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
+ Handle<JSObject> arguments = Handle<JSObject>::cast(
+ Accessors::FunctionGetArguments(function));
+ materialized_objects_->Add(arguments);
+ materialization_value_index_ += length;
+ } else if (desc.is_arguments()) {
+ // Construct an arguments object and copy the parameters to a newly
+ // allocated arguments object backing store.
+ Handle<JSFunction> function = ArgumentsObjectFunction(object_index);
+ Handle<JSObject> arguments =
+ isolate_->factory()->NewArgumentsObject(function, length);
+ Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
+ ASSERT(array->length() == length);
+ arguments->set_elements(*array);
+ materialized_objects_->Add(arguments);
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = MaterializeNextValue();
+ array->set(i, *value);
+ }
+ } else {
+ // Dispatch on the instance type of the object to be materialized.
+ Handle<Map> map = Handle<Map>::cast(MaterializeNextValue());
+ switch (map->instance_type()) {
+ case HEAP_NUMBER_TYPE: {
+ Handle<HeapNumber> number =
+ Handle<HeapNumber>::cast(MaterializeNextValue());
+ materialized_objects_->Add(number);
+ materialization_value_index_ += kDoubleSize / kPointerSize - 1;
+ break;
+ }
+ case JS_OBJECT_TYPE: {
+ Handle<JSObject> object =
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
+ materialized_objects_->Add(object);
+ Handle<Object> properties = MaterializeNextValue();
+ Handle<Object> elements = MaterializeNextValue();
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ for (int i = 0; i < length - 3; ++i) {
+ Handle<Object> value = MaterializeNextValue();
+ object->FastPropertyAtPut(i, *value);
+ }
+ break;
+ }
+ case JS_ARRAY_TYPE: {
+ Handle<JSArray> object =
+ isolate_->factory()->NewJSArray(0, map->elements_kind());
+ materialized_objects_->Add(object);
+ Handle<Object> properties = MaterializeNextValue();
+ Handle<Object> elements = MaterializeNextValue();
+ Handle<Object> length = MaterializeNextValue();
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_length(*length);
+ break;
+ }
+ default:
+ PrintF("[couldn't handle instance type %d]\n", map->instance_type());
+ UNREACHABLE();
+ }
+ }
+
+ return materialized_objects_->at(object_index);
+}
+
+
+Handle<Object> Deoptimizer::MaterializeNextValue() {
+ int value_index = materialization_value_index_++;
+ Handle<Object> value = materialized_values_->at(value_index);
+ if (*value == isolate_->heap()->arguments_marker()) {
+ value = MaterializeNextHeapObject();
+ }
+ return value;
+}
+
+
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
+ // Walk all JavaScript output frames with the given frame iterator.
+ for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
+ if (frame_index != 0) it->Advance();
+ JavaScriptFrame* frame = it->frame();
+ jsframe_functions_.Add(handle(frame->function(), isolate_));
+ jsframe_has_adapted_arguments_.Add(frame->has_adapted_arguments());
+ }
+
// Handlify all tagged object values before triggering any allocation.
List<Handle<Object> > values(deferred_objects_tagged_values_.length());
for (int i = 0; i < deferred_objects_tagged_values_.length(); ++i) {
@@ -1652,7 +1696,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
if (trace_) {
- PrintF("Materializing a new heap number %p [%e] in slot %p\n",
+ PrintF("Materialized a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address());
@@ -1660,62 +1704,52 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
Memory::Object_at(d.slot_address()) = *num;
}
- // Materialize all heap numbers required for arguments objects.
+ // Materialize all heap numbers required for arguments/captured objects.
for (int i = 0; i < values.length(); i++) {
if (!values.at(i)->IsTheHole()) continue;
double double_value = deferred_objects_double_values_[i];
Handle<Object> num = isolate_->factory()->NewNumber(double_value);
if (trace_) {
- PrintF("Materializing a new heap number %p [%e] for arguments object\n",
+ PrintF("Materialized a new heap number %p [%e] for object\n",
reinterpret_cast<void*>(*num), double_value);
}
values.Set(i, num);
}
- // Materialize arguments objects one frame at a time.
- for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
- if (frame_index != 0) it->Advance();
- JavaScriptFrame* frame = it->frame();
- Handle<JSFunction> function(frame->function(), isolate_);
- Handle<JSObject> arguments;
- for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
- if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
- ObjectMaterializationDescriptor descriptor =
- deferred_objects_.RemoveLast();
- const int length = descriptor.object_length();
- if (arguments.is_null()) {
- if (frame->has_adapted_arguments()) {
- // Use the arguments adapter frame we just built to materialize the
- // arguments object. FunctionGetArguments can't throw an exception.
- arguments = Handle<JSObject>::cast(
- Accessors::FunctionGetArguments(function));
- values.RewindBy(length);
- } else {
- // Construct an arguments object and copy the parameters to a newly
- // allocated arguments object backing store.
- arguments =
- isolate_->factory()->NewArgumentsObject(function, length);
- Handle<FixedArray> array =
- isolate_->factory()->NewFixedArray(length);
- ASSERT(array->length() == length);
- for (int i = length - 1; i >= 0 ; --i) {
- array->set(i, *values.RemoveLast());
- }
- arguments->set_elements(*array);
- }
- }
- frame->SetExpression(i, *arguments);
- ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
- if (trace_) {
- PrintF("Materializing %sarguments object of length %d for %p: ",
- frame->has_adapted_arguments() ? "(adapted) " : "",
- arguments->elements()->length(),
+ // Materialize arguments/captured objects.
+ if (!deferred_objects_.is_empty()) {
+ List<Handle<Object> > materialized_objects(deferred_objects_.length());
+ materialized_objects_ = &materialized_objects;
+ materialized_values_ = &values;
+
+ while (materialization_object_index_ < deferred_objects_.length()) {
+ int object_index = materialization_object_index_;
+ ObjectMaterializationDescriptor descriptor =
+ deferred_objects_.at(object_index);
+
+ // Find a previously materialized object by de-duplication or
+ // materialize a new instance of the object if necessary. Store
+ // the materialized object into the frame slot.
+ Handle<Object> object = MaterializeNextHeapObject();
+ Memory::Object_at(descriptor.slot_address()) = *object;
+ if (trace_) {
+ if (descriptor.is_arguments()) {
+ PrintF("Materialized %sarguments object of length %d for %p: ",
+ ArgumentsObjectIsAdapted(object_index) ? "(adapted) " : "",
+ Handle<JSObject>::cast(object)->elements()->length(),
+ reinterpret_cast<void*>(descriptor.slot_address()));
+ } else {
+ PrintF("Materialized captured object of size %d for %p: ",
+ Handle<HeapObject>::cast(object)->Size(),
reinterpret_cast<void*>(descriptor.slot_address()));
- arguments->ShortPrint();
- PrintF("\n");
}
+ object->ShortPrint();
+ PrintF("\n");
}
}
+
+ ASSERT(materialization_object_index_ == materialized_objects_->length());
+ ASSERT(materialization_value_index_ == materialized_values_->length());
}
}
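
The machinery above reconstructs objects from a flat stream: translated slots holding the arguments marker are placeholders, and MaterializeNextValue() recurses into MaterializeNextHeapObject() whenever it meets one, so nested captured objects materialize depth-first while both indices advance through their streams exactly once. A toy standalone model of that control flow (ints stand in for heap values; the sentinel plays the role of arguments_marker):

    #include <vector>

    struct Desc { int length; };       // One per deferred object.
    const int kMarker = -1;            // Stand-in for the arguments marker.

    struct Materializer {
      std::vector<Desc> objects;       // Like deferred_objects_.
      std::vector<int> values;         // Flat tagged-value stream.
      size_t object_index = 0;
      size_t value_index = 0;

      // Consume one value; the sentinel means "materialize the next object".
      int NextValue() {
        int v = values[value_index++];
        return v == kMarker ? NextObject() : v;
      }

      // Consume one object description plus its `length` field values,
      // recursing through NextValue() for nested objects (depth-first).
      int NextObject() {
        Desc d = objects[object_index++];
        int sum = 0;                   // A sum stands in for the built object.
        for (int i = 0; i < d.length; ++i) sum += NextValue();
        return sum;
      }
    };
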
@@ -1786,10 +1820,10 @@ static const char* TraceValueType(bool is_smi, bool is_native = false) {
void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
- int object_opcode,
+ int object_index,
int field_index) {
disasm::NameConverter converter;
- Address object_slot = deferred_objects_.last().slot_address();
+ Address object_slot = deferred_objects_[object_index].slot_address();
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
@@ -1802,7 +1836,6 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::COMPILED_STUB_FRAME:
- case Translation::ARGUMENTS_OBJECT:
UNREACHABLE();
return;
@@ -1972,6 +2005,50 @@ void Deoptimizer::DoTranslateObject(TranslationIterator* iterator,
AddObjectTaggedValue(value);
return;
}
+
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator->Next();
+ if (trace_) {
+ PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ isolate_->heap()->arguments_marker()->ShortPrint();
+ PrintF(" ; duplicate of object #%d\n", object_index);
+ }
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
+ AddObjectDuplication(0, object_index);
+ AddObjectTaggedValue(value);
+ return;
+ }
+
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT: {
+ int length = iterator->Next();
+ bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
+ if (trace_) {
+ PrintF(" nested @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ isolate_->heap()->arguments_marker()->ShortPrint();
+ PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
+ }
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
+ AddObjectStart(0, length, is_args);
+ AddObjectTaggedValue(value);
+ // We save the object values on the side and materialize the actual
+ // object after the deoptimized frame is built.
+ int object_index = deferred_objects_.length() - 1;
+ for (int i = 0; i < length; i++) {
+ DoTranslateObject(iterator, object_index, i);
+ }
+ return;
+ }
}
}
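// Editor's sketch (not part of the diff): both object opcodes above use the
// same two-phase scheme -- write the arguments marker into the slot now,
// record a descriptor, and patch the real object in once the output frame
// exists and allocation is safe again. A standalone model:
//
//   #include <cstdint>
//   #include <vector>
//
//   static const intptr_t kMarker = -1;  // stands in for arguments_marker()
//
//   struct PendingSlot { intptr_t* slot; };
//
//   void TranslateObjectSlot(intptr_t* slot,
//                            std::vector<PendingSlot>* pending) {
//     *slot = kMarker;           // phase 1: sentinel goes into the frame
//     PendingSlot p = { slot };
//     pending->push_back(p);     // remember where to patch later
//   }
//
//   void MaterializeObjects(const std::vector<PendingSlot>& pending,
//                           intptr_t materialized) {
//     for (size_t i = 0; i < pending.size(); ++i)
//       *pending[i].slot = materialized;  // phase 2: replace the sentinels
//   }
//
//   int main() {
//     intptr_t frame_slot = 0;
//     std::vector<PendingSlot> pending;
//     TranslateObjectSlot(&frame_slot, &pending);  // frame_slot == kMarker
//     MaterializeObjects(pending, 42);             // frame_slot == 42
//     return frame_slot == 42 ? 0 : 1;
//   }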
@@ -2211,305 +2288,130 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
- case Translation::ARGUMENTS_OBJECT: {
- int length = iterator->Next();
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator->Next();
if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; arguments object (length = %d)\n", length);
+ PrintF(" ; duplicate of object #%d\n", object_index);
}
- // Use the arguments marker value as a sentinel and fill in the arguments
- // object after the deoptimized frame is built.
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
intptr_t value = reinterpret_cast<intptr_t>(
isolate_->heap()->arguments_marker());
- AddObjectStart(output_[frame_index]->GetTop() + output_offset, length);
+ AddObjectDuplication(output_[frame_index]->GetTop() + output_offset,
+ object_index);
output_[frame_index]->SetFrameSlot(output_offset, value);
- // We save the argument values on the side and materialize the actual
- // arguments object after the deoptimized frame is built.
- for (int i = 0; i < length; i++) {
- DoTranslateObject(iterator, Translation::ARGUMENTS_OBJECT, i);
- }
return;
}
- }
-}
-
-bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset) {
- disasm::NameConverter converter;
- FrameDescription* output = output_[0];
-
- // The input values are all part of the unoptimized frame so they
- // are all tagged pointers.
- uintptr_t input_value = input_->GetFrameSlot(*input_offset);
- Object* input_object = reinterpret_cast<Object*>(input_value);
-
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
-
- switch (opcode) {
- case Translation::BEGIN:
- case Translation::JS_FRAME:
- case Translation::ARGUMENTS_ADAPTOR_FRAME:
- case Translation::CONSTRUCT_STUB_FRAME:
- case Translation::GETTER_STUB_FRAME:
- case Translation::SETTER_STUB_FRAME:
- case Translation::COMPILED_STUB_FRAME:
- UNREACHABLE(); // Malformed input.
- return false;
-
- case Translation::REGISTER: {
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- input_value,
- *input_offset);
- }
- output->SetRegister(output_reg, input_value);
- break;
- }
-
- case Translation::INT32_REGISTER: {
- int32_t int32_value = 0;
- if (!input_object->ToInt32(&int32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %d (int32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- int32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, int32_value);
- break;
- }
-
- case Translation::UINT32_REGISTER: {
- uint32_t uint32_value = 0;
- if (!input_object->ToUint32(&uint32_value)) return false;
-
- int output_reg = iterator->Next();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
- converter.NameOfCPURegister(output_reg),
- uint32_value,
- *input_offset);
- }
- output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
-      break;
-    }
-
-
- case Translation::DOUBLE_REGISTER: {
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_reg = iterator->Next();
- double double_value = input_object->Number();
- if (FLAG_trace_osr) {
- PrintF(" %s <- %g (double) ; [sp + %d]\n",
- DoubleRegister::AllocationIndexToString(output_reg),
- double_value,
- *input_offset);
- }
- output->SetDoubleRegister(output_reg, double_value);
- break;
- }
-
- case Translation::STACK_SLOT: {
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
- output_offset,
- input_value,
- *input_offset);
- reinterpret_cast<Object*>(input_value)->ShortPrint();
- PrintF("\n");
- }
- output->SetFrameSlot(output_offset, input_value);
- break;
- }
-
- case Translation::INT32_STACK_SLOT: {
- int32_t int32_value = 0;
- if (!input_object->ToInt32(&int32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
- output_offset,
- int32_value,
- *input_offset);
- }
- output->SetFrameSlot(output_offset, int32_value);
- break;
- }
-
- case Translation::UINT32_STACK_SLOT: {
- uint32_t uint32_value = 0;
- if (!input_object->ToUint32(&uint32_value)) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
- output_offset,
- uint32_value,
- *input_offset);
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT: {
+ int length = iterator->Next();
+ bool is_args = opcode == Translation::ARGUMENTS_OBJECT;
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset);
+ isolate_->heap()->arguments_marker()->ShortPrint();
+ PrintF(" ; object (length = %d, is_args = %d)\n", length, is_args);
}
- output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
- break;
- }
-
- case Translation::DOUBLE_STACK_SLOT: {
- static const int kLowerOffset = 0 * kPointerSize;
- static const int kUpperOffset = 1 * kPointerSize;
-
- // Abort OSR if we don't have a number.
- if (!input_object->IsNumber()) return false;
-
- int output_index = iterator->Next();
- unsigned output_offset =
- output->GetOffsetFromSlotIndex(output_index);
- double double_value = input_object->Number();
- uint64_t int_value = BitCast<uint64_t, double>(double_value);
- int32_t lower = static_cast<int32_t>(int_value);
- int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
- output_offset + kUpperOffset,
- upper,
- double_value,
- *input_offset);
- PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
- output_offset + kLowerOffset,
- lower,
- double_value,
- *input_offset);
+ // Use the materialization marker value as a sentinel and fill in
+ // the object after the deoptimized frame is built.
+ intptr_t value = reinterpret_cast<intptr_t>(
+ isolate_->heap()->arguments_marker());
+ AddObjectStart(output_[frame_index]->GetTop() + output_offset,
+ length, is_args);
+ output_[frame_index]->SetFrameSlot(output_offset, value);
+ // We save the object values on the side and materialize the actual
+ // object after the deoptimized frame is built.
+ int object_index = deferred_objects_.length() - 1;
+ for (int i = 0; i < length; i++) {
+ DoTranslateObject(iterator, object_index, i);
}
- output->SetFrameSlot(output_offset + kLowerOffset, lower);
- output->SetFrameSlot(output_offset + kUpperOffset, upper);
- break;
- }
-
- case Translation::LITERAL: {
- // Just ignore non-materialized literals.
- iterator->Next();
- break;
- }
-
- case Translation::ARGUMENTS_OBJECT: {
- // Optimized code assumes that the argument object has not been
- // materialized and so bypasses it when doing arguments access.
- // We should have bailed out before starting the frame
- // translation.
- UNREACHABLE();
- return false;
+ return;
}
}
-
- *input_offset -= kPointerSize;
- return true;
}
-void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
+void Deoptimizer::PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* replacement_code =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
- Address back_edge_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->back_edge_table_offset();
- uint32_t table_length = Memory::uint32_at(back_edge_cursor);
- back_edge_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
- if (static_cast<int>(loop_depth) == loop_nesting_level) {
- // Loop back edge has the loop depth that we want to patch.
- uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- PatchInterruptCodeAt(unoptimized_code,
- pc_after,
- interrupt_code,
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
+ !back_edges.Done();
+ back_edges.Next()) {
+ if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
+ ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
+ unoptimized,
+ back_edges.pc()));
+ PatchInterruptCodeAt(unoptimized,
+ back_edges.pc(),
replacement_code);
}
- back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
- unoptimized_code->set_back_edges_patched_for_osr(true);
-#ifdef DEBUG
- Deoptimizer::VerifyInterruptCode(
- unoptimized_code, interrupt_code, replacement_code, loop_nesting_level);
-#endif // DEBUG
+
+ unoptimized->set_back_edges_patched_for_osr(true);
+ ASSERT(Deoptimizer::VerifyInterruptCode(
+ isolate, unoptimized, loop_nesting_level));
}
-void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
+void Deoptimizer::RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* interrupt_code =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
// Iterate over the back edge table and revert the patched interrupt calls.
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- ASSERT(unoptimized_code->back_edges_patched_for_osr());
- int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
- Address back_edge_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->back_edge_table_offset();
- uint32_t table_length = Memory::uint32_at(back_edge_cursor);
- back_edge_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
- if (static_cast<int>(loop_depth) <= loop_nesting_level) {
- uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- RevertInterruptCodeAt(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code);
+ ASSERT(unoptimized->back_edges_patched_for_osr());
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
+ !back_edges.Done();
+ back_edges.Next()) {
+ if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
+ ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
+ unoptimized,
+ back_edges.pc()));
+ RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
}
- back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
- unoptimized_code->set_back_edges_patched_for_osr(false);
- unoptimized_code->set_allow_osr_at_loop_nesting_level(0);
-#ifdef DEBUG
+
+ unoptimized->set_back_edges_patched_for_osr(false);
+ unoptimized->set_allow_osr_at_loop_nesting_level(0);
// Assert that none of the back edges are patched anymore.
- Deoptimizer::VerifyInterruptCode(
- unoptimized_code, interrupt_code, replacement_code, -1);
-#endif // DEBUG
+ ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
}
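// Editor's sketch (not part of the diff): under my reading of the == versus
// <= tests above, PatchInterruptCode patches only entries at exactly the
// allowed nesting level (shallower levels were patched by earlier calls as
// the allowed level grew), while RevertInterruptCode undoes every entry at
// or below it. A standalone model of that asymmetry:
//
//   #include <cassert>
//   #include <vector>
//
//   struct BackEdge { int loop_depth; bool patched; };
//
//   void PatchForOsr(std::vector<BackEdge>& table, int level) {
//     for (size_t i = 0; i < table.size(); ++i)
//       if (table[i].loop_depth == level) table[i].patched = true;
//   }
//
//   void RevertOsr(std::vector<BackEdge>& table, int level) {
//     for (size_t i = 0; i < table.size(); ++i)
//       if (table[i].loop_depth <= level) table[i].patched = false;
//   }
//
//   int main() {
//     BackEdge init[] = { {1, false}, {2, false} };
//     std::vector<BackEdge> table(init, init + 2);
//     PatchForOsr(table, 1);   // pass when allowed level was 1
//     PatchForOsr(table, 2);   // later pass at level 2
//     assert(table[0].patched && table[1].patched);
//     RevertOsr(table, 2);     // revert clears everything <= 2
//     assert(!table[0].patched && !table[1].patched);
//     return 0;
//   }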
#ifdef DEBUG
-void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized,
int loop_nesting_level) {
- CHECK(unoptimized_code->kind() == Code::FUNCTION);
- Address back_edge_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->back_edge_table_offset();
- uint32_t table_length = Memory::uint32_at(back_edge_cursor);
- back_edge_cursor += kIntSize;
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
+ DisallowHeapAllocation no_gc;
+ for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
+ !back_edges.Done();
+ back_edges.Next()) {
+ uint32_t loop_depth = back_edges.loop_depth();
CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
- uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
- InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
- back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
+ GetInterruptPatchState(isolate,
+ unoptimized,
+ back_edges.pc()) != NOT_PATCHED);
}
+ return true;
}
#endif // DEBUG
@@ -2520,12 +2422,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// into account so we have to avoid double counting them (-2).
unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
- if (bailout_type_ == OSR) {
- // TODO(kasperl): It would be nice if we could verify that the
-    // size matches the stack height we can compute based on the
-    // environment at the OSR entry. The code for that is built into
- // the DoComputeOsrOutputFrame function for now.
- } else if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
+ if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
@@ -2571,9 +2468,16 @@ Object* Deoptimizer::ComputeLiteral(int index) const {
}
-void Deoptimizer::AddObjectStart(intptr_t slot_address, int length) {
+void Deoptimizer::AddObjectStart(intptr_t slot, int length, bool is_args) {
ObjectMaterializationDescriptor object_desc(
- reinterpret_cast<Address>(slot_address), length);
+ reinterpret_cast<Address>(slot), jsframe_count_, length, -1, is_args);
+ deferred_objects_.Add(object_desc);
+}
+
+
+void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) {
+ ObjectMaterializationDescriptor object_desc(
+ reinterpret_cast<Address>(slot), jsframe_count_, -1, object_index, false);
deferred_objects_.Add(object_desc);
}
@@ -2801,6 +2705,18 @@ void Translation::BeginArgumentsObject(int args_length) {
}
+void Translation::BeginCapturedObject(int length) {
+ buffer_->Add(CAPTURED_OBJECT, zone());
+ buffer_->Add(length, zone());
+}
+
+
+void Translation::DuplicateObject(int object_index) {
+ buffer_->Add(DUPLICATED_OBJECT, zone());
+ buffer_->Add(object_index, zone());
+}
+
+
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
@@ -2869,7 +2785,9 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
+ case DUPLICATED_OBJECT:
case ARGUMENTS_OBJECT:
+ case CAPTURED_OBJECT:
case REGISTER:
case INT32_REGISTER:
case UINT32_REGISTER:
@@ -2929,8 +2847,12 @@ const char* Translation::StringFor(Opcode opcode) {
return "DOUBLE_STACK_SLOT";
case LITERAL:
return "LITERAL";
+ case DUPLICATED_OBJECT:
+ return "DUPLICATED_OBJECT";
case ARGUMENTS_OBJECT:
return "ARGUMENTS_OBJECT";
+ case CAPTURED_OBJECT:
+ return "CAPTURED_OBJECT";
}
UNREACHABLE();
return "";
@@ -2939,22 +2861,6 @@ const char* Translation::StringFor(Opcode opcode) {
#endif
-DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
- GlobalHandles* global_handles = code->GetIsolate()->global_handles();
- // Globalize the code object and make it weak.
- code_ = Handle<Code>::cast(global_handles->Create(code));
- global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
- this,
- Deoptimizer::HandleWeakDeoptimizedCode);
-}
-
-
-DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
- GlobalHandles* global_handles = code_->GetIsolate()->global_handles();
- global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
-}
-
-
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
@@ -2974,7 +2880,9 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
// Peeled off before getting here.
break;
+ case Translation::DUPLICATED_OBJECT:
case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT:
      // This can only be emitted for local slots, not for argument slots.
break;
diff --git a/chromium/v8/src/deoptimizer.h b/chromium/v8/src/deoptimizer.h
index 7ad1ab0b2e7..7ee5908f762 100644
--- a/chromium/v8/src/deoptimizer.h
+++ b/chromium/v8/src/deoptimizer.h
@@ -58,7 +58,6 @@ static inline double read_double_value(Address p) {
class FrameDescription;
class TranslationIterator;
-class DeoptimizingCodeListNode;
class DeoptimizedFrameInfo;
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
@@ -77,15 +76,31 @@ class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
- ObjectMaterializationDescriptor(Address slot_address, int length)
- : slot_address_(slot_address), object_length_(length) { }
+ ObjectMaterializationDescriptor(
+ Address slot_address, int frame, int length, int duplicate, bool is_args)
+ : slot_address_(slot_address),
+ jsframe_index_(frame),
+ object_length_(length),
+ duplicate_object_(duplicate),
+ is_arguments_(is_args) { }
Address slot_address() const { return slot_address_; }
+ int jsframe_index() const { return jsframe_index_; }
int object_length() const { return object_length_; }
+ int duplicate_object() const { return duplicate_object_; }
+ bool is_arguments() const { return is_arguments_; }
+
+ // Only used for allocated receivers in DoComputeConstructStubFrame.
+ void patch_slot_address(intptr_t slot) {
+ slot_address_ = reinterpret_cast<Address>(slot);
+ }
private:
Address slot_address_;
+ int jsframe_index_;
int object_length_;
+ int duplicate_object_;
+ bool is_arguments_;
};
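// Editor's sketch (not part of the diff): the two descriptor shapes recorded
// by AddObjectStart and AddObjectDuplication, under the encoding visible in
// the class above -- a fresh object carries a length and duplicate index -1,
// a duplication carries length -1 and the index of the object it aliases.
// Standalone mirror of that convention (names illustrative):
//
//   #include <cassert>
//
//   struct Desc {
//     int object_length;     // -1 for duplication records
//     int duplicate_object;  // -1 for fresh objects
//   };
//
//   bool IsDuplicate(const Desc& d) { return d.duplicate_object >= 0; }
//
//   int main() {
//     Desc fresh = { 2, -1 };  // materialize a new 2-field object
//     Desc dup   = { -1, 0 };  // alias of previously recorded object #0
//     assert(!IsDuplicate(fresh) && IsDuplicate(dup));
//     return 0;
//   }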
@@ -105,29 +120,22 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
-class OptimizedFunctionFilter BASE_EMBEDDED {
- public:
- virtual ~OptimizedFunctionFilter() {}
-
- virtual bool TakeFunction(JSFunction* function) = 0;
-};
-
-
-class Deoptimizer;
-
-
class Deoptimizer : public Malloced {
public:
enum BailoutType {
EAGER,
LAZY,
SOFT,
- OSR,
// This last bailout type is not really a bailout, but used by the
// debugger to deoptimize stack frames to allow inspection.
DEBUGGER
};
+ enum InterruptPatchState {
+ NOT_PATCHED,
+ PATCHED_FOR_OSR
+ };
+
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -150,7 +158,9 @@ class Deoptimizer : public Malloced {
int output_count() const { return output_count_; }
- Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
+ Handle<JSFunction> function() const { return Handle<JSFunction>(function_); }
+ Handle<Code> compiled_code() const { return Handle<Code>(compiled_code_); }
+ BailoutType bailout_type() const { return bailout_type_; }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -185,68 +195,54 @@ class Deoptimizer : public Malloced {
// execution returns.
static void DeoptimizeFunction(JSFunction* function);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
-
- // Deoptimize all functions in the heap.
+ // Deoptimize all code in the given isolate.
static void DeoptimizeAll(Isolate* isolate);
+ // Deoptimize code associated with the given global object.
static void DeoptimizeGlobalObject(JSObject* object);
- static void DeoptimizeAllFunctionsWith(Isolate* isolate,
- OptimizedFunctionFilter* filter);
-
- static void DeoptimizeCodeList(Isolate* isolate, ZoneList<Code*>* codes);
-
- static void DeoptimizeAllFunctionsForContext(
- Context* context, OptimizedFunctionFilter* filter);
-
- static void VisitAllOptimizedFunctionsForContext(
- Context* context, OptimizedFunctionVisitor* visitor);
+ // Deoptimizes all optimized code that has been previously marked
+ // (via code->set_marked_for_deoptimization) and unlinks all functions that
+ // refer to that code.
+ static void DeoptimizeMarkedCode(Isolate* isolate);
- static void VisitAllOptimizedFunctions(Isolate* isolate,
- OptimizedFunctionVisitor* visitor);
+ // Visit all the known optimized functions in a given isolate.
+ static void VisitAllOptimizedFunctions(
+ Isolate* isolate, OptimizedFunctionVisitor* visitor);
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
- static void PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code);
  // Change all interrupts patched in the unoptimized code
// back to normal interrupts.
- static void RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
  // Change a patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ Code* interrupt_code);
#ifdef DEBUG
- static bool InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
- static void VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+ static bool VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
@@ -359,7 +355,6 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeOsrOutputFrame();
void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index);
@@ -372,7 +367,7 @@ class Deoptimizer : public Malloced {
int frame_index);
void DoTranslateObject(TranslationIterator* iterator,
- int object_opcode,
+ int object_index,
int field_index);
enum DeoptimizerTranslatedValueType {
@@ -385,13 +380,6 @@ class Deoptimizer : public Malloced {
unsigned output_offset,
DeoptimizerTranslatedValueType value_type = TRANSLATED_VALUE_IS_TAGGED);
- // Translate a command for OSR. Updates the input offset to be used for
- // the next command. Returns false if translation of the command failed
- // (e.g., a number conversion failed) and may or may not have updated the
- // input offset.
- bool DoOsrTranslateCommand(TranslationIterator* iterator,
- int* input_offset);
-
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
@@ -400,25 +388,49 @@ class Deoptimizer : public Malloced {
Object* ComputeLiteral(int index) const;
- void AddObjectStart(intptr_t slot_address, int argc);
+ void AddObjectStart(intptr_t slot_address, int argc, bool is_arguments);
+ void AddObjectDuplication(intptr_t slot, int object_index);
void AddObjectTaggedValue(intptr_t value);
void AddObjectDoubleValue(double value);
void AddDoubleValue(intptr_t slot_address, double value);
+ bool ArgumentsObjectIsAdapted(int object_index) {
+ ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
+ int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
+ return jsframe_has_adapted_arguments_[reverse_jsframe_index];
+ }
+
+ Handle<JSFunction> ArgumentsObjectFunction(int object_index) {
+ ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
+ int reverse_jsframe_index = jsframe_count_ - desc.jsframe_index() - 1;
+ return jsframe_functions_[reverse_jsframe_index];
+ }
+
+ // Helper function for heap object materialization.
+ Handle<Object> MaterializeNextHeapObject();
+ Handle<Object> MaterializeNextValue();
+
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
- // Weak handle callback for deoptimizing code objects.
- static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
- v8::Persistent<v8::Value>* obj,
- void* data);
+ // Marks all the code in the given context for deoptimization.
+ static void MarkAllCodeForContext(Context* native_context);
+
+ // Visit all the known optimized functions in a given context.
+ static void VisitAllOptimizedFunctionsForContext(
+ Context* context, OptimizedFunctionVisitor* visitor);
- // Deoptimize the given code and add to appropriate deoptimization lists.
- static void DeoptimizeCode(Isolate* isolate, Code* code);
+ // Deoptimizes all code marked in the given context.
+ static void DeoptimizeMarkedCodeForContext(Context* native_context);
// Patch the given code so that it will deoptimize itself.
static void PatchCodeForDeoptimization(Isolate* isolate, Code* code);
+ // Searches the list of known deoptimizing code for a Code object
+ // containing the given address (which is supposedly faster than
+ // searching all code objects).
+ Code* FindDeoptimizingCode(Address addr);
+
  // Fill the input from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
// deoptimizations the input frame is filled in generated code.
@@ -455,10 +467,22 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
+ // Deferred values to be materialized.
List<Object*> deferred_objects_tagged_values_;
List<double> deferred_objects_double_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
+
+ // Output frame information. Only used during heap object materialization.
+ List<Handle<JSFunction> > jsframe_functions_;
+ List<bool> jsframe_has_adapted_arguments_;
+
+ // Materialized objects. Only used during heap object materialization.
+ List<Handle<Object> >* materialized_values_;
+ List<Handle<Object> >* materialized_objects_;
+ int materialization_value_index_;
+ int materialization_object_index_;
+
#ifdef DEBUG
DisallowHeapAllocation* disallow_heap_allocation_;
#endif // DEBUG
@@ -468,7 +492,6 @@ class Deoptimizer : public Malloced {
static const int table_entry_size_;
friend class FrameDescription;
- friend class DeoptimizingCodeListNode;
friend class DeoptimizedFrameInfo;
};
@@ -642,24 +665,16 @@ class DeoptimizerData {
void Iterate(ObjectVisitor* v);
#endif
- Code* FindDeoptimizingCode(Address addr);
- void RemoveDeoptimizingCode(Code* code);
-
private:
MemoryAllocator* allocator_;
int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
- Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
#endif
- // List of deoptimized code which still have references from active stack
- // frames. These code objects are needed by the deoptimizer when deoptimizing
-  // a frame for which the code object for the function has been
- // changed from the code present when deoptimizing was done.
- DeoptimizingCodeListNode* deoptimizing_code_list_;
+ Deoptimizer* current_;
friend class Deoptimizer;
@@ -712,7 +727,9 @@ class Translation BASE_EMBEDDED {
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
COMPILED_STUB_FRAME,
+ DUPLICATED_OBJECT,
ARGUMENTS_OBJECT,
+ CAPTURED_OBJECT,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
@@ -744,6 +761,8 @@ class Translation BASE_EMBEDDED {
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
void BeginArgumentsObject(int args_length);
+ void BeginCapturedObject(int length);
+ void DuplicateObject(int object_index);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
@@ -773,26 +792,6 @@ class Translation BASE_EMBEDDED {
};
-// Linked list holding deoptimizing code objects. The deoptimizing code objects
-// are kept as weak handles until they are no longer activated on the stack.
-class DeoptimizingCodeListNode : public Malloced {
- public:
- explicit DeoptimizingCodeListNode(Code* code);
- ~DeoptimizingCodeListNode();
-
- DeoptimizingCodeListNode* next() const { return next_; }
- void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
- Handle<Code> code() const { return code_; }
-
- private:
- // Global (weak) handle to the deoptimizing code object.
- Handle<Code> code_;
-
- // Next pointer for linked list.
- DeoptimizingCodeListNode* next_;
-};
-
-
class SlotRef BASE_EMBEDDED {
public:
enum SlotRepresentation {
diff --git a/chromium/v8/src/disassembler.cc b/chromium/v8/src/disassembler.cc
index fa8ae1ffc8f..dd620fb3452 100644
--- a/chromium/v8/src/disassembler.cc
+++ b/chromium/v8/src/disassembler.cc
@@ -71,7 +71,7 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name = Isolate::Current()->builtins()->Lookup(pc);
+ const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
if (name != NULL) {
OS::SNPrintF(v8_buffer_, "%s (%p)", name, pc);
return v8_buffer_.start();
@@ -117,8 +117,8 @@ static int DecodeIt(Isolate* isolate,
byte* end) {
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
- ExternalReferenceEncoder ref_encoder;
- Heap* heap = HEAP;
+ ExternalReferenceEncoder ref_encoder(isolate);
+ Heap* heap = isolate->heap();
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
diff --git a/chromium/v8/src/effects.h b/chromium/v8/src/effects.h
new file mode 100644
index 00000000000..afb8f9e54b3
--- /dev/null
+++ b/chromium/v8/src/effects.h
@@ -0,0 +1,361 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EFFECTS_H_
+#define V8_EFFECTS_H_
+
+#include "v8.h"
+
+#include "types.h"
+
+namespace v8 {
+namespace internal {
+
+
+// A simple struct to represent (write) effects. A write is represented as a
+// modification of type bounds (e.g. of a variable).
+//
+// An effect can either be definite, if the write is known to have taken place,
+// or 'possible', if it was optional. The difference is relevant when composing
+// effects.
+//
+// There are two ways to compose effects: sequentially (they happen one after
+// the other) or alternatively (either one or the other happens). A definite
+// effect cancels out any previous effect upon sequencing. A possible effect
+// merges into a previous effect, i.e., type bounds are merged. Alternative
+// composition always merges bounds. It yields a possible effect if at least
+// one was only possible.
+struct Effect {
+ enum Modality { POSSIBLE, DEFINITE };
+
+ Modality modality;
+ Bounds bounds;
+
+ Effect() : modality(DEFINITE) {}
+ Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
+
+ // The unknown effect.
+ static Effect Unknown(Isolate* isolate) {
+ return Effect(Bounds::Unbounded(isolate), POSSIBLE);
+ }
+
+ static Effect Forget(Isolate* isolate) {
+ return Effect(Bounds::Unbounded(isolate), DEFINITE);
+ }
+
+ // Sequential composition, as in 'e1; e2'.
+ static Effect Seq(Effect e1, Effect e2, Isolate* isolate) {
+ if (e2.modality == DEFINITE) return e2;
+ return Effect(Bounds::Either(e1.bounds, e2.bounds, isolate), e1.modality);
+ }
+
+ // Alternative composition, as in 'cond ? e1 : e2'.
+ static Effect Alt(Effect e1, Effect e2, Isolate* isolate) {
+ return Effect(
+ Bounds::Either(e1.bounds, e2.bounds, isolate),
+ e1.modality == POSSIBLE ? POSSIBLE : e2.modality);
+ }
+};
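+// Editor's sketch (not part of the diff): a standalone model of the
+// composition rules documented above, with integer intervals standing in
+// for type Bounds. Seq lets a definite second write cancel the first;
+// Alt always merges bounds and stays possible if either side was.
+//
+//   #include <algorithm>
+//   #include <cassert>
+//
+//   struct Interval { int lo, hi; };
+//   static Interval Either(Interval a, Interval b) {
+//     Interval r = { std::min(a.lo, b.lo), std::max(a.hi, b.hi) };
+//     return r;
+//   }
+//
+//   struct Eff {
+//     enum Modality { POSSIBLE, DEFINITE };
+//     Modality modality;
+//     Interval bounds;
+//   };
+//
+//   static Eff Seq(Eff e1, Eff e2) {
+//     if (e2.modality == Eff::DEFINITE) return e2;  // e2 overwrites e1
+//     Eff r = { e1.modality, Either(e1.bounds, e2.bounds) };
+//     return r;
+//   }
+//
+//   static Eff Alt(Eff e1, Eff e2) {
+//     Eff r = { e1.modality == Eff::POSSIBLE ? Eff::POSSIBLE : e2.modality,
+//               Either(e1.bounds, e2.bounds) };
+//     return r;
+//   }
+//
+//   int main() {
+//     Eff a = { Eff::DEFINITE, { 0, 1 } };
+//     Eff b = { Eff::DEFINITE, { 5, 9 } };
+//     Eff c = { Eff::POSSIBLE, { 5, 9 } };
+//     assert(Seq(a, b).bounds.lo == 5);  // definite b cancels a: [5,9]
+//     assert(Seq(a, c).bounds.hi == 9);  // possible c merges into a: [0,9]
+//     assert(Alt(a, b).bounds.lo == 0);  // alternatives merge: [0,9]
+//     return 0;
+//   }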
+
+
+// Classes encapsulating sets of effects on variables.
+//
+// Effects maps variables to effects and supports sequential and alternative
+// composition.
+//
+// NestedEffects is an incremental representation that supports persistence
+// through functional extension. It represents the map as an adjoin of a list
+// of maps, whose tail can be shared.
+//
+// Both classes provide similar interfaces, implemented in parts through the
+// EffectsMixin below (using sandwich style, to work around the style guide's
+// MI restriction).
+//
+// We also (ab)use Effects/NestedEffects as a representation for abstract
+// store typings. In that case, only definite effects are of interest.
+
+template<class Var, class Base, class Effects>
+class EffectsMixin: public Base {
+ public:
+ explicit EffectsMixin(Zone* zone) : Base(zone) {}
+
+ Effect Lookup(Var var) {
+ Locator locator;
+ return this->Find(var, &locator)
+ ? locator.value() : Effect::Unknown(Base::isolate());
+ }
+
+ Bounds LookupBounds(Var var) {
+ Effect effect = Lookup(var);
+ return effect.modality == Effect::DEFINITE
+ ? effect.bounds : Bounds::Unbounded(Base::isolate());
+ }
+
+ // Sequential composition.
+ void Seq(Var var, Effect effect) {
+ Locator locator;
+ if (!this->Insert(var, &locator)) {
+ effect = Effect::Seq(locator.value(), effect, Base::isolate());
+ }
+ locator.set_value(effect);
+ }
+
+ void Seq(Effects that) {
+ SeqMerger<EffectsMixin> merge = { *this };
+ that.ForEach(&merge);
+ }
+
+ // Alternative composition.
+ void Alt(Var var, Effect effect) {
+ Locator locator;
+ if (!this->Insert(var, &locator)) {
+ effect = Effect::Alt(locator.value(), effect, Base::isolate());
+ }
+ locator.set_value(effect);
+ }
+
+ void Alt(Effects that) {
+ AltWeakener<EffectsMixin> weaken = { *this, that };
+ this->ForEach(&weaken);
+ AltMerger<EffectsMixin> merge = { *this };
+ that.ForEach(&merge);
+ }
+
+ // Invalidation.
+ void Forget() {
+ Overrider override = {
+ Effect::Forget(Base::isolate()), Effects(Base::zone()) };
+ this->ForEach(&override);
+ Seq(override.effects);
+ }
+
+ protected:
+ typedef typename Base::Locator Locator;
+
+ template<class Self>
+ struct SeqMerger {
+ void Call(Var var, Effect effect) { self.Seq(var, effect); }
+ Self self;
+ };
+
+ template<class Self>
+ struct AltMerger {
+ void Call(Var var, Effect effect) { self.Alt(var, effect); }
+ Self self;
+ };
+
+ template<class Self>
+ struct AltWeakener {
+ void Call(Var var, Effect effect) {
+ if (effect.modality == Effect::DEFINITE && !other.Contains(var)) {
+ effect.modality = Effect::POSSIBLE;
+ Locator locator;
+ self.Insert(var, &locator);
+ locator.set_value(effect);
+ }
+ }
+ Self self;
+ Effects other;
+ };
+
+ struct Overrider {
+ void Call(Var var, Effect effect) { effects.Seq(var, new_effect); }
+ Effect new_effect;
+ Effects effects;
+ };
+};
+
+
+template<class Var, Var kNoVar> class Effects;
+template<class Var, Var kNoVar> class NestedEffectsBase;
+
+template<class Var, Var kNoVar>
+class EffectsBase {
+ public:
+ explicit EffectsBase(Zone* zone) : map_(new(zone) Mapping(zone)) {}
+
+ bool IsEmpty() { return map_->is_empty(); }
+
+ protected:
+ friend class NestedEffectsBase<Var, kNoVar>;
+ friend class
+ EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >;
+
+ Zone* zone() { return map_->allocator().zone(); }
+ Isolate* isolate() { return zone()->isolate(); }
+
+ struct SplayTreeConfig {
+ typedef Var Key;
+ typedef Effect Value;
+ static const Var kNoKey = kNoVar;
+ static Effect NoValue() { return Effect(); }
+ static int Compare(int x, int y) { return y - x; }
+ };
+ typedef ZoneSplayTree<SplayTreeConfig> Mapping;
+ typedef typename Mapping::Locator Locator;
+
+ bool Contains(Var var) {
+ ASSERT(var != kNoVar);
+ return map_->Contains(var);
+ }
+ bool Find(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ return map_->Find(var, locator);
+ }
+ bool Insert(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ return map_->Insert(var, locator);
+ }
+
+ template<class Callback>
+ void ForEach(Callback* callback) {
+ return map_->ForEach(callback);
+ }
+
+ private:
+ Mapping* map_;
+};
+
+template<class Var, Var kNoVar>
+const Var EffectsBase<Var, kNoVar>::SplayTreeConfig::kNoKey;
+
+template<class Var, Var kNoVar>
+class Effects: public
+ EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
+ public:
+ explicit Effects(Zone* zone)
+ : EffectsMixin<Var, EffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(zone)
+ {}
+};
+
+
+template<class Var, Var kNoVar>
+class NestedEffectsBase {
+ public:
+ explicit NestedEffectsBase(Zone* zone) : node_(new(zone) Node(zone)) {}
+
+ template<class Callback>
+ void ForEach(Callback* callback) {
+ if (node_->previous) NestedEffectsBase(node_->previous).ForEach(callback);
+ node_->effects.ForEach(callback);
+ }
+
+ Effects<Var, kNoVar> Top() { return node_->effects; }
+
+ bool IsEmpty() {
+ for (Node* node = node_; node != NULL; node = node->previous) {
+ if (!node->effects.IsEmpty()) return false;
+ }
+ return true;
+ }
+
+ protected:
+ typedef typename EffectsBase<Var, kNoVar>::Locator Locator;
+
+ Zone* zone() { return node_->zone; }
+ Isolate* isolate() { return zone()->isolate(); }
+
+ void push() { node_ = new(node_->zone) Node(node_->zone, node_); }
+ void pop() { node_ = node_->previous; }
+ bool is_empty() { return node_ == NULL; }
+
+ bool Contains(Var var) {
+ ASSERT(var != kNoVar);
+ for (Node* node = node_; node != NULL; node = node->previous) {
+ if (node->effects.Contains(var)) return true;
+ }
+ return false;
+ }
+
+ bool Find(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ for (Node* node = node_; node != NULL; node = node->previous) {
+ if (node->effects.Find(var, locator)) return true;
+ }
+ return false;
+ }
+
+ bool Insert(Var var, Locator* locator);
+
+ private:
+ struct Node: ZoneObject {
+ Zone* zone;
+ Effects<Var, kNoVar> effects;
+ Node* previous;
+ explicit Node(Zone* zone, Node* previous = NULL)
+ : zone(zone), effects(zone), previous(previous) {}
+ };
+
+ explicit NestedEffectsBase(Node* node) : node_(node) {}
+
+ Node* node_;
+};
+
+
+template<class Var, Var kNoVar>
+bool NestedEffectsBase<Var, kNoVar>::Insert(Var var, Locator* locator) {
+ ASSERT(var != kNoVar);
+ if (!node_->effects.Insert(var, locator)) return false;
+ Locator shadowed;
+ for (Node* node = node_->previous; node != NULL; node = node->previous) {
+ if (node->effects.Find(var, &shadowed)) {
+ // Initialize with shadowed entry.
+ locator->set_value(shadowed.value());
+ return false;
+ }
+ }
+ return true;
+}
+
+
+template<class Var, Var kNoVar>
+class NestedEffects: public
+ EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> > {
+ public:
+ explicit NestedEffects(Zone* zone) :
+ EffectsMixin<Var, NestedEffectsBase<Var, kNoVar>, Effects<Var, kNoVar> >(
+ zone) {}
+
+ // Create an extension of the current effect set. The current set should not
+ // be modified while the extension is in use.
+ NestedEffects Push() {
+ NestedEffects result = *this;
+ result.push();
+ return result;
+ }
+
+ NestedEffects Pop() {
+ NestedEffects result = *this;
+ result.pop();
+ ASSERT(!this->is_empty());
+ return result;
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EFFECTS_H_
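// Editor's sketch (not part of the diff): a standalone model of the
// persistence idea behind NestedEffects -- a list of maps whose shared tail
// represents outer scopes, so Push() is a cheap functional extension and
// lookups fall back outward, as in NestedEffectsBase::Find above.
//
//   #include <cassert>
//   #include <cstddef>
//   #include <map>
//
//   struct Scope {
//     std::map<int, int> effects;  // var -> effect, radically simplified
//     Scope* previous;             // shared tail (outer scopes)
//     explicit Scope(Scope* prev) : previous(prev) {}
//   };
//
//   Scope* Push(Scope* outer) { return new Scope(outer); }
//
//   bool Find(Scope* s, int var, int* out) {
//     for (; s != NULL; s = s->previous) {
//       std::map<int, int>::iterator it = s->effects.find(var);
//       if (it != s->effects.end()) { *out = it->second; return true; }
//     }
//     return false;
//   }
//
//   int main() {
//     Scope* outer = new Scope(NULL);
//     outer->effects[1] = 42;
//     Scope* inner = Push(outer);  // outer map is shared, not copied
//     int v = 0;
//     assert(Find(inner, 1, &v) && v == 42);
//     delete inner; delete outer;
//     return 0;
//   }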
diff --git a/chromium/v8/src/elements-kind.cc b/chromium/v8/src/elements-kind.cc
index 213aa35c85d..8129051a627 100644
--- a/chromium/v8/src/elements-kind.cc
+++ b/chromium/v8/src/elements-kind.cc
@@ -35,6 +35,36 @@ namespace v8 {
namespace internal {
+int ElementsKindToShiftSize(ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ return 0;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ return 1;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ return 2;
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return 3;
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ return kPointerSizeLog2;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
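+// Editor's sketch (not part of the diff): the shift size returned above is
+// log2 of the element width, so a backing-store offset can be computed as
+// index << shift instead of index * width. Standalone sanity check:
+//
+//   #include <cassert>
+//   #include <stdint.h>
+//
+//   int main() {
+//     const int kShortShift = 1;   // EXTERNAL_SHORT_ELEMENTS -> 1 above
+//     const int kDoubleShift = 3;  // FAST_DOUBLE_ELEMENTS -> 3 above
+//     int64_t index = 5;
+//     assert((index << kShortShift) == index * (int64_t)sizeof(int16_t));
+//     assert((index << kDoubleShift) == index * (int64_t)sizeof(double));
+//     return 0;
+//   }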
const char* ElementsKindToString(ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
return accessor->name();
diff --git a/chromium/v8/src/elements-kind.h b/chromium/v8/src/elements-kind.h
index da151924be2..69b40578180 100644
--- a/chromium/v8/src/elements-kind.h
+++ b/chromium/v8/src/elements-kind.h
@@ -77,6 +77,7 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
+int ElementsKindToShiftSize(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
diff --git a/chromium/v8/src/elements.cc b/chromium/v8/src/elements.cc
index 77abf4e42b8..89621cb3694 100644
--- a/chromium/v8/src/elements.cc
+++ b/chromium/v8/src/elements.cc
@@ -154,7 +154,8 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
+ ASSERT(to_base->map() !=
+ from_base->GetIsolate()->heap()->fixed_cow_array_map());
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
@@ -492,7 +493,6 @@ static void TraceTopFrame(Isolate* isolate) {
}
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Isolate* isolate = Isolate::Current();
Code* apply_builtin = isolate->builtins()->builtin(
Builtins::kFunctionApply);
if (raw_frame->unchecked_code() == apply_builtin) {
@@ -581,14 +581,8 @@ class ElementsAccessorBase : public ElementsAccessor {
      // When objects are first allocated, their elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
- Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
- Heap* heap = holder->GetHeap();
- if (map == heap->one_pointer_filler_map() ||
- map == heap->two_pointer_filler_map() ||
- map == heap->free_space_map()) {
- return;
- }
+ if (fixed_array_base->IsFiller()) return;
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();
diff --git a/chromium/v8/src/execution.cc b/chromium/v8/src/execution.cc
index d7b9cf5d596..979641a9de5 100644
--- a/chromium/v8/src/execution.cc
+++ b/chromium/v8/src/execution.cc
@@ -148,7 +148,8 @@ static Handle<Object> Invoke(bool is_construct,
}
-Handle<Object> Execution::Call(Handle<Object> callable,
+Handle<Object> Execution::Call(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@@ -157,7 +158,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
*pending_exception = false;
if (!callable->IsJSFunction()) {
- callable = TryGetFunctionDelegate(callable, pending_exception);
+ callable = TryGetFunctionDelegate(isolate, callable, pending_exception);
if (*pending_exception) return callable;
}
Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
@@ -174,7 +175,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
receiver = Handle<Object>(global, func->GetIsolate());
}
} else {
- receiver = ToObject(receiver, pending_exception);
+ receiver = ToObject(isolate, receiver, pending_exception);
}
if (*pending_exception) return callable;
}
@@ -206,10 +207,12 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
catcher.SetCaptureMessage(false);
*caught_exception = false;
+ // Get isolate now, because handle might be persistent
+ // and get destroyed in the next call.
+ Isolate* isolate = func->GetIsolate();
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
- Isolate* isolate = func->GetIsolate();
if (*caught_exception) {
ASSERT(catcher.HasCaught());
ASSERT(isolate->has_pending_exception());
@@ -232,9 +235,9 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
}
-Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
+Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
// If you return a function from here, it will be called when an
@@ -259,10 +262,10 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
}
-Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
+Handle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If object is a function proxy, get its handler. Iterate if necessary.
Object* fun = *object;
@@ -290,9 +293,9 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
}
-Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
+Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@@ -317,10 +320,10 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
Handle<Object> Execution::TryGetConstructorDelegate(
+ Isolate* isolate,
Handle<Object> object,
bool* has_pending_exception) {
ASSERT(!object->IsJSFunction());
- Isolate* isolate = Isolate::Current();
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a constructor.
@@ -456,6 +459,22 @@ void StackGuard::RequestGC() {
}
+bool StackGuard::IsInstallCodeRequest() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & INSTALL_CODE) != 0;
+}
+
+
+void StackGuard::RequestInstallCode() {
+ ExecutionAccess access(isolate_);
+ thread_local_.interrupt_flags_ |= INSTALL_CODE;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
+}
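+// Editor's sketch (not part of the diff): a standalone model of the
+// request/consume pattern above -- RequestInstallCode sets a flag bit and
+// collapses the stack limits so the running thread traps into the guard,
+// where Continue(INSTALL_CODE) clears the bit once the work is dispatched.
+//
+//   #include <cassert>
+//   #include <stdint.h>
+//
+//   enum Flag { INSTALL_CODE = 1 << 7 };  // mirrors the new execution.h bit
+//
+//   struct Guard {
+//     uint32_t flags;
+//     bool limits_armed;
+//     Guard() : flags(0), limits_armed(false) {}
+//     void Request(Flag f) { flags |= f; limits_armed = true; }
+//     bool Consume(Flag f) {  // models IsInstallCodeRequest + Continue
+//       if ((flags & f) == 0) return false;
+//       flags &= ~(uint32_t)f;
+//       return true;
+//     }
+//   };
+//
+//   int main() {
+//     Guard g;
+//     g.Request(INSTALL_CODE);
+//     assert(g.limits_armed && g.Consume(INSTALL_CODE));
+//     assert(!g.Consume(INSTALL_CODE));  // requests are one-shot
+//     return 0;
+//   }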
+
+
bool StackGuard::IsFullDeopt() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
@@ -594,54 +613,60 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
do { \
- Isolate* isolate = Isolate::Current(); \
Handle<Object> argv[] = args; \
ASSERT(has_pending_exception != NULL); \
- return Call(isolate->name##_fun(), \
+ return Call(isolate, \
+ isolate->name##_fun(), \
isolate->js_builtins_object(), \
ARRAY_SIZE(argv), argv, \
has_pending_exception); \
} while (false)
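// Editor's note (not part of the diff): with the isolate parameter now
// supplied by the caller rather than fetched via Isolate::Current(), a use
// such as ToNumber below expands to approximately the following
// (hand-expanded from the macro above, not compiler output):
//
//   Handle<Object> Execution::ToNumber(
//       Isolate* isolate, Handle<Object> obj, bool* exc) {
//     Handle<Object> argv[] = { obj };
//     ASSERT(exc != NULL);
//     return Call(isolate,
//                 isolate->to_number_fun(),
//                 isolate->js_builtins_object(),
//                 ARRAY_SIZE(argv), argv,
//                 exc);
//   }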
-Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToNumber(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_number, { obj }, exc);
}
-Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToString(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_string, { obj }, exc);
}
-Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToDetailString(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
}
-Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToObject(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
RETURN_NATIVE_CALL(to_object, { obj }, exc);
}
-Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToInteger(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_integer, { obj }, exc);
}
-Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToUint32(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
}
-Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
+Handle<Object> Execution::ToInt32(
+ Isolate* isolate, Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_int32, { obj }, exc);
}
-Handle<Object> Execution::NewDate(double time, bool* exc) {
- Isolate* isolate = Isolate::Current();
+Handle<Object> Execution::NewDate(Isolate* isolate, double time, bool* exc) {
Handle<Object> time_obj = isolate->factory()->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}
@@ -696,15 +721,18 @@ Handle<JSFunction> Execution::InstantiateFunction(
Handle<FunctionTemplateInfo> data,
bool* exc) {
Isolate* isolate = data->GetIsolate();
- // Fast case: see if the function has already been instantiated
- int serial_number = Smi::cast(data->serial_number())->value();
- Object* elm =
- isolate->native_context()->function_cache()->
- GetElementNoExceptionThrown(serial_number);
- if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ if (!data->do_not_cache()) {
+ // Fast case: see if the function has already been instantiated
+ int serial_number = Smi::cast(data->serial_number())->value();
+ Object* elm =
+ isolate->native_context()->function_cache()->
+ GetElementNoExceptionThrown(isolate, serial_number);
+ if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
+ }
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
+ Handle<Object> result = Call(isolate,
+ isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -736,7 +764,8 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
return Handle<JSObject>(JSObject::cast(result));
} else {
Handle<Object> args[] = { data };
- Handle<Object> result = Call(isolate->instantiate_fun(),
+ Handle<Object> result = Call(isolate,
+ isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -747,12 +776,13 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
}
-void Execution::ConfigureInstance(Handle<Object> instance,
+void Execution::ConfigureInstance(Isolate* isolate,
+ Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
- Isolate* isolate = Isolate::Current();
Handle<Object> args[] = { instance, instance_template };
- Execution::Call(isolate->configure_instance_fun(),
+ Execution::Call(isolate,
+ isolate->configure_instance_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
@@ -780,9 +810,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
}
-static Object* RuntimePreempt() {
- Isolate* isolate = Isolate::Current();
-
+static Object* RuntimePreempt(Isolate* isolate) {
// Clear the preempt request flag.
isolate->stack_guard()->Continue(PREEMPT);
@@ -811,9 +839,7 @@ static Object* RuntimePreempt() {
#ifdef ENABLE_DEBUGGER_SUPPORT
-Object* Execution::DebugBreakHelper() {
- Isolate* isolate = Isolate::Current();
-
+Object* Execution::DebugBreakHelper(Isolate* isolate) {
// Just continue if breaks are disabled.
if (isolate->debug()->disable_break()) {
return isolate->heap()->undefined_value();
@@ -859,15 +885,15 @@ Object* Execution::DebugBreakHelper() {
// Clear the debug break request flag.
isolate->stack_guard()->Continue(DEBUGBREAK);
- ProcessDebugMessages(debug_command_only);
+ ProcessDebugMessages(isolate, debug_command_only);
// Return to continue execution.
return isolate->heap()->undefined_value();
}
-void Execution::ProcessDebugMessages(bool debug_command_only) {
- Isolate* isolate = Isolate::Current();
+void Execution::ProcessDebugMessages(Isolate* isolate,
+ bool debug_command_only) {
// Clear the debug command request flag.
isolate->stack_guard()->Continue(DEBUGCOMMAND);
@@ -878,7 +904,7 @@ void Execution::ProcessDebugMessages(bool debug_command_only) {
HandleScope scope(isolate);
// Enter the debugger. Just continue if we fail to enter the debugger.
- EnterDebugger debugger;
+ EnterDebugger debugger(isolate);
if (debugger.FailedToEnter()) {
return;
}
@@ -906,13 +932,12 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
isolate->counters()->stack_interrupts()->Increment();
isolate->counters()->runtime_profiler_ticks()->Increment();
- isolate->runtime_profiler()->OptimizeNow();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
- DebugBreakHelper();
+ DebugBreakHelper(isolate);
}
#endif
- if (stack_guard->IsPreempted()) RuntimePreempt();
+ if (stack_guard->IsPreempted()) RuntimePreempt(isolate);
if (stack_guard->IsTerminateExecution()) {
stack_guard->Continue(TERMINATE);
return isolate->TerminateExecution();
@@ -925,6 +950,12 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
+ if (stack_guard->IsInstallCodeRequest()) {
+ ASSERT(FLAG_concurrent_recompilation);
+ stack_guard->Continue(INSTALL_CODE);
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ }
+ isolate->runtime_profiler()->OptimizeNow();
return isolate->heap()->undefined_value();
}
diff --git a/chromium/v8/src/execution.h b/chromium/v8/src/execution.h
index c6bf63d72f2..371ea309d6c 100644
--- a/chromium/v8/src/execution.h
+++ b/chromium/v8/src/execution.h
@@ -42,7 +42,8 @@ enum InterruptFlag {
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
GC_REQUEST = 1 << 5,
- FULL_DEOPT = 1 << 6
+ FULL_DEOPT = 1 << 6,
+ INSTALL_CODE = 1 << 7
};
@@ -62,7 +63,8 @@ class Execution : public AllStatic {
// and the function called is not in strict mode, receiver is converted to
// an object.
//
- static Handle<Object> Call(Handle<Object> callable,
+ static Handle<Object> Call(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
@@ -92,28 +94,36 @@ class Execution : public AllStatic {
bool* caught_exception);
// ECMA-262 9.3
- static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToNumber(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.4
- static Handle<Object> ToInteger(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToInteger(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.5
- static Handle<Object> ToInt32(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToInt32(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.6
- static Handle<Object> ToUint32(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToUint32(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
- static Handle<Object> ToString(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToString(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.8
- static Handle<Object> ToDetailString(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToDetailString(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// ECMA-262 9.9
- static Handle<Object> ToObject(Handle<Object> obj, bool* exc);
+ static Handle<Object> ToObject(
+ Isolate* isolate, Handle<Object> obj, bool* exc);
// Create a new date object from 'time'.
- static Handle<Object> NewDate(double time, bool* exc);
+ static Handle<Object> NewDate(
+ Isolate* isolate, double time, bool* exc);
// Create a new regular expression object from 'pattern' and 'flags'.
static Handle<JSRegExp> NewJSRegExp(Handle<String> pattern,
@@ -128,7 +138,8 @@ class Execution : public AllStatic {
Handle<FunctionTemplateInfo> data, bool* exc);
static Handle<JSObject> InstantiateObject(Handle<ObjectTemplateInfo> data,
bool* exc);
- static void ConfigureInstance(Handle<Object> instance,
+ static void ConfigureInstance(Isolate* isolate,
+ Handle<Object> instance,
Handle<Object> data,
bool* exc);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
@@ -136,8 +147,8 @@ class Execution : public AllStatic {
Handle<Object> pos,
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* DebugBreakHelper();
- static void ProcessDebugMessages(bool debug_command_only);
+ static Object* DebugBreakHelper(Isolate* isolate);
+ static void ProcessDebugMessages(Isolate* isolate, bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual
@@ -147,14 +158,18 @@ class Execution : public AllStatic {
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as functions.
- static Handle<Object> GetFunctionDelegate(Handle<Object> object);
- static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
+ static Handle<Object> GetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object);
+ static Handle<Object> TryGetFunctionDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception);
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as constructors.
- static Handle<Object> GetConstructorDelegate(Handle<Object> object);
- static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
+ static Handle<Object> GetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object);
+ static Handle<Object> TryGetConstructorDelegate(Isolate* isolate,
+ Handle<Object> object,
bool* has_pending_exception);
};
@@ -199,6 +214,8 @@ class StackGuard {
#endif
bool IsGCRequest();
void RequestGC();
+ bool IsInstallCodeRequest();
+ void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
void Continue(InterruptFlag after_what);
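
These header changes thread an explicit Isolate* through every Execution entry point in place of the Isolate::Current() thread-local lookup. A hedged, standalone sketch of that refactor pattern follows; the names are hypothetical and this is not V8 code:

#include <iostream>
#include <string>

struct Isolate { std::string name; };

// Old style: a hidden thread-local lookup inside the callee, the moral
// equivalent of Isolate::Current().
thread_local Isolate* g_current_isolate = nullptr;

void ConfigureInstanceOld() {
  Isolate* isolate = g_current_isolate;
  std::cout << "configure on " << isolate->name << "\n";
}

// New style: the caller passes the isolate explicitly, which makes the
// dependency visible and skips the TLS lookup on every call.
void ConfigureInstanceNew(Isolate* isolate) {
  std::cout << "configure on " << isolate->name << "\n";
}

int main() {
  Isolate isolate{"main"};
  g_current_isolate = &isolate;
  ConfigureInstanceOld();
  ConfigureInstanceNew(&isolate);
  return 0;
}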
diff --git a/chromium/v8/src/extensions/externalize-string-extension.cc b/chromium/v8/src/extensions/externalize-string-extension.cc
index a3630fb9f5b..5fd821b9c07 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.cc
+++ b/chromium/v8/src/extensions/externalize-string-extension.cc
@@ -103,7 +103,8 @@ void ExternalizeStringExtension::Externalize(
reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
if (result && !string->IsInternalizedString()) {
- HEAP->external_string_table()->AddString(*string);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
} else {
@@ -113,7 +114,8 @@ void ExternalizeStringExtension::Externalize(
data, string->length());
result = string->MakeExternal(resource);
if (result && !string->IsInternalizedString()) {
- HEAP->external_string_table()->AddString(*string);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ isolate->heap()->external_string_table()->AddString(*string);
}
if (!result) delete resource;
}
diff --git a/chromium/v8/src/extensions/gc-extension.cc b/chromium/v8/src/extensions/gc-extension.cc
index 036b60cb231..308879115fc 100644
--- a/chromium/v8/src/extensions/gc-extension.cc
+++ b/chromium/v8/src/extensions/gc-extension.cc
@@ -39,10 +39,11 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
if (args[0]->BooleanValue()) {
- HEAP->CollectGarbage(NEW_SPACE, "gc extension");
+ isolate->heap()->CollectGarbage(NEW_SPACE, "gc extension");
} else {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+ isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
}
}
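
Both extension fixes (externalize-string above and the gc extension here) replace the process-wide HEAP macro with the isolate recovered from the callback's arguments, which is what makes them safe when a process hosts more than one isolate. A sketch of the pattern against the 2013-era public V8 API; MyCallback is a hypothetical extension function, not part of this patch:

#include "v8.h"

void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
  v8::Isolate* isolate = args.GetIsolate();  // Isolate owning this call.
  v8::HandleScope scope(isolate);
  // Operate on this isolate's state instead of a global singleton; the
  // HEAP macro assumed a single isolate per process.
  args.GetReturnValue().Set(v8::Undefined(isolate));
}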
diff --git a/chromium/v8/src/extensions/i18n/break-iterator.cc b/chromium/v8/src/extensions/i18n/break-iterator.cc
deleted file mode 100644
index 0681e264ab0..00000000000
--- a/chromium/v8/src/extensions/i18n/break-iterator.cc
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "break-iterator.h"
-
-#include <string.h>
-
-#include "i18n-utils.h"
-#include "unicode/brkiter.h"
-#include "unicode/locid.h"
-#include "unicode/rbbi.h"
-
-namespace v8_i18n {
-
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
-static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object>,
- v8::Handle<v8::Value>);
-static icu::BreakIterator* InitializeBreakIterator(v8::Handle<v8::String>,
- v8::Handle<v8::Object>,
- v8::Handle<v8::Object>);
-static icu::BreakIterator* CreateICUBreakIterator(const icu::Locale&,
- v8::Handle<v8::Object>);
-static void SetResolvedSettings(const icu::Locale&,
- icu::BreakIterator*,
- v8::Handle<v8::Object>);
-
-icu::BreakIterator* BreakIterator::UnpackBreakIterator(
- v8::Handle<v8::Object> obj) {
- v8::HandleScope handle_scope;
-
- // v8::ObjectTemplate doesn't have a HasInstance method, so we can't check
- // whether obj is an instance of the BreakIterator class. We'll check for a
- // property that has to be in the object. The same applies to other
- // services, like Collator and DateTimeFormat.
- if (obj->HasOwnProperty(v8::String::New("breakIterator"))) {
- return static_cast<icu::BreakIterator*>(
- obj->GetAlignedPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param) {
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a break iterator.
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
- delete UnpackBreakIterator(handle);
-
- delete static_cast<icu::UnicodeString*>(
- handle->GetAlignedPointerFromInternalField(1));
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
-}
-
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("BreakIterator method called on an object "
- "that is not a BreakIterator.")));
-}
-
-
-// Deletes the old value and sets the adopted text in corresponding
-// JavaScript object.
-icu::UnicodeString* ResetAdoptedText(
- v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
- // Get the previous value from the internal field.
- icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
- obj->GetAlignedPointerFromInternalField(1));
- delete text;
-
- // Assign new value to the internal pointer.
- v8::String::Value text_value(value);
- text = new icu::UnicodeString(
- reinterpret_cast<const UChar*>(*text_value), text_value.length());
- obj->SetAlignedPointerInInternalField(1, text);
-
- // Return new unicode string pointer.
- return text;
-}
-
-void BreakIterator::JSInternalBreakIteratorAdoptText(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New(
- "Internal error. Iterator and text have to be specified.")));
- return;
- }
-
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- break_iterator->setText(*ResetAdoptedText(args[0]->ToObject(), args[1]));
-}
-
-void BreakIterator::JSInternalBreakIteratorFirst(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->first()));
-}
-
-void BreakIterator::JSInternalBreakIteratorNext(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->next()));
-}
-
-void BreakIterator::JSInternalBreakIteratorCurrent(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- args.GetReturnValue().Set(static_cast<int32_t>(break_iterator->current()));
-}
-
-void BreakIterator::JSInternalBreakIteratorBreakType(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args[0]->ToObject());
- if (!break_iterator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
- icu::RuleBasedBreakIterator* rule_based_iterator =
- static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
- int32_t status = rule_based_iterator->getRuleStatus();
- // Keep return values in sync with JavaScript BreakType enum.
- v8::Handle<v8::String> result;
- if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- result = v8::String::New("none");
- } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- result = v8::String::New("number");
- } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- result = v8::String::New("letter");
- } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- result = v8::String::New("kana");
- } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- result = v8::String::New("ideo");
- } else {
- result = v8::String::New("unknown");
- }
- args.GetReturnValue().Set(result);
-}
-
-void BreakIterator::JSCreateBreakIterator(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsObject() ||
- !args[2]->IsObject()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, wrong parameters.")));
- return;
- }
-
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::ObjectTemplate> break_iterator_template =
- Utils::GetTemplate2(isolate);
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object = break_iterator_template->NewInstance();
- // But the handle shouldn't be empty.
- // That can happen if there was a stack overflow when creating the object.
- if (local_object.IsEmpty()) {
- args.GetReturnValue().Set(local_object);
- return;
- }
-
- // Set break iterator as internal field of the resulting JS object.
- icu::BreakIterator* break_iterator = InitializeBreakIterator(
- args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
-
- if (!break_iterator) {
- v8::ThrowException(v8::Exception::Error(v8::String::New(
- "Internal error. Couldn't create ICU break iterator.")));
- return;
- } else {
- local_object->SetAlignedPointerInInternalField(0, break_iterator);
- // Make sure that the pointer to adopted text is NULL.
- local_object->SetAlignedPointerInInternalField(1, NULL);
-
- v8::TryCatch try_catch;
- local_object->Set(v8::String::New("breakIterator"),
- v8::String::New("valid"));
- if (try_catch.HasCaught()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, couldn't set property.")));
- return;
- }
- }
-
- v8::Persistent<v8::Object> wrapper(isolate, local_object);
- // Make the object handle weak so we can delete the iterator once GC kicks in.
- wrapper.MakeWeak<void>(NULL, &DeleteBreakIterator);
- args.GetReturnValue().Set(wrapper);
- wrapper.ClearAndLeak();
-}
-
-static icu::BreakIterator* InitializeBreakIterator(
- v8::Handle<v8::String> locale,
- v8::Handle<v8::Object> options,
- v8::Handle<v8::Object> resolved) {
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::AsciiValue bcp47_locale(locale);
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
- }
- icu_locale = icu::Locale(icu_result);
- }
-
- icu::BreakIterator* break_iterator =
- CreateICUBreakIterator(icu_locale, options);
- if (!break_iterator) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- break_iterator = CreateICUBreakIterator(no_extension_locale, options);
-
- // Set resolved settings (locale).
- SetResolvedSettings(no_extension_locale, break_iterator, resolved);
- } else {
- SetResolvedSettings(icu_locale, break_iterator, resolved);
- }
-
- return break_iterator;
-}
-
-static icu::BreakIterator* CreateICUBreakIterator(
- const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
- UErrorCode status = U_ZERO_ERROR;
- icu::BreakIterator* break_iterator = NULL;
- icu::UnicodeString type;
- if (!Utils::ExtractStringSetting(options, "type", &type)) {
- // The type option is required; its absence would be an internal error.
- return NULL;
- }
-
- if (type == UNICODE_STRING_SIMPLE("character")) {
- break_iterator =
- icu::BreakIterator::createCharacterInstance(icu_locale, status);
- } else if (type == UNICODE_STRING_SIMPLE("sentence")) {
- break_iterator =
- icu::BreakIterator::createSentenceInstance(icu_locale, status);
- } else if (type == UNICODE_STRING_SIMPLE("line")) {
- break_iterator =
- icu::BreakIterator::createLineInstance(icu_locale, status);
- } else {
- // Default is word iterator.
- break_iterator =
- icu::BreakIterator::createWordInstance(icu_locale, status);
- }
-
- if (U_FAILURE(status)) {
- delete break_iterator;
- return NULL;
- }
-
- return break_iterator;
-}
-
-static void SetResolvedSettings(const icu::Locale& icu_locale,
- icu::BreakIterator* date_format,
- v8::Handle<v8::Object> resolved) {
- UErrorCode status = U_ZERO_ERROR;
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
- if (U_SUCCESS(status)) {
- resolved->Set(v8::String::New("locale"), v8::String::New(result));
- } else {
- // This would never happen, since we got the locale from ICU.
- resolved->Set(v8::String::New("locale"), v8::String::New("und"));
- }
-}
-
-} // namespace v8_i18n
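
The deleted CreateICUBreakIterator above is a thin dispatch from the type option onto ICU's factory functions. For reference, a standalone ICU sketch (assuming ICU headers and libraries are available) of the iteration loop the bindings exposed:

#include <cstdio>
#include "unicode/brkiter.h"
#include "unicode/locid.h"
#include "unicode/unistr.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::BreakIterator* it =
      icu::BreakIterator::createWordInstance(icu::Locale("en"), status);
  if (U_FAILURE(status)) return 1;

  icu::UnicodeString text("Segment this sentence into words.");
  it->setText(text);  // Counterpart of the adoptText binding.

  // first()/next() are what the JSInternalBreakIteratorFirst/Next natives
  // forwarded to; DONE marks the end of the text.
  for (int32_t pos = it->first(); pos != icu::BreakIterator::DONE;
       pos = it->next()) {
    std::printf("break at %d\n", pos);
  }

  delete it;
  return 0;
}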
diff --git a/chromium/v8/src/extensions/i18n/break-iterator.h b/chromium/v8/src/extensions/i18n/break-iterator.h
deleted file mode 100644
index c44c20fbc8a..00000000000
--- a/chromium/v8/src/extensions/i18n/break-iterator.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// limitations under the License.
-
-#ifndef V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
-#define V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace U_ICU_NAMESPACE {
-class BreakIterator;
-class UnicodeString;
-}
-
-namespace v8_i18n {
-
-class BreakIterator {
- public:
- static void JSCreateBreakIterator(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Helper methods for various bindings.
-
- // Unpacks iterator object from corresponding JavaScript object.
- static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the BreakIterator once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteBreakIterator(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param);
-
- // Assigns new text to the iterator.
- static void JSInternalBreakIteratorAdoptText(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Moves iterator to the beginning of the string and returns new position.
- static void JSInternalBreakIteratorFirst(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Moves iterator to the next position and returns it.
- static void JSInternalBreakIteratorNext(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Returns the iterator's current position.
- static void JSInternalBreakIteratorCurrent(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Returns the break type of the item at the current position.
- // This call is only valid for word break iterators. Others just return 0.
- static void JSInternalBreakIteratorBreakType(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- private:
- BreakIterator() {}
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_BREAK_ITERATOR_H_
diff --git a/chromium/v8/src/extensions/i18n/break-iterator.js b/chromium/v8/src/extensions/i18n/break-iterator.js
deleted file mode 100644
index eefd8c2ab1e..00000000000
--- a/chromium/v8/src/extensions/i18n/break-iterator.js
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// limitations under the License.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Initializes the given object so it's a valid BreakIterator instance.
- * Useful for subclassing.
- */
-function initializeBreakIterator(iterator, locales, options) {
- native function NativeJSCreateBreakIterator();
-
- if (iterator.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize v8BreakIterator object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var getOption = getGetOption(options, 'breakiterator');
-
- var internalOptions = {};
-
- defineWEProperty(internalOptions, 'type', getOption(
- 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
-
- var locale = resolveLocale('breakiterator', locales, options);
- var resolved = Object.defineProperties({}, {
- requestedLocale: {value: locale.locale, writable: true},
- type: {value: internalOptions.type, writable: true},
- locale: {writable: true}
- });
-
- var internalIterator = NativeJSCreateBreakIterator(locale.locale,
- internalOptions,
- resolved);
-
- Object.defineProperty(iterator, 'iterator', {value: internalIterator});
- Object.defineProperty(iterator, 'resolved', {value: resolved});
- Object.defineProperty(iterator, '__initializedIntlObject',
- {value: 'breakiterator'});
-
- return iterator;
-}
-
-
-/**
- * Constructs Intl.v8BreakIterator object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'v8BreakIterator', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.v8BreakIterator(locales, options);
- }
-
- return initializeBreakIterator(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * BreakIterator resolvedOptions method.
- */
-%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'breakiterator') {
- throw new TypeError('resolvedOptions method called on a non-object or ' +
- 'on an object that is not Intl.v8BreakIterator.');
- }
-
- var segmenter = this;
- var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
- segmenter.resolved.locale);
-
- return {
- locale: locale,
- type: segmenter.resolved.type
- };
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * Options are an optional parameter.
- */
-%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('breakiterator', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
-%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
-
-
-/**
- * Adopts text to segment using the iterator. Old text, if present,
- * gets discarded.
- */
-function adoptText(iterator, text) {
- native function NativeJSBreakIteratorAdoptText();
- NativeJSBreakIteratorAdoptText(iterator.iterator, String(text));
-}
-
-
-/**
- * Returns index of the first break in the string and moves current pointer.
- */
-function first(iterator) {
- native function NativeJSBreakIteratorFirst();
- return NativeJSBreakIteratorFirst(iterator.iterator);
-}
-
-
-/**
- * Returns the index of the next break and moves the pointer.
- */
-function next(iterator) {
- native function NativeJSBreakIteratorNext();
- return NativeJSBreakIteratorNext(iterator.iterator);
-}
-
-
-/**
- * Returns index of the current break.
- */
-function current(iterator) {
- native function NativeJSBreakIteratorCurrent();
- return NativeJSBreakIteratorCurrent(iterator.iterator);
-}
-
-
-/**
- * Returns type of the current break.
- */
-function breakType(iterator) {
- native function NativeJSBreakIteratorBreakType();
- return NativeJSBreakIteratorBreakType(iterator.iterator);
-}
-
-
-addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
-addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
-addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
-addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
-addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
diff --git a/chromium/v8/src/extensions/i18n/collator.cc b/chromium/v8/src/extensions/i18n/collator.cc
deleted file mode 100644
index 61b1d63e5c2..00000000000
--- a/chromium/v8/src/extensions/i18n/collator.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// limitations under the License.
-
-#include "collator.h"
-
-#include "i18n-utils.h"
-#include "unicode/coll.h"
-#include "unicode/locid.h"
-#include "unicode/ucol.h"
-
-namespace v8_i18n {
-
-static icu::Collator* InitializeCollator(
- v8::Handle<v8::String>, v8::Handle<v8::Object>, v8::Handle<v8::Object>);
-
-static icu::Collator* CreateICUCollator(
- const icu::Locale&, v8::Handle<v8::Object>);
-
-static bool SetBooleanAttribute(
- UColAttribute, const char*, v8::Handle<v8::Object>, icu::Collator*);
-
-static void SetResolvedSettings(
- const icu::Locale&, icu::Collator*, v8::Handle<v8::Object>);
-
-static void SetBooleanSetting(
- UColAttribute, icu::Collator*, const char*, v8::Handle<v8::Object>);
-
-icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
- v8::HandleScope handle_scope;
-
- if (obj->HasOwnProperty(v8::String::New("collator"))) {
- return static_cast<icu::Collator*>(
- obj->GetAlignedPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void Collator::DeleteCollator(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param) {
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a collator.
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
- delete UnpackCollator(handle);
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
-}
-
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("Collator method called on an object "
- "that is not a Collator.")));
-}
-
-
-// When there's an ICU error, throw a JavaScript error with |message|.
-static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
- return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
-}
-
-
-// static
-void Collator::JSInternalCompare(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 3 || !args[0]->IsObject() ||
- !args[1]->IsString() || !args[2]->IsString()) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Collator and two string arguments are required.")));
- return;
- }
-
- icu::Collator* collator = UnpackCollator(args[0]->ToObject());
- if (!collator) {
- ThrowUnexpectedObjectError();
- return;
- }
-
- v8::String::Value string_value1(args[1]);
- v8::String::Value string_value2(args[2]);
- const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
- const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
- UErrorCode status = U_ZERO_ERROR;
- UCollationResult result = collator->compare(
- string1, string_value1.length(), string2, string_value2.length(), status);
-
- if (U_FAILURE(status)) {
- ThrowExceptionForICUError(
- "Internal error. Unexpected failure in Collator.compare.");
- return;
- }
-
- args.GetReturnValue().Set(result);
-}
-
-void Collator::JSCreateCollator(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsObject() ||
- !args[2]->IsObject()) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Internal error, wrong parameters.")));
- return;
- }
-
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::ObjectTemplate> intl_collator_template =
- Utils::GetTemplate(isolate);
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object = intl_collator_template->NewInstance();
- // But the handle shouldn't be empty.
- // That can happen if there was a stack overflow when creating the object.
- if (local_object.IsEmpty()) {
- args.GetReturnValue().Set(local_object);
- return;
- }
-
- // Set collator as internal field of the resulting JS object.
- icu::Collator* collator = InitializeCollator(
- args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
-
- if (!collator) {
- v8::ThrowException(v8::Exception::Error(v8::String::New(
- "Internal error. Couldn't create ICU collator.")));
- return;
- } else {
- local_object->SetAlignedPointerInInternalField(0, collator);
-
- // Make it safer to unpack later on.
- v8::TryCatch try_catch;
- local_object->Set(v8::String::New("collator"), v8::String::New("valid"));
- if (try_catch.HasCaught()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, couldn't set property.")));
- return;
- }
- }
-
- v8::Persistent<v8::Object> wrapper(isolate, local_object);
- // Make the object handle weak so we can delete the collator once GC kicks in.
- wrapper.MakeWeak<void>(NULL, &DeleteCollator);
- args.GetReturnValue().Set(wrapper);
- wrapper.ClearAndLeak();
-}
-
-static icu::Collator* InitializeCollator(v8::Handle<v8::String> locale,
- v8::Handle<v8::Object> options,
- v8::Handle<v8::Object> resolved) {
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::AsciiValue bcp47_locale(locale);
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
- }
- icu_locale = icu::Locale(icu_result);
- }
-
- icu::Collator* collator = CreateICUCollator(icu_locale, options);
- if (!collator) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- collator = CreateICUCollator(no_extension_locale, options);
-
- // Set resolved settings (pattern, numbering system).
- SetResolvedSettings(no_extension_locale, collator, resolved);
- } else {
- SetResolvedSettings(icu_locale, collator, resolved);
- }
-
- return collator;
-}
-
-static icu::Collator* CreateICUCollator(
- const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
- // Make collator from options.
- icu::Collator* collator = NULL;
- UErrorCode status = U_ZERO_ERROR;
- collator = icu::Collator::createInstance(icu_locale, status);
-
- if (U_FAILURE(status)) {
- delete collator;
- return NULL;
- }
-
- // Set flags first, and then override them with sensitivity if necessary.
- SetBooleanAttribute(UCOL_NUMERIC_COLLATION, "numeric", options, collator);
-
- // Normalization is always on, by the spec. We are free to optimize
- // if the strings are already normalized (but we don't have a way to tell
- // that right now).
- collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
-
- icu::UnicodeString case_first;
- if (Utils::ExtractStringSetting(options, "caseFirst", &case_first)) {
- if (case_first == UNICODE_STRING_SIMPLE("upper")) {
- collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
- } else if (case_first == UNICODE_STRING_SIMPLE("lower")) {
- collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
- } else {
- // Default (false/off).
- collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
- }
- }
-
- icu::UnicodeString sensitivity;
- if (Utils::ExtractStringSetting(options, "sensitivity", &sensitivity)) {
- if (sensitivity == UNICODE_STRING_SIMPLE("base")) {
- collator->setStrength(icu::Collator::PRIMARY);
- } else if (sensitivity == UNICODE_STRING_SIMPLE("accent")) {
- collator->setStrength(icu::Collator::SECONDARY);
- } else if (sensitivity == UNICODE_STRING_SIMPLE("case")) {
- collator->setStrength(icu::Collator::PRIMARY);
- collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
- } else {
- // variant (default)
- collator->setStrength(icu::Collator::TERTIARY);
- }
- }
-
- bool ignore;
- if (Utils::ExtractBooleanSetting(options, "ignorePunctuation", &ignore)) {
- if (ignore) {
- collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
- }
- }
-
- return collator;
-}
-
-static bool SetBooleanAttribute(UColAttribute attribute,
- const char* name,
- v8::Handle<v8::Object> options,
- icu::Collator* collator) {
- UErrorCode status = U_ZERO_ERROR;
- bool result;
- if (Utils::ExtractBooleanSetting(options, name, &result)) {
- collator->setAttribute(attribute, result ? UCOL_ON : UCOL_OFF, status);
- if (U_FAILURE(status)) {
- return false;
- }
- }
-
- return true;
-}
-
-static void SetResolvedSettings(const icu::Locale& icu_locale,
- icu::Collator* collator,
- v8::Handle<v8::Object> resolved) {
- SetBooleanSetting(UCOL_NUMERIC_COLLATION, collator, "numeric", resolved);
-
- UErrorCode status = U_ZERO_ERROR;
-
- switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
- case UCOL_LOWER_FIRST:
- resolved->Set(v8::String::New("caseFirst"), v8::String::New("lower"));
- break;
- case UCOL_UPPER_FIRST:
- resolved->Set(v8::String::New("caseFirst"), v8::String::New("upper"));
- break;
- default:
- resolved->Set(v8::String::New("caseFirst"), v8::String::New("false"));
- }
-
- switch (collator->getAttribute(UCOL_STRENGTH, status)) {
- case UCOL_PRIMARY: {
- resolved->Set(v8::String::New("strength"), v8::String::New("primary"));
-
- // case level: true + s1 -> case, s1 -> base.
- if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
- resolved->Set(v8::String::New("sensitivity"), v8::String::New("case"));
- } else {
- resolved->Set(v8::String::New("sensitivity"), v8::String::New("base"));
- }
- break;
- }
- case UCOL_SECONDARY:
- resolved->Set(v8::String::New("strength"), v8::String::New("secondary"));
- resolved->Set(v8::String::New("sensitivity"), v8::String::New("accent"));
- break;
- case UCOL_TERTIARY:
- resolved->Set(v8::String::New("strength"), v8::String::New("tertiary"));
- resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
- break;
- case UCOL_QUATERNARY:
- // We shouldn't get quaternary or identical from ICU, but if we do,
- // put them into variant.
- resolved->Set(v8::String::New("strength"), v8::String::New("quaternary"));
- resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
- break;
- default:
- resolved->Set(v8::String::New("strength"), v8::String::New("identical"));
- resolved->Set(v8::String::New("sensitivity"), v8::String::New("variant"));
- }
-
- if (UCOL_SHIFTED == collator->getAttribute(UCOL_ALTERNATE_HANDLING, status)) {
- resolved->Set(v8::String::New("ignorePunctuation"),
- v8::Boolean::New(true));
- } else {
- resolved->Set(v8::String::New("ignorePunctuation"),
- v8::Boolean::New(false));
- }
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
- if (U_SUCCESS(status)) {
- resolved->Set(v8::String::New("locale"), v8::String::New(result));
- } else {
- // This would never happen, since we got the locale from ICU.
- resolved->Set(v8::String::New("locale"), v8::String::New("und"));
- }
-}
-
-static void SetBooleanSetting(UColAttribute attribute,
- icu::Collator* collator,
- const char* property,
- v8::Handle<v8::Object> resolved) {
- UErrorCode status = U_ZERO_ERROR;
- if (UCOL_ON == collator->getAttribute(attribute, status)) {
- resolved->Set(v8::String::New(property), v8::Boolean::New(true));
- } else {
- resolved->Set(v8::String::New(property), v8::Boolean::New(false));
- }
-}
-
-} // namespace v8_i18n
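
The deleted collator binding boils down to creating an icu::Collator and mapping the sensitivity option onto a collation strength. A standalone ICU sketch of that core (again assuming ICU headers and libraries):

#include <cstdio>
#include "unicode/coll.h"
#include "unicode/locid.h"
#include "unicode/unistr.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::Collator* collator =
      icu::Collator::createInstance(icu::Locale("en"), status);
  if (U_FAILURE(status)) return 1;

  // 'base' sensitivity maps to PRIMARY strength in the deleted code, so
  // case differences are ignored.
  collator->setStrength(icu::Collator::PRIMARY);

  UCollationResult result = collator->compare(
      icu::UnicodeString("apple"), icu::UnicodeString("Apple"), status);
  std::printf("%s\n", result == UCOL_EQUAL ? "equal" : "different");

  delete collator;
  return 0;
}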
diff --git a/chromium/v8/src/extensions/i18n/collator.h b/chromium/v8/src/extensions/i18n/collator.h
deleted file mode 100644
index a3991b9ed24..00000000000
--- a/chromium/v8/src/extensions/i18n/collator.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// limitations under the License.
-
-#ifndef V8_EXTENSIONS_I18N_COLLATOR_H_
-#define V8_EXTENSIONS_I18N_COLLATOR_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace U_ICU_NAMESPACE {
-class Collator;
-class UnicodeString;
-}
-
-namespace v8_i18n {
-
-class Collator {
- public:
- static void JSCreateCollator(const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Helper methods for various bindings.
-
- // Unpacks collator object from corresponding JavaScript object.
- static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the Collator once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteCollator(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param);
-
- // Compares two strings and returns -1, 0, or 1 depending on
- // whether string1 is smaller than, equal to, or larger than string2.
- static void JSInternalCompare(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- private:
- Collator() {}
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_COLLATOR_H_
diff --git a/chromium/v8/src/extensions/i18n/collator.js b/chromium/v8/src/extensions/i18n/collator.js
deleted file mode 100644
index 3483515bef2..00000000000
--- a/chromium/v8/src/extensions/i18n/collator.js
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// limitations under the License.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Initializes the given object so it's a valid Collator instance.
- * Useful for subclassing.
- */
-function initializeCollator(collator, locales, options) {
- native function NativeJSCreateCollator();
-
- if (collator.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize Collator object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var getOption = getGetOption(options, 'collator');
-
- var internalOptions = {};
-
- defineWEProperty(internalOptions, 'usage', getOption(
- 'usage', 'string', ['sort', 'search'], 'sort'));
-
- var sensitivity = getOption('sensitivity', 'string',
- ['base', 'accent', 'case', 'variant']);
- if (sensitivity === undefined && internalOptions.usage === 'sort') {
- sensitivity = 'variant';
- }
- defineWEProperty(internalOptions, 'sensitivity', sensitivity);
-
- defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
- 'ignorePunctuation', 'boolean', undefined, false));
-
- var locale = resolveLocale('collator', locales, options);
-
- // ICU can't take kb, kc... parameters through localeID, so we need to pass
- // them as options.
- // One exception is -co- which has to be part of the extension, but only for
- // usage: sort, and its value can't be 'standard' or 'search'.
- var extensionMap = parseExtension(locale.extension);
- setOptions(
- options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
-
- var collation = 'default';
- var extension = '';
- if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
- if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
- extension = '-u-co-' + extensionMap.co;
- // ICU can't tell us what the collation is, so save user's input.
- collation = extensionMap.co;
- }
- } else if (internalOptions.usage === 'search') {
- extension = '-u-co-search';
- }
- defineWEProperty(internalOptions, 'collation', collation);
-
- var requestedLocale = locale.locale + extension;
-
- // We define all properties C++ code may produce, to prevent security
- // problems. If a malicious user decides to redefine Object.prototype.locale,
- // we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
- // Object.defineProperties will either succeed defining or throw an error.
- var resolved = Object.defineProperties({}, {
- caseFirst: {writable: true},
- collation: {value: internalOptions.collation, writable: true},
- ignorePunctuation: {writable: true},
- locale: {writable: true},
- numeric: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- sensitivity: {writable: true},
- strength: {writable: true},
- usage: {value: internalOptions.usage, writable: true}
- });
-
- var internalCollator = NativeJSCreateCollator(requestedLocale,
- internalOptions,
- resolved);
-
- // Writable, configurable and enumerable are set to false by default.
- Object.defineProperty(collator, 'collator', {value: internalCollator});
- Object.defineProperty(collator, '__initializedIntlObject',
- {value: 'collator'});
- Object.defineProperty(collator, 'resolved', {value: resolved});
-
- return collator;
-}
-
-
-/**
- * Constructs Intl.Collator object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'Collator', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.Collator(locales, options);
- }
-
- return initializeCollator(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * Collator resolvedOptions method.
- */
-%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'collator') {
- throw new TypeError('resolvedOptions method called on a non-object ' +
- 'or on an object that is not Intl.Collator.');
- }
-
- var coll = this;
- var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
- coll.resolved.locale);
-
- return {
- locale: locale,
- usage: coll.resolved.usage,
- sensitivity: coll.resolved.sensitivity,
- ignorePunctuation: coll.resolved.ignorePunctuation,
- numeric: coll.resolved.numeric,
- caseFirst: coll.resolved.caseFirst,
- collation: coll.resolved.collation
- };
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
-%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
-%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * Options are an optional parameter.
- */
-%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('collator', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
-%SetNativeFlag(Intl.Collator.supportedLocalesOf);
-
-
-/**
- * When the compare method is called with two arguments x and y, it returns a
- * Number other than NaN that represents the result of a locale-sensitive
- * String comparison of x with y.
- * The result is intended to order String values in the sort order specified
- * by the effective locale and collation options computed during construction
- * of this Collator object, and will be negative, zero, or positive, depending
- * on whether x comes before y in the sort order, the Strings are equal under
- * the sort order, or x comes after y in the sort order, respectively.
- */
-function compare(collator, x, y) {
- native function NativeJSInternalCompare();
- return NativeJSInternalCompare(collator.collator, String(x), String(y));
-};
-
-
-addBoundMethod(Intl.Collator, 'compare', compare, 2);
diff --git a/chromium/v8/src/extensions/i18n/date-format.cc b/chromium/v8/src/extensions/i18n/date-format.cc
deleted file mode 100644
index 1058e37a58c..00000000000
--- a/chromium/v8/src/extensions/i18n/date-format.cc
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// limitations under the License.
-
-#include "date-format.h"
-
-#include <string.h>
-
-#include "i18n-utils.h"
-#include "unicode/calendar.h"
-#include "unicode/dtfmtsym.h"
-#include "unicode/dtptngen.h"
-#include "unicode/locid.h"
-#include "unicode/numsys.h"
-#include "unicode/smpdtfmt.h"
-#include "unicode/timezone.h"
-
-namespace v8_i18n {
-
-static icu::SimpleDateFormat* InitializeDateTimeFormat(v8::Handle<v8::String>,
- v8::Handle<v8::Object>,
- v8::Handle<v8::Object>);
-static icu::SimpleDateFormat* CreateICUDateFormat(const icu::Locale&,
- v8::Handle<v8::Object>);
-static void SetResolvedSettings(const icu::Locale&,
- icu::SimpleDateFormat*,
- v8::Handle<v8::Object>);
-
-icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
- v8::Handle<v8::Object> obj) {
- v8::HandleScope handle_scope;
-
- if (obj->HasOwnProperty(v8::String::New("dateFormat"))) {
- return static_cast<icu::SimpleDateFormat*>(
- obj->GetAlignedPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param) {
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a date time formatter.
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
- delete UnpackDateFormat(handle);
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
-}
-
-void DateFormat::JSInternalFormat(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- double millis = 0.0;
- if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsDate()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New(
- "Internal error. Formatter and date value have to be specified.")));
- return;
- } else {
- millis = v8::Date::Cast(*args[1])->NumberValue();
- }
-
- icu::SimpleDateFormat* date_format = UnpackDateFormat(args[0]->ToObject());
- if (!date_format) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("DateTimeFormat method called on an object "
- "that is not a DateTimeFormat.")));
- return;
- }
-
- icu::UnicodeString result;
- date_format->format(millis, result);
-
- args.GetReturnValue().Set(v8::String::New(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
-}
-
-void DateFormat::JSInternalParse(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::UnicodeString string_date;
- if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New(
- "Internal error. Formatter and string have to be specified.")));
- return;
- } else {
- if (!Utils::V8StringToUnicodeString(args[1], &string_date)) {
- string_date = "";
- }
- }
-
- icu::SimpleDateFormat* date_format = UnpackDateFormat(args[0]->ToObject());
- if (!date_format) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("DateTimeFormat method called on an object "
- "that is not a DateTimeFormat.")));
- return;
- }
-
- UErrorCode status = U_ZERO_ERROR;
- UDate date = date_format->parse(string_date, status);
- if (U_FAILURE(status)) {
- return;
- }
-
- args.GetReturnValue().Set(v8::Date::New(static_cast<double>(date)));
-}
-
-void DateFormat::JSCreateDateTimeFormat(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 3 ||
- !args[0]->IsString() ||
- !args[1]->IsObject() ||
- !args[2]->IsObject()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, wrong parameters.")));
- return;
- }
-
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::ObjectTemplate> date_format_template =
- Utils::GetTemplate(isolate);
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object = date_format_template->NewInstance();
- // But the handle shouldn't be empty.
- // That can happen if there was a stack overflow when creating the object.
- if (local_object.IsEmpty()) {
- args.GetReturnValue().Set(local_object);
- return;
- }
-
- // Set date time formatter as internal field of the resulting JS object.
- icu::SimpleDateFormat* date_format = InitializeDateTimeFormat(
- args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
-
- if (!date_format) {
- v8::ThrowException(v8::Exception::Error(v8::String::New(
- "Internal error. Couldn't create ICU date time formatter.")));
- return;
- } else {
- local_object->SetAlignedPointerInInternalField(0, date_format);
-
- v8::TryCatch try_catch;
- local_object->Set(v8::String::New("dateFormat"), v8::String::New("valid"));
- if (try_catch.HasCaught()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, couldn't set property.")));
- return;
- }
- }
-
- v8::Persistent<v8::Object> wrapper(isolate, local_object);
-  // Make the object handle weak so we can delete the formatter once GC kicks in.
- wrapper.MakeWeak<void>(NULL, &DeleteDateFormat);
- args.GetReturnValue().Set(wrapper);
- wrapper.ClearAndLeak();
-}
-
-static icu::SimpleDateFormat* InitializeDateTimeFormat(
- v8::Handle<v8::String> locale,
- v8::Handle<v8::Object> options,
- v8::Handle<v8::Object> resolved) {
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::AsciiValue bcp47_locale(locale);
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
- }
- icu_locale = icu::Locale(icu_result);
- }
-
- icu::SimpleDateFormat* date_format = CreateICUDateFormat(icu_locale, options);
- if (!date_format) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- date_format = CreateICUDateFormat(no_extension_locale, options);
-
- // Set resolved settings (pattern, numbering system, calendar).
- SetResolvedSettings(no_extension_locale, date_format, resolved);
- } else {
- SetResolvedSettings(icu_locale, date_format, resolved);
- }
-
- return date_format;
-}
-
-static icu::SimpleDateFormat* CreateICUDateFormat(
- const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
-  // Create the time zone specified by the user. We have to re-create the
-  // time zone since the calendar takes ownership of it.
- icu::TimeZone* tz = NULL;
- icu::UnicodeString timezone;
- if (Utils::ExtractStringSetting(options, "timeZone", &timezone)) {
- tz = icu::TimeZone::createTimeZone(timezone);
- } else {
- tz = icu::TimeZone::createDefault();
- }
-
- // Create a calendar using locale, and apply time zone to it.
- UErrorCode status = U_ZERO_ERROR;
- icu::Calendar* calendar =
- icu::Calendar::createInstance(tz, icu_locale, status);
-
-  // Make a formatter from the skeleton. The calendar and numbering system
-  // are added to the locale as Unicode extensions (if they were specified).
- icu::SimpleDateFormat* date_format = NULL;
- icu::UnicodeString skeleton;
- if (Utils::ExtractStringSetting(options, "skeleton", &skeleton)) {
- icu::DateTimePatternGenerator* generator =
- icu::DateTimePatternGenerator::createInstance(icu_locale, status);
- icu::UnicodeString pattern;
- if (U_SUCCESS(status)) {
- pattern = generator->getBestPattern(skeleton, status);
- delete generator;
- }
-
- date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
- if (U_SUCCESS(status)) {
- date_format->adoptCalendar(calendar);
- }
- }
-
- if (U_FAILURE(status)) {
- delete calendar;
- delete date_format;
- date_format = NULL;
- }
-
- return date_format;
-}
-
-static void SetResolvedSettings(const icu::Locale& icu_locale,
- icu::SimpleDateFormat* date_format,
- v8::Handle<v8::Object> resolved) {
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString pattern;
- date_format->toPattern(pattern);
- resolved->Set(v8::String::New("pattern"),
- v8::String::New(reinterpret_cast<const uint16_t*>(
- pattern.getBuffer()), pattern.length()));
-
- // Set time zone and calendar.
- if (date_format) {
- const icu::Calendar* calendar = date_format->getCalendar();
- const char* calendar_name = calendar->getType();
- resolved->Set(v8::String::New("calendar"), v8::String::New(calendar_name));
-
- const icu::TimeZone& tz = calendar->getTimeZone();
- icu::UnicodeString time_zone;
- tz.getID(time_zone);
-
- icu::UnicodeString canonical_time_zone;
- icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
- if (U_SUCCESS(status)) {
- if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
- resolved->Set(v8::String::New("timeZone"), v8::String::New("UTC"));
- } else {
- resolved->Set(v8::String::New("timeZone"),
- v8::String::New(reinterpret_cast<const uint16_t*>(
- canonical_time_zone.getBuffer()),
- canonical_time_zone.length()));
- }
- }
- }
-
-  // Ugly hack. ICU doesn't expose the numbering system in any way, so we
-  // have to assume that for a given locale the NumberingSystem constructor
-  // produces the same digits as NumberFormat/Calendar would.
- status = U_ZERO_ERROR;
- icu::NumberingSystem* numbering_system =
- icu::NumberingSystem::createInstance(icu_locale, status);
- if (U_SUCCESS(status)) {
- const char* ns = numbering_system->getName();
- resolved->Set(v8::String::New("numberingSystem"), v8::String::New(ns));
- } else {
- resolved->Set(v8::String::New("numberingSystem"), v8::Undefined());
- }
- delete numbering_system;
-
-  // Set the locale.
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
- if (U_SUCCESS(status)) {
- resolved->Set(v8::String::New("locale"), v8::String::New(result));
- } else {
-    // This should never happen, since we got the locale from ICU.
- resolved->Set(v8::String::New("locale"), v8::String::New("und"));
- }
-}
-
-} // namespace v8_i18n
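For reference, the JS half of the extension (date-format.js, deleted below) drives these three natives roughly as follows. A minimal sketch; the locale, skeleton and time zone values are assumed for illustration:

    var resolved = {};  // filled in by SetResolvedSettings() on the C++ side
    var formatter = NativeJSCreateDateTimeFormat(
        'de-DE', {skeleton: 'yMMMd', timeZone: 'Europe/Berlin'}, resolved);
    // JSInternalFormat() unpacks the icu::SimpleDateFormat from the wrapper's
    // internal field and formats the Date's millisecond value.
    var text = NativeJSInternalDateFormat(formatter, new Date());
    // JSInternalParse() returns a Date, or undefined when ICU fails to parse.
    var parsed = NativeJSInternalDateParse(formatter, text);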
diff --git a/chromium/v8/src/extensions/i18n/date-format.h b/chromium/v8/src/extensions/i18n/date-format.h
deleted file mode 100644
index daa5964e254..00000000000
--- a/chromium/v8/src/extensions/i18n/date-format.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_DATE_FORMAT_H_
-#define V8_EXTENSIONS_I18N_DATE_FORMAT_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace U_ICU_NAMESPACE {
-class SimpleDateFormat;
-}
-
-namespace v8_i18n {
-
-class DateFormat {
- public:
- static void JSCreateDateTimeFormat(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Helper methods for various bindings.
-
-  // Unpacks the date format object from the corresponding JavaScript object.
- static icu::SimpleDateFormat* UnpackDateFormat(
- v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the DateFormat once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteDateFormat(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param);
-
-  // Formats a date and returns the corresponding string.
- static void JSInternalFormat(const v8::FunctionCallbackInfo<v8::Value>& args);
-
-  // Parses a date and returns the corresponding Date object, or undefined
-  // if parsing failed.
- static void JSInternalParse(const v8::FunctionCallbackInfo<v8::Value>& args);
-
- private:
- DateFormat();
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_DATE_FORMAT_H_
diff --git a/chromium/v8/src/extensions/i18n/date-format.js b/chromium/v8/src/extensions/i18n/date-format.js
deleted file mode 100644
index 04e7a7c7b9d..00000000000
--- a/chromium/v8/src/extensions/i18n/date-format.js
+++ /dev/null
@@ -1,478 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Returns the LDML string representation of the options object.
- */
-function toLDMLString(options) {
- var getOption = getGetOption(options, 'dateformat');
-
- var ldmlString = '';
-
- var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(
- option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
-
- option = getOption('era', 'string', ['narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(
- option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
-
- option = getOption('year', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
-
- option = getOption('month', 'string',
- ['2-digit', 'numeric', 'narrow', 'short', 'long']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
- 'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
-
- option = getOption('day', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(
- option, {'2-digit': 'dd', 'numeric': 'd'});
-
- var hr12 = getOption('hour12', 'boolean');
- option = getOption('hour', 'string', ['2-digit', 'numeric']);
- if (hr12 === undefined) {
- ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
- } else if (hr12 === true) {
- ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
- } else {
- ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
- }
-
- option = getOption('minute', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
-
- option = getOption('second', 'string', ['2-digit', 'numeric']);
- ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
-
- option = getOption('timeZoneName', 'string', ['short', 'long']);
- ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
-
- return ldmlString;
-}
-
-
-/**
- * Returns the LDML equivalent of the current option, or an empty string.
- */
-function appendToLDMLString(option, pairs) {
- if (option !== undefined) {
- return pairs[option];
- } else {
- return '';
- }
-}
-
-
-/**
- * Returns an options object matching the given LDML representation.
- */
-function fromLDMLString(ldmlString) {
-  // First remove ''-quoted literal text, so we lose strings like 'Uhr'.
- ldmlString = ldmlString.replace(QUOTED_STRING_RE, '');
-
- var options = {};
- var match = ldmlString.match(/E{3,5}/g);
- options = appendToDateTimeObject(
- options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
-
- match = ldmlString.match(/G{3,5}/g);
- options = appendToDateTimeObject(
- options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
-
- match = ldmlString.match(/y{1,2}/g);
- options = appendToDateTimeObject(
- options, 'year', match, {y: 'numeric', yy: '2-digit'});
-
- match = ldmlString.match(/M{1,5}/g);
- options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
- M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
-
-  // Sometimes we get L (the stand-alone form) instead of M for the month.
- match = ldmlString.match(/L{1,5}/g);
- options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
- L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
-
- match = ldmlString.match(/d{1,2}/g);
- options = appendToDateTimeObject(
- options, 'day', match, {d: 'numeric', dd: '2-digit'});
-
- match = ldmlString.match(/h{1,2}/g);
- if (match !== null) {
- options['hour12'] = true;
- }
- options = appendToDateTimeObject(
- options, 'hour', match, {h: 'numeric', hh: '2-digit'});
-
- match = ldmlString.match(/H{1,2}/g);
- if (match !== null) {
- options['hour12'] = false;
- }
- options = appendToDateTimeObject(
- options, 'hour', match, {H: 'numeric', HH: '2-digit'});
-
- match = ldmlString.match(/m{1,2}/g);
- options = appendToDateTimeObject(
- options, 'minute', match, {m: 'numeric', mm: '2-digit'});
-
- match = ldmlString.match(/s{1,2}/g);
- options = appendToDateTimeObject(
- options, 'second', match, {s: 'numeric', ss: '2-digit'});
-
- match = ldmlString.match(/v{1,2}/g);
- options = appendToDateTimeObject(
- options, 'timeZoneName', match, {v: 'short', vv: 'long'});
-
- return options;
-}
-
-
-function appendToDateTimeObject(options, option, match, pairs) {
- if (match === null) {
- if (!options.hasOwnProperty(option)) {
- defineWEProperty(options, option, undefined);
- }
- return options;
- }
-
- var property = match[0];
- defineWEProperty(options, option, pairs[property]);
-
- return options;
-}
-
-
-/**
- * Returns options with at least the default values filled in.
- */
-function toDateTimeOptions(options, required, defaults) {
- if (options === undefined) {
- options = null;
- } else {
- options = toObject(options);
- }
-
- options = Object.apply(this, [options]);
-
- var needsDefault = true;
- if ((required === 'date' || required === 'any') &&
- (options.weekday !== undefined || options.year !== undefined ||
- options.month !== undefined || options.day !== undefined)) {
- needsDefault = false;
- }
-
- if ((required === 'time' || required === 'any') &&
- (options.hour !== undefined || options.minute !== undefined ||
- options.second !== undefined)) {
- needsDefault = false;
- }
-
- if (needsDefault && (defaults === 'date' || defaults === 'all')) {
- Object.defineProperty(options, 'year', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'month', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'day', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- }
-
- if (needsDefault && (defaults === 'time' || defaults === 'all')) {
- Object.defineProperty(options, 'hour', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'minute', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- Object.defineProperty(options, 'second', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- }
-
- return options;
-}
-
-
-/**
- * Initializes the given object so it's a valid DateTimeFormat instance.
- * Useful for subclassing.
- */
-function initializeDateTimeFormat(dateFormat, locales, options) {
- native function NativeJSCreateDateTimeFormat();
-
- if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize DateTimeFormat object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var locale = resolveLocale('dateformat', locales, options);
-
- options = toDateTimeOptions(options, 'any', 'date');
-
- var getOption = getGetOption(options, 'dateformat');
-
-  // We implement only the 'best fit' algorithm, but still need to check
- // if the formatMatcher values are in range.
- var matcher = getOption('formatMatcher', 'string',
- ['basic', 'best fit'], 'best fit');
-
- // Build LDML string for the skeleton that we pass to the formatter.
- var ldmlString = toLDMLString(options);
-
-  // Filter out the supported extension keys so we know what to put in the
-  // resolved section later on. We need to pass the calendar and numbering
-  // system to the method.
- var tz = canonicalizeTimeZoneID(options.timeZone);
-
- // ICU prefers options to be passed using -u- extension key/values, so
- // we need to build that.
- var internalOptions = {};
- var extensionMap = parseExtension(locale.extension);
- var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
- getOption, internalOptions);
-
- var requestedLocale = locale.locale + extension;
- var resolved = Object.defineProperties({}, {
- calendar: {writable: true},
- day: {writable: true},
- era: {writable: true},
- hour12: {writable: true},
- hour: {writable: true},
- locale: {writable: true},
- minute: {writable: true},
- month: {writable: true},
- numberingSystem: {writable: true},
- pattern: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- second: {writable: true},
- timeZone: {writable: true},
- timeZoneName: {writable: true},
- tz: {value: tz, writable: true},
- weekday: {writable: true},
- year: {writable: true}
- });
-
- var formatter = NativeJSCreateDateTimeFormat(
- requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
-
- if (tz !== undefined && tz !== resolved.timeZone) {
- throw new RangeError('Unsupported time zone specified ' + tz);
- }
-
- Object.defineProperty(dateFormat, 'formatter', {value: formatter});
- Object.defineProperty(dateFormat, 'resolved', {value: resolved});
- Object.defineProperty(dateFormat, '__initializedIntlObject',
- {value: 'dateformat'});
-
- return dateFormat;
-}
-
-
-/**
- * Constructs Intl.DateTimeFormat object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'DateTimeFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.DateTimeFormat(locales, options);
- }
-
- return initializeDateTimeFormat(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * DateTimeFormat resolvedOptions method.
- */
-%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'dateformat') {
-    throw new TypeError('resolvedOptions method called on a non-object or ' +
-        'on an object that is not Intl.DateTimeFormat.');
- }
-
- var format = this;
- var fromPattern = fromLDMLString(format.resolved.pattern);
- var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
- if (userCalendar === undefined) {
- // Use ICU name if we don't have a match. It shouldn't happen, but
- // it would be too strict to throw for this.
- userCalendar = format.resolved.calendar;
- }
-
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
-
- var result = {
- locale: locale,
- numberingSystem: format.resolved.numberingSystem,
- calendar: userCalendar,
- timeZone: format.resolved.timeZone
- };
-
- addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
- addWECPropertyIfDefined(result, 'era', fromPattern.era);
- addWECPropertyIfDefined(result, 'year', fromPattern.year);
- addWECPropertyIfDefined(result, 'month', fromPattern.month);
- addWECPropertyIfDefined(result, 'day', fromPattern.day);
- addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
- addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
- addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
- addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
- addWECPropertyIfDefined(result, 'second', fromPattern.second);
-
- return result;
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this service
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * The options argument is optional.
- */
-%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('dateformat', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
-
-
-/**
- * Returns a String value representing the result of calling ToNumber(date)
- * according to the effective locale and the formatting options of this
- * DateTimeFormat.
- */
-function formatDate(formatter, dateValue) {
- native function NativeJSInternalDateFormat();
-
- var dateMs;
- if (dateValue === undefined) {
- dateMs = Date.now();
- } else {
- dateMs = Number(dateValue);
- }
-
- if (!isFinite(dateMs)) {
- throw new RangeError('Provided date is not in valid range.');
- }
-
- return NativeJSInternalDateFormat(formatter.formatter, new Date(dateMs));
-}
-
-
-/**
- * Returns a Date object representing the result of calling ToString(value)
- * according to the effective locale and the formatting options of this
- * DateTimeFormat.
- * Returns undefined if date string cannot be parsed.
- */
-function parseDate(formatter, value) {
- native function NativeJSInternalDateParse();
- return NativeJSInternalDateParse(formatter.formatter, String(value));
-}
-
-
-// 0 because the date argument is optional.
-addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
-addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
-
-
-/**
- * Returns canonical Area/Location name, or throws an exception if the zone
- * name is not a valid IANA name.
- */
-function canonicalizeTimeZoneID(tzID) {
- // Skip undefined zones.
- if (tzID === undefined) {
- return tzID;
- }
-
- // Special case handling (UTC, GMT).
- var upperID = tzID.toUpperCase();
- if (upperID === 'UTC' || upperID === 'GMT' ||
- upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
- return 'UTC';
- }
-
-  // We expect only _ and / besides ASCII letters.
- // All inputs should conform to Area/Location from now on.
- var match = TIMEZONE_NAME_CHECK_RE.exec(tzID);
- if (match === null) {
- throw new RangeError('Expected Area/Location for time zone, got ' + tzID);
- }
-
- var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
- var i = 3;
- while (match[i] !== undefined && i < match.length) {
- result = result + '_' + toTitleCaseWord(match[i]);
- i++;
- }
-
- return result;
-}
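A worked example of the LDML round trip implemented above; the options bag is assumed, and the mapping follows toLDMLString() and fromLDMLString() as written:

    var options = {year: 'numeric', month: 'short', day: '2-digit',
                   hour: 'numeric', minute: '2-digit', hour12: true};
    // toLDMLString() walks the fields in a fixed order (weekday, era, year,
    // month, day, hour, minute, second, timeZoneName) and yields the skeleton
    //   'y' + 'MMM' + 'dd' + 'h' + 'mm'  ->  'yMMMddhmm'
    // ICU turns that skeleton into a concrete pattern; fromLDMLString() later
    // recovers an options object from the pattern for resolvedOptions().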
diff --git a/chromium/v8/src/extensions/i18n/footer.js b/chromium/v8/src/extensions/i18n/footer.js
deleted file mode 100644
index adaa6334624..00000000000
--- a/chromium/v8/src/extensions/i18n/footer.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-// Fix RegExp global state so we don't fail WebKit layout test:
-// fast/js/regexp-caching.html
-// It seems that 'g' or test() operations leave the state changed.
-var CLEANUP_RE = new RegExp('');
-CLEANUP_RE.test('');
-
-return Intl;
-}())});
diff --git a/chromium/v8/src/extensions/i18n/globals.js b/chromium/v8/src/extensions/i18n/globals.js
deleted file mode 100644
index 68fabe777f5..00000000000
--- a/chromium/v8/src/extensions/i18n/globals.js
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-/**
- * List of available services.
- */
-var AVAILABLE_SERVICES = ['collator',
- 'numberformat',
- 'dateformat',
- 'breakiterator'];
-
-/**
- * Caches available locales for each service.
- */
-var AVAILABLE_LOCALES = {
- 'collator': undefined,
- 'numberformat': undefined,
- 'dateformat': undefined,
- 'breakiterator': undefined
-};
-
-/**
- * Caches default ICU locale.
- */
-var DEFAULT_ICU_LOCALE = undefined;
-
-/**
- * Unicode extension regular expression.
- */
-var UNICODE_EXTENSION_RE = new RegExp('-u(-[a-z0-9]{2,8})+', 'g');
-
-/**
- * Matches any Unicode extension.
- */
-var ANY_EXTENSION_RE = new RegExp('-[a-z0-9]{1}-.*', 'g');
-
-/**
- * Replace quoted text (single quote, anything but the quote and quote again).
- */
-var QUOTED_STRING_RE = new RegExp("'[^']+'", 'g');
-
-/**
- * Matches valid service name.
- */
-var SERVICE_RE =
- new RegExp('^(collator|numberformat|dateformat|breakiterator)$');
-
-/**
- * Validates a language tag against the BCP 47 spec.
- * Actual value is assigned on first run.
- */
-var LANGUAGE_TAG_RE = undefined;
-
-/**
- * Helps find duplicate variants in the language tag.
- */
-var LANGUAGE_VARIANT_RE = undefined;
-
-/**
- * Helps find duplicate singletons in the language tag.
- */
-var LANGUAGE_SINGLETON_RE = undefined;
-
-/**
- * Matches valid IANA time zone names.
- */
-var TIMEZONE_NAME_CHECK_RE =
- new RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
-
-/**
- * Maps ICU calendar names into LDML type.
- */
-var ICU_CALENDAR_MAP = {
- 'gregorian': 'gregory',
- 'japanese': 'japanese',
- 'buddhist': 'buddhist',
- 'roc': 'roc',
- 'persian': 'persian',
- 'islamic-civil': 'islamicc',
- 'islamic': 'islamic',
- 'hebrew': 'hebrew',
- 'chinese': 'chinese',
- 'indian': 'indian',
- 'coptic': 'coptic',
- 'ethiopic': 'ethiopic',
- 'ethiopic-amete-alem': 'ethioaa'
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a collator.
- */
-var COLLATOR_KEY_MAP = {
- 'kn': {'property': 'numeric', 'type': 'boolean'},
- 'kf': {'property': 'caseFirst', 'type': 'string',
- 'values': ['false', 'lower', 'upper']}
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a number format.
- */
-var NUMBER_FORMAT_KEY_MAP = {
- 'nu': {'property': undefined, 'type': 'string'}
-};
-
-/**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a date/time format.
- */
-var DATETIME_FORMAT_KEY_MAP = {
- 'ca': {'property': undefined, 'type': 'string'},
- 'nu': {'property': undefined, 'type': 'string'}
-};
-
-/**
- * Allowed -u-co- values. List taken from:
- * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
- */
-var ALLOWED_CO_VALUES = [
- 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
- 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
-];
-
-/**
- * Object attributes (configurable, writable, enumerable).
- * To combine attributes, OR them.
- * Values/names are copied from v8/include/v8.h:PropertyAttribute
- */
-var ATTRIBUTES = {
- 'NONE': 0,
- 'READ_ONLY': 1,
- 'DONT_ENUM': 2,
- 'DONT_DELETE': 4
-};
-
-/**
- * Error message for when a function object is created with new and it's not
- * a constructor.
- */
-var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
- 'Function object that\'s not a constructor was created with new';
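Two of these globals in use, as the deleted service files consume them; the sample zone string is an assumption:

    // canonicalizeTimeZoneID() in date-format.js title-cases the match groups:
    TIMEZONE_NAME_CHECK_RE.exec('america/new_york');
    // -> ['america/new_york', 'america', 'new', 'york'],
    //    rebuilt as 'America/New_York'
    // ATTRIBUTES values are bit flags and can be OR-ed together:
    var readOnlyHidden = ATTRIBUTES.READ_ONLY | ATTRIBUTES.DONT_ENUM;  // == 3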
diff --git a/chromium/v8/src/extensions/i18n/header.js b/chromium/v8/src/extensions/i18n/header.js
deleted file mode 100644
index b854ce5eada..00000000000
--- a/chromium/v8/src/extensions/i18n/header.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * The Intl object is a single object that has some named properties,
- * all of which are constructors.
- */
-Object.defineProperty(this, "Intl", { enumerable: false, value: (function() {
-
-'use strict';
-
-var Intl = {};
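header.js above and footer.js earlier in this diff bracket the per-service files, so the script the build system concatenates has roughly this shape (the splice point of the service files is inferred, not shown in this diff):

    Object.defineProperty(this, "Intl", { enumerable: false, value: (function() {
    'use strict';
    var Intl = {};
    // ... globals.js, i18n-utils.js and the per-service files (collator,
    // number format, date format, break iterator) are spliced in here ...
    var CLEANUP_RE = new RegExp('');  // from footer.js
    CLEANUP_RE.test('');
    return Intl;
    }())});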
diff --git a/chromium/v8/src/extensions/i18n/i18n-extension.cc b/chromium/v8/src/extensions/i18n/i18n-extension.cc
deleted file mode 100644
index 1c77b8899fb..00000000000
--- a/chromium/v8/src/extensions/i18n/i18n-extension.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "i18n-extension.h"
-
-#include "break-iterator.h"
-#include "collator.h"
-#include "date-format.h"
-#include "locale.h"
-#include "natives.h"
-#include "number-format.h"
-
-using v8::internal::I18NNatives;
-
-namespace v8_i18n {
-
-Extension::Extension()
- : v8::Extension("v8/i18n",
- reinterpret_cast<const char*>(
- I18NNatives::GetScriptsSource().start()),
- 0,
- 0,
- I18NNatives::GetScriptsSource().length()) {}
-
-v8::Handle<v8::FunctionTemplate> Extension::GetNativeFunction(
- v8::Handle<v8::String> name) {
- // Standalone, helper methods.
- if (name->Equals(v8::String::New("NativeJSCanonicalizeLanguageTag"))) {
- return v8::FunctionTemplate::New(JSCanonicalizeLanguageTag);
- } else if (name->Equals(v8::String::New("NativeJSAvailableLocalesOf"))) {
- return v8::FunctionTemplate::New(JSAvailableLocalesOf);
- } else if (name->Equals(v8::String::New("NativeJSGetDefaultICULocale"))) {
- return v8::FunctionTemplate::New(JSGetDefaultICULocale);
- } else if (name->Equals(v8::String::New("NativeJSGetLanguageTagVariants"))) {
- return v8::FunctionTemplate::New(JSGetLanguageTagVariants);
- }
-
- // Date format and parse.
- if (name->Equals(v8::String::New("NativeJSCreateDateTimeFormat"))) {
- return v8::FunctionTemplate::New(DateFormat::JSCreateDateTimeFormat);
- } else if (name->Equals(v8::String::New("NativeJSInternalDateFormat"))) {
- return v8::FunctionTemplate::New(DateFormat::JSInternalFormat);
- } else if (name->Equals(v8::String::New("NativeJSInternalDateParse"))) {
- return v8::FunctionTemplate::New(DateFormat::JSInternalParse);
- }
-
- // Number format and parse.
- if (name->Equals(v8::String::New("NativeJSCreateNumberFormat"))) {
- return v8::FunctionTemplate::New(NumberFormat::JSCreateNumberFormat);
- } else if (name->Equals(v8::String::New("NativeJSInternalNumberFormat"))) {
- return v8::FunctionTemplate::New(NumberFormat::JSInternalFormat);
- } else if (name->Equals(v8::String::New("NativeJSInternalNumberParse"))) {
- return v8::FunctionTemplate::New(NumberFormat::JSInternalParse);
- }
-
- // Collator.
- if (name->Equals(v8::String::New("NativeJSCreateCollator"))) {
- return v8::FunctionTemplate::New(Collator::JSCreateCollator);
- } else if (name->Equals(v8::String::New("NativeJSInternalCompare"))) {
- return v8::FunctionTemplate::New(Collator::JSInternalCompare);
- }
-
- // Break iterator.
- if (name->Equals(v8::String::New("NativeJSCreateBreakIterator"))) {
- return v8::FunctionTemplate::New(BreakIterator::JSCreateBreakIterator);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorAdoptText"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorAdoptText);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorFirst"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorFirst);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorNext"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorNext);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorCurrent"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorCurrent);
- } else if (name->Equals(v8::String::New("NativeJSBreakIteratorBreakType"))) {
- return v8::FunctionTemplate::New(
- BreakIterator::JSInternalBreakIteratorBreakType);
- }
-
- return v8::Handle<v8::FunctionTemplate>();
-}
-
-
-void Extension::Register() {
- static Extension i18n_extension;
- static v8::DeclareExtension extension_declaration(&i18n_extension);
-}
-
-} // namespace v8_i18n
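The names that GetNativeFunction() switches on are declared lazily on the JS side with the "native function" syntax, as in date-format.js above. A sketch of the pattern with a hypothetical wrapper; the argument shape mirrors NativeJSCreateDateTimeFormat and is an assumption for the collator:

    function createCollator(locale, options, resolved) {
      // Declaring the native makes V8 call Extension::GetNativeFunction()
      // with the name 'NativeJSCreateCollator' to resolve it.
      native function NativeJSCreateCollator();
      return NativeJSCreateCollator(locale, options, resolved);
    }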
diff --git a/chromium/v8/src/extensions/i18n/i18n-extension.h b/chromium/v8/src/extensions/i18n/i18n-extension.h
deleted file mode 100644
index 050c336a67a..00000000000
--- a/chromium/v8/src/extensions/i18n/i18n-extension.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
-
-#include "v8.h"
-
-namespace v8_i18n {
-
-class Extension : public v8::Extension {
- public:
- Extension();
-
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
-
- static void Register();
-
- private:
- static Extension* extension_;
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_I18N_EXTENSION_H_
diff --git a/chromium/v8/src/extensions/i18n/i18n-utils.cc b/chromium/v8/src/extensions/i18n/i18n-utils.cc
deleted file mode 100644
index eac11669047..00000000000
--- a/chromium/v8/src/extensions/i18n/i18n-utils.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "i18n-utils.h"
-
-#include <string.h>
-
-#include "unicode/unistr.h"
-
-namespace v8_i18n {
-
-// static
-void Utils::StrNCopy(char* dest, int length, const char* src) {
- if (!dest || !src) return;
-
- strncpy(dest, src, length);
- dest[length - 1] = '\0';
-}
-
-
-// static
-bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
- icu::UnicodeString* output) {
- v8::String::Utf8Value utf8_value(input);
-
- if (*utf8_value == NULL) return false;
-
- output->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
-
- return true;
-}
-
-
-// static
-bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- icu::UnicodeString* result) {
- if (!setting || !result) return false;
-
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
- return V8StringToUnicodeString(value, result);
- }
- return false;
-}
-
-
-// static
-bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- int32_t* result) {
- if (!setting || !result) return false;
-
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull() && value->IsNumber()) {
- *result = static_cast<int32_t>(value->Int32Value());
- return true;
- }
- return false;
-}
-
-
-// static
-bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- bool* result) {
- if (!setting || !result) return false;
-
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull() && value->IsBoolean()) {
- *result = static_cast<bool>(value->BooleanValue());
- return true;
- }
- return false;
-}
-
-
-// static
-void Utils::AsciiToUChar(const char* source,
- int32_t source_length,
- UChar* target,
- int32_t target_length) {
- int32_t length =
- source_length < target_length ? source_length : target_length;
-
- if (length <= 0) {
- return;
- }
-
- for (int32_t i = 0; i < length - 1; ++i) {
- target[i] = static_cast<UChar>(source[i]);
- }
-
- target[length - 1] = 0x0u;
-}
-
-
-// static
-v8::Local<v8::ObjectTemplate> Utils::GetTemplate(v8::Isolate* isolate) {
- i::Isolate* internal = reinterpret_cast<i::Isolate*>(isolate);
- if (internal->heap()->i18n_template_one() ==
- internal->heap()->the_hole_value()) {
- v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
- raw_template->SetInternalFieldCount(1);
- internal->heap()
- ->SetI18nTemplateOne(*v8::Utils::OpenHandle(*raw_template));
- }
-
- return v8::Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>::cast(
- internal->factory()->i18n_template_one()));
-}
-
-
-// static
-v8::Local<v8::ObjectTemplate> Utils::GetTemplate2(v8::Isolate* isolate) {
- i::Isolate* internal = reinterpret_cast<i::Isolate*>(isolate);
- if (internal->heap()->i18n_template_two() ==
- internal->heap()->the_hole_value()) {
- v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
- raw_template->SetInternalFieldCount(2);
- internal->heap()
- ->SetI18nTemplateTwo(*v8::Utils::OpenHandle(*raw_template));
- }
-
- return v8::Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>::cast(
- internal->factory()->i18n_template_two()));
-}
-
-} // namespace v8_i18n
diff --git a/chromium/v8/src/extensions/i18n/i18n-utils.h b/chromium/v8/src/extensions/i18n/i18n-utils.h
deleted file mode 100644
index db5d1b6ac0d..00000000000
--- a/chromium/v8/src/extensions/i18n/i18n-utils.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_SRC_UTILS_H_
-#define V8_EXTENSIONS_I18N_SRC_UTILS_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace U_ICU_NAMESPACE {
-class UnicodeString;
-}
-
-namespace v8_i18n {
-
-class Utils {
- public:
- // Safe string copy. Null terminates the destination. Copies at most
- // (length - 1) bytes.
- // We can't use snprintf since it's not supported on all relevant platforms.
- // We can't use OS::SNPrintF, it's only for internal code.
- static void StrNCopy(char* dest, int length, const char* src);
-
- // Converts v8::String into UnicodeString. Returns false if input
-  // can't be converted to UTF-8.
- static bool V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
- icu::UnicodeString* output);
-
- // Extract a String setting named in |settings| and set it to |result|.
- // Return true if it's specified. Otherwise, return false.
- static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- icu::UnicodeString* result);
-
-  // Extract an Integer setting named in |settings| and set it to |result|.
- // Return true if it's specified. Otherwise, return false.
- static bool ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- int32_t* result);
-
- // Extract a Boolean setting named in |settings| and set it to |result|.
- // Return true if it's specified. Otherwise, return false.
- static bool ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- bool* result);
-
-  // Converts an ASCII array into a UChar array.
-  // The target is always \0 terminated.
- static void AsciiToUChar(const char* source,
- int32_t source_length,
- UChar* target,
- int32_t target_length);
-
- // Creates an ObjectTemplate with one internal field.
- static v8::Local<v8::ObjectTemplate> GetTemplate(v8::Isolate* isolate);
-
- // Creates an ObjectTemplate with two internal fields.
- static v8::Local<v8::ObjectTemplate> GetTemplate2(v8::Isolate* isolate);
-
- private:
- Utils() {}
-};
-
-} // namespace v8_i18n
-
-#endif  // V8_EXTENSIONS_I18N_SRC_UTILS_H_
diff --git a/chromium/v8/src/extensions/i18n/i18n-utils.js b/chromium/v8/src/extensions/i18n/i18n-utils.js
deleted file mode 100644
index d7e9486c507..00000000000
--- a/chromium/v8/src/extensions/i18n/i18n-utils.js
+++ /dev/null
@@ -1,541 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Adds a bound method to the prototype of the given object.
- */
-function addBoundMethod(obj, methodName, implementation, length) {
- function getter() {
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject === undefined) {
- throw new TypeError('Method ' + methodName + ' called on a ' +
- 'non-object or on a wrong type of object.');
- }
- var internalName = '__bound' + methodName + '__';
- if (this[internalName] === undefined) {
- var that = this;
- var boundMethod;
- if (length === undefined || length === 2) {
- boundMethod = function(x, y) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
- return implementation(that, x, y);
- }
- } else if (length === 1) {
- boundMethod = function(x) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
- return implementation(that, x);
- }
- } else {
- boundMethod = function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
- // DateTimeFormat.format needs to be a 0-argument method, but can still
- // receive an optional dateValue param. If one was provided, pass it
- // along.
- if (arguments.length > 0) {
- return implementation(that, arguments[0]);
- } else {
- return implementation(that);
- }
- }
- }
- %FunctionSetName(boundMethod, internalName);
- %FunctionRemovePrototype(boundMethod);
- %SetNativeFlag(boundMethod);
- this[internalName] = boundMethod;
- }
- return this[internalName];
- }
-
- %FunctionSetName(getter, methodName);
- %FunctionRemovePrototype(getter);
- %SetNativeFlag(getter);
-
- Object.defineProperty(obj.prototype, methodName, {
- get: getter,
- enumerable: false,
- configurable: true
- });
-}
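-
-// Illustrative usage (hypothetical caller, not part of this file): a service
-// implementation such as collator.js would be expected to wire up its bound
-// method roughly like this:
-//   addBoundMethod(Intl.Collator, 'compare',
-//                  function(collator, x, y) { /* native compare */ }, 2);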
-
-
-/**
- * Returns an intersection of locales and service supported locales.
- * Parameter locales is treated as a priority list.
- */
-function supportedLocalesOf(service, locales, options) {
- if (service.match(SERVICE_RE) === null) {
- throw new Error('Internal error, wrong service type: ' + service);
- }
-
- // Provide defaults if matcher was not specified.
- if (options === undefined) {
- options = {};
- } else {
- options = toObject(options);
- }
-
- var matcher = options.localeMatcher;
- if (matcher !== undefined) {
- matcher = String(matcher);
- if (matcher !== 'lookup' && matcher !== 'best fit') {
- throw new RangeError('Illegal value for localeMatcher: ' + matcher);
- }
- } else {
- matcher = 'best fit';
- }
-
- var requestedLocales = initializeLocaleList(locales);
-
- // Cache these; they never change for a given service.
- if (AVAILABLE_LOCALES[service] === undefined) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
-
- // Use either best fit or lookup algorithm to match locales.
- if (matcher === 'best fit') {
- return initializeLocaleList(bestFitSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
- }
-
- return initializeLocaleList(lookupSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
-}
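-
-// Example (service name and availability are illustrative): a call such as
-//   supportedLocalesOf('collator', ['de-DE', 'xx'], {localeMatcher: 'lookup'})
-// would return ['de-DE'] when German is supported and 'xx' is not.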
-
-
-/**
- * Returns the subset of the provided BCP 47 language priority list for which
- * this service has a matching locale when using the BCP 47 Lookup algorithm.
- * Locales appear in the same order in the returned list as in the input list.
- */
-function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
- var matchedLocales = [];
- for (var i = 0; i < requestedLocales.length; ++i) {
- // Remove -u- extension.
- var locale = requestedLocales[i].replace(UNICODE_EXTENSION_RE, '');
- do {
- if (availableLocales[locale] !== undefined) {
- // Push the requested locale, not the resolved one.
- matchedLocales.push(requestedLocales[i]);
- break;
- }
- // Truncate the locale if possible; if not, break.
- var pos = locale.lastIndexOf('-');
- if (pos === -1) {
- break;
- }
- locale = locale.substring(0, pos);
- } while (true);
- }
-
- return matchedLocales;
-}
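-
-// Worked example (assumed availability): with requestedLocales
-// ['de-CH-1996-u-co-phonebk'] and availableLocales containing 'de-CH', the
-// -u- extension is stripped, 'de-CH-1996' misses, the tag truncates to
-// 'de-CH' which matches, and the original request string is returned.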
-
-
-/**
- * Returns the subset of the provided BCP 47 language priority list for which
- * this service has a matching locale when using the
- * implementation-dependent algorithm.
- * Locales appear in the same order in the returned list as in the input list.
- */
-function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
- // TODO(cira): implement better best fit algorithm.
- return lookupSupportedLocalesOf(requestedLocales, availableLocales);
-}
-
-
-/**
- * Returns a getOption function that extracts a property value from the given
- * options object. If the property is missing, it returns defaultValue. If the
- * value is out of range for that property, it throws RangeError.
- */
-function getGetOption(options, caller) {
- if (options === undefined) {
- throw new Error('Internal ' + caller + ' error. ' +
- 'Default options are missing.');
- }
-
- var getOption = function getOption(property, type, values, defaultValue) {
- if (options[property] !== undefined) {
- var value = options[property];
- switch (type) {
- case 'boolean':
- value = Boolean(value);
- break;
- case 'string':
- value = String(value);
- break;
- case 'number':
- value = Number(value);
- break;
- default:
- throw new Error('Internal error. Wrong value type.');
- }
- if (values !== undefined && values.indexOf(value) === -1) {
- throw new RangeError('Value ' + value + ' out of range for ' + caller +
- ' options property ' + property);
- }
-
- return value;
- }
-
- return defaultValue;
- }
-
- return getOption;
-}
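-
-// Example (property names are illustrative): given
-//   var getOption = getGetOption({usage: 'search'}, 'collator');
-// the call getOption('usage', 'string', ['sort', 'search'], 'sort') returns
-// 'search', while getOption('numeric', 'boolean', undefined, false) falls
-// back to false because the property is absent.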
-
-
-/**
- * Compares a BCP 47 language priority list requestedLocales against the locales
- * in availableLocales and determines the best available language to meet the
- * request. Two algorithms are available to match the locales: the Lookup
- * algorithm described in RFC 4647 section 3.4, and an implementation dependent
- * best-fit algorithm. Independent of the locale matching algorithm, options
- * specified through Unicode locale extension sequences are negotiated
- * separately, taking the caller's relevant extension keys and locale data as
- * well as client-provided options into consideration. Returns an object with
- * a locale property whose value is the language tag of the selected locale,
- * and properties for each key in relevantExtensionKeys providing the selected
- * value for that key.
- */
-function resolveLocale(service, requestedLocales, options) {
- requestedLocales = initializeLocaleList(requestedLocales);
-
- var getOption = getGetOption(options, service);
- var matcher = getOption('localeMatcher', 'string',
- ['lookup', 'best fit'], 'best fit');
- var resolved;
- if (matcher === 'lookup') {
- resolved = lookupMatcher(service, requestedLocales);
- } else {
- resolved = bestFitMatcher(service, requestedLocales);
- }
-
- return resolved;
-}
-
-
-/**
- * Returns the best-matched supported locale and extension info using the
- * basic lookup algorithm.
- */
-function lookupMatcher(service, requestedLocales) {
- native function NativeJSGetDefaultICULocale();
-
- if (service.match(SERVICE_RE) === null) {
- throw new Error('Internal error, wrong service type: ' + service);
- }
-
- // Cache these; they never change for a given service.
- if (AVAILABLE_LOCALES[service] === undefined) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
-
- for (var i = 0; i < requestedLocales.length; ++i) {
- // Remove all extensions.
- var locale = requestedLocales[i].replace(ANY_EXTENSION_RE, '');
- do {
- if (AVAILABLE_LOCALES[service][locale] !== undefined) {
- // Return the resolved locale and extension.
- var extensionMatch = requestedLocales[i].match(UNICODE_EXTENSION_RE);
- var extension = (extensionMatch === null) ? '' : extensionMatch[0];
- return {'locale': locale, 'extension': extension, 'position': i};
- }
- // Truncate locale if possible.
- var pos = locale.lastIndexOf('-');
- if (pos === -1) {
- break;
- }
- locale = locale.substring(0, pos);
- } while (true);
- }
-
- // Didn't find a match, return default.
- if (DEFAULT_ICU_LOCALE === undefined) {
- DEFAULT_ICU_LOCALE = NativeJSGetDefaultICULocale();
- }
-
- return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
-}
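-
-// Worked example (assumed availability): for requestedLocales
-// ['fr-CA-u-ca-buddhist'] with only 'fr' available, all extensions are
-// stripped ('fr-CA'), truncation finds 'fr', and the result is
-// {'locale': 'fr', 'extension': '-u-ca-buddhist', 'position': 0}.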
-
-
-/**
- * Returns the best-matched supported locale and extension info using an
- * implementation-dependent algorithm.
- */
-function bestFitMatcher(service, requestedLocales) {
- // TODO(cira): implement better best fit algorithm.
- return lookupMatcher(service, requestedLocales);
-}
-
-
-/**
- * Parses Unicode extension into key - value map.
- * Returns empty object if the extension string is invalid.
- * We are not concerned with the validity of the values at this point.
- */
-function parseExtension(extension) {
- var extensionSplit = extension.split('-');
-
- // Assume ['', 'u', ...] input, but don't throw.
- if (extensionSplit.length <= 2 ||
- (extensionSplit[0] !== '' && extensionSplit[1] !== 'u')) {
- return {};
- }
-
- // Key is {2}alphanum, value is {3,8}alphanum.
- // Some keys may not have explicit values (booleans).
- var extensionMap = {};
- var previousKey = undefined;
- for (var i = 2; i < extensionSplit.length; ++i) {
- var length = extensionSplit[i].length;
- var element = extensionSplit[i];
- if (length === 2) {
- extensionMap[element] = undefined;
- previousKey = element;
- } else if (length >= 3 && length <= 8 && previousKey !== undefined) {
- extensionMap[previousKey] = element;
- previousKey = undefined;
- } else {
- // There is a value that's too long, or that doesn't have a key.
- return {};
- }
- }
-
- return extensionMap;
-}
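-
-// Worked example: parseExtension('-u-ca-gregory-co-phonebk-kn') returns
-// {ca: 'gregory', co: 'phonebk', kn: undefined}; two-letter tokens become
-// keys, longer tokens become values, and the trailing boolean key 'kn'
-// keeps an undefined value.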
-
-
-/**
- * Converts parameter to an Object if possible.
- */
-function toObject(value) {
- if (value === undefined || value === null) {
- throw new TypeError('Value cannot be converted to an Object.');
- }
-
- return Object(value);
-}
-
-
-/**
- * Populates internalOptions object with boolean key-value pairs
- * from extensionMap and options.
- * Returns filtered extension (number and date format constructors use
- * Unicode extensions for passing parameters to ICU).
- * It's used for extension-option pairs only, e.g. kn-normalization, but not
- * for 'sensitivity', since that has no extension equivalent.
- * Extensions like nu and ca have no options equivalent, so we place
- * undefined in map.property to denote that.
- */
-function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
- var extension = '';
-
- var updateExtension = function updateExtension(key, value) {
- return '-' + key + '-' + String(value);
- }
-
- var updateProperty = function updateProperty(property, type, value) {
- if (type === 'boolean' && (typeof value === 'string')) {
- value = (value === 'true') ? true : false;
- }
-
- if (property !== undefined) {
- defineWEProperty(outOptions, property, value);
- }
- }
-
- for (var key in keyValues) {
- if (keyValues.hasOwnProperty(key)) {
- var value = undefined;
- var map = keyValues[key];
- if (map.property !== undefined) {
- // This may return true if the user specifies numeric: 'false', since
- // Boolean('nonempty') === true.
- value = getOption(map.property, map.type, map.values);
- }
- if (value !== undefined) {
- updateProperty(map.property, map.type, value);
- extension += updateExtension(key, value);
- continue;
- }
- // The user options didn't have it; check the Unicode extension.
- // Here we want to convert the strings 'true' and 'false' into proper
- // Boolean values (not a user error).
- if (extensionMap.hasOwnProperty(key)) {
- value = extensionMap[key];
- if (value !== undefined) {
- updateProperty(map.property, map.type, value);
- extension += updateExtension(key, value);
- } else if (map.type === 'boolean') {
- // Boolean keys are allowed not to have values in Unicode extension.
- // Those default to true.
- updateProperty(map.property, map.type, true);
- extension += updateExtension(key, true);
- }
- }
- }
- }
-
- return extension === '' ? '' : '-u' + extension;
-}
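-
-// Worked example (key map shape assumed): with keyValues
-// {kn: {property: 'numeric', type: 'boolean'}}, an extensionMap of
-// {kn: undefined} and no matching user option, the boolean key defaults to
-// true, outOptions.numeric is set to true and '-u-kn-true' is returned.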
-
-
-/**
- * Converts all OwnProperties into
- * configurable: false, writable: false, enumerable: true.
- */
-function freezeArray(array) {
- array.forEach(function(element, index) {
- Object.defineProperty(array, index, {value: element,
- configurable: false,
- writable: false,
- enumerable: true});
- });
-
- Object.defineProperty(array, 'length', {value: array.length,
- writable: false});
-
- return array;
-}
-
-
-/**
- * It's sometimes desirable to keep the user-requested locale instead of the
- * ICU-supported one (zh-TW is equivalent to zh-Hant-TW, so we should keep the
- * shorter form if that's what the user requested).
- * This function returns the user-specified tag if its maximized form matches
- * the ICU-resolved locale. If not, we return the ICU result.
- */
-function getOptimalLanguageTag(original, resolved) {
- // Returns Array<Object>, where each object has maximized and base properties.
- // Maximized: zh -> zh-Hans-CN
- // Base: zh-CN-u-ca-gregory -> zh-CN
- native function NativeJSGetLanguageTagVariants();
-
- // Take care of grandfathered or simple cases.
- if (original === resolved) {
- return original;
- }
-
- var locales = NativeJSGetLanguageTagVariants([original, resolved]);
- if (locales[0].maximized !== locales[1].maximized) {
- return resolved;
- }
-
- // Preserve extensions of resolved locale, but swap base tags with original.
- var resolvedBase = new RegExp('^' + locales[1].base);
- return resolved.replace(resolvedBase, locales[0].base);
-}
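-
-// Worked example (assuming the native call maximizes zh-TW to zh-Hant-TW):
-// for original 'zh-TW' and resolved 'zh-Hant-TW-u-co-stroke', the maximized
-// forms match, so the base tags are swapped and 'zh-TW-u-co-stroke' is
-// returned.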
-
-
-/**
- * Returns an Object that contains all of supported locales for a given
- * service.
- * In addition to the supported locales, we add an xx-ZZ locale for each
- * supported xx-Yyyy-ZZ. This is required by the spec.
- */
-function getAvailableLocalesOf(service) {
- native function NativeJSAvailableLocalesOf();
- var available = NativeJSAvailableLocalesOf(service);
-
- for (var i in available) {
- if (available.hasOwnProperty(i)) {
- var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
- if (parts !== null) {
- // Build xx-ZZ. We don't care about the actual value,
- // as long as it's not undefined.
- available[parts[1] + '-' + parts[3]] = null;
- }
- }
- }
-
- return available;
-}
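-
-// Example of the spec-mandated aliasing: if ICU reports 'zh-Hant-TW' as
-// available, the returned object also gains a 'zh-TW' key, so lookups on
-// the script-less form succeed.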
-
-
-/**
- * Defines a property and sets writable and enumerable to true.
- * Configurable is false by default.
- */
-function defineWEProperty(object, property, value) {
- Object.defineProperty(object, property,
- {value: value, writable: true, enumerable: true});
-}
-
-
-/**
- * Adds a property to an object if the value is not undefined.
- * Sets configurable descriptor to false.
- */
-function addWEPropertyIfDefined(object, property, value) {
- if (value !== undefined) {
- defineWEProperty(object, property, value);
- }
-}
-
-
-/**
- * Defines a property and sets writable, enumerable and configurable to true.
- */
-function defineWECProperty(object, property, value) {
- Object.defineProperty(object, property,
- {value: value,
- writable: true,
- enumerable: true,
- configurable: true});
-}
-
-
-/**
- * Adds a property to an object if the value is not undefined.
- * Sets all descriptors to true.
- */
-function addWECPropertyIfDefined(object, property, value) {
- if (value !== undefined) {
- defineWECProperty(object, property, value);
- }
-}
-
-
-/**
- * Returns the title-cased word, e.g. aMeRicA -> America.
- */
-function toTitleCaseWord(word) {
- return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
-}
diff --git a/chromium/v8/src/extensions/i18n/locale.cc b/chromium/v8/src/extensions/i18n/locale.cc
deleted file mode 100644
index 6b6f9ac314d..00000000000
--- a/chromium/v8/src/extensions/i18n/locale.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "locale.h"
-
-#include <string.h>
-
-#include "unicode/brkiter.h"
-#include "unicode/coll.h"
-#include "unicode/datefmt.h"
-#include "unicode/numfmt.h"
-#include "unicode/uloc.h"
-#include "unicode/uversion.h"
-
-namespace v8_i18n {
-
-void JSCanonicalizeLanguageTag(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- // Expect a locale id, which is a string.
- if (args.Length() != 1 || !args[0]->IsString()) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Locale identifier, as a string, is required.")));
- return;
- }
-
- UErrorCode error = U_ZERO_ERROR;
-
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
-
- // Return value that denotes an invalid language tag.
- const char* const kInvalidTag = "invalid-tag";
-
- v8::String::AsciiValue locale_id(args[0]->ToString());
- if (*locale_id == NULL) {
- args.GetReturnValue().Set(v8::String::New(kInvalidTag));
- return;
- }
-
- uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &error);
- if (U_FAILURE(error) || icu_length == 0) {
- args.GetReturnValue().Set(v8::String::New(kInvalidTag));
- return;
- }
-
- char result[ULOC_FULLNAME_CAPACITY];
-
- // Force strict BCP47 rules.
- uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
-
- if (U_FAILURE(error)) {
- args.GetReturnValue().Set(v8::String::New(kInvalidTag));
- return;
- }
-
- args.GetReturnValue().Set(v8::String::New(result));
-}
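-
-// Illustrative behavior (assuming default ICU data): a JS call with 'en-us'
-// would come back as 'en-US', while malformed input such as 'not a tag'
-// yields the sentinel 'invalid-tag'.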
-
-
-void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args) {
- // Expect a service name, which is a string.
- if (args.Length() != 1 || !args[0]->IsString()) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Service identifier, as a string, is required.")));
- return;
- }
-
- const icu::Locale* available_locales = NULL;
-
- int32_t count = 0;
- v8::String::AsciiValue service(args[0]->ToString());
- if (strcmp(*service, "collator") == 0) {
- available_locales = icu::Collator::getAvailableLocales(count);
- } else if (strcmp(*service, "numberformat") == 0) {
- available_locales = icu::NumberFormat::getAvailableLocales(count);
- } else if (strcmp(*service, "dateformat") == 0) {
- available_locales = icu::DateFormat::getAvailableLocales(count);
- } else if (strcmp(*service, "breakiterator") == 0) {
- available_locales = icu::BreakIterator::getAvailableLocales(count);
- }
-
- v8::TryCatch try_catch;
- UErrorCode error = U_ZERO_ERROR;
- char result[ULOC_FULLNAME_CAPACITY];
- v8::Handle<v8::Object> locales = v8::Object::New();
-
- for (int32_t i = 0; i < count; ++i) {
- const char* icu_name = available_locales[i].getName();
-
- error = U_ZERO_ERROR;
- // No need to force strict BCP47 rules.
- uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
- if (U_FAILURE(error)) {
- // This shouldn't happen, but let's not break the user.
- continue;
- }
-
- // Index is just a dummy value for the property value.
- locales->Set(v8::String::New(result), v8::Integer::New(i));
- if (try_catch.HasCaught()) {
- // Ignore error, but stop processing and return.
- break;
- }
- }
-
- args.GetReturnValue().Set(locales);
-}
-
-
-void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
- icu::Locale default_locale;
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- default_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
- if (U_SUCCESS(status)) {
- args.GetReturnValue().Set(v8::String::New(result));
- return;
- }
-
- args.GetReturnValue().Set(v8::String::New("und"));
-}
-
-
-void JSGetLanguageTagVariants(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::TryCatch try_catch;
-
- // Expect an array of strings.
- if (args.Length() != 1 || !args[0]->IsArray()) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Internal error. Expected Array<String>.")));
- return;
- }
-
- v8::Local<v8::Array> input = v8::Local<v8::Array>::Cast(args[0]);
- v8::Handle<v8::Array> output = v8::Array::New(input->Length());
- for (unsigned int i = 0; i < input->Length(); ++i) {
- v8::Local<v8::Value> locale_id = input->Get(i);
- if (try_catch.HasCaught()) {
- break;
- }
-
- if (!locale_id->IsString()) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Internal error. Array element is missing "
- "or it isn't a string.")));
- return;
- }
-
- v8::String::AsciiValue ascii_locale_id(locale_id);
- if (*ascii_locale_id == NULL) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Internal error. Non-ASCII locale identifier.")));
- return;
- }
-
- UErrorCode error = U_ZERO_ERROR;
-
- // Convert from BCP47 to ICU format.
- // de-DE-u-co-phonebk -> de_DE@collation=phonebook
- char icu_locale[ULOC_FULLNAME_CAPACITY];
- int icu_locale_length = 0;
- uloc_forLanguageTag(*ascii_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY,
- &icu_locale_length, &error);
- if (U_FAILURE(error) || icu_locale_length == 0) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Internal error. Failed to convert locale to ICU.")));
- return;
- }
-
- // Maximize the locale.
- // de_DE@collation=phonebook -> de_Latn_DE@collation=phonebook
- char icu_max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_addLikelySubtags(
- icu_locale, icu_max_locale, ULOC_FULLNAME_CAPACITY, &error);
-
- // Remove extensions from maximized locale.
- // de_Latn_DE@collation=phonebook -> de_Latn_DE
- char icu_base_max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_getBaseName(
- icu_max_locale, icu_base_max_locale, ULOC_FULLNAME_CAPACITY, &error);
-
- // Get original name without extensions.
- // de_DE@collation=phonebook -> de_DE
- char icu_base_locale[ULOC_FULLNAME_CAPACITY];
- uloc_getBaseName(
- icu_locale, icu_base_locale, ULOC_FULLNAME_CAPACITY, &error);
-
- // Convert from ICU locale format to BCP47 format.
- // de_Latn_DE -> de-Latn-DE
- char base_max_locale[ULOC_FULLNAME_CAPACITY];
- uloc_toLanguageTag(icu_base_max_locale, base_max_locale,
- ULOC_FULLNAME_CAPACITY, FALSE, &error);
-
- // de_DE -> de-DE
- char base_locale[ULOC_FULLNAME_CAPACITY];
- uloc_toLanguageTag(
- icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY, FALSE, &error);
-
- if (U_FAILURE(error)) {
- v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Internal error. Couldn't generate maximized "
- "or base locale.")));
- return;
- }
-
- v8::Handle<v8::Object> result = v8::Object::New();
- result->Set(v8::String::New("maximized"), v8::String::New(base_max_locale));
- result->Set(v8::String::New("base"), v8::String::New(base_locale));
- if (try_catch.HasCaught()) {
- break;
- }
-
- output->Set(i, result);
- if (try_catch.HasCaught()) {
- break;
- }
- }
-
- args.GetReturnValue().Set(output);
-}
-
-} // namespace v8_i18n
diff --git a/chromium/v8/src/extensions/i18n/locale.h b/chromium/v8/src/extensions/i18n/locale.h
deleted file mode 100644
index c39568e5d9d..00000000000
--- a/chromium/v8/src/extensions/i18n/locale.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_SRC_LOCALE_H_
-#define V8_EXTENSIONS_I18N_SRC_LOCALE_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace v8_i18n {
-
-// Canonicalizes the BCP47 language tag using BCP47 rules.
-// Returns 'invalid-tag' in case input was not well formed.
-void JSCanonicalizeLanguageTag(const v8::FunctionCallbackInfo<v8::Value>& args);
-
-// Returns a list of available locales for collator, date or number formatter.
-void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args);
-
-// Returns default ICU locale.
-void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Returns an array of objects that have the maximized and base names of
- // the inputs. Unicode extensions are dropped from both.
-// Input: ['zh-TW-u-nu-thai', 'sr']
-// Output: [{maximized: 'zh-Hant-TW', base: 'zh-TW'},
-// {maximized: 'sr-Cyrl-RS', base: 'sr'}]
-void JSGetLanguageTagVariants(const v8::FunctionCallbackInfo<v8::Value>& args);
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_SRC_LOCALE_H_
diff --git a/chromium/v8/src/extensions/i18n/locale.js b/chromium/v8/src/extensions/i18n/locale.js
deleted file mode 100644
index ea95b87192e..00000000000
--- a/chromium/v8/src/extensions/i18n/locale.js
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Canonicalizes the language tag, or throws in case the tag is invalid.
- */
-function canonicalizeLanguageTag(localeID) {
- native function NativeJSCanonicalizeLanguageTag();
-
- // typeof null is 'object', so we have to do an extra check.
- if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
- localeID === null) {
- throw new TypeError('Language ID should be string or object.');
- }
-
- var localeString = String(localeID);
-
- if (isValidLanguageTag(localeString) === false) {
- throw new RangeError('Invalid language tag: ' + localeString);
- }
-
- // This call will strip -kn but not -kn-true extensions.
- // ICU bug filed - http://bugs.icu-project.org/trac/ticket/9265.
- // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
- // upgrade to ICU 4.9.
- var tag = NativeJSCanonicalizeLanguageTag(localeString);
- if (tag === 'invalid-tag') {
- throw new RangeError('Invalid language tag: ' + localeString);
- }
-
- return tag;
-}
-
-
-/**
- * Returns an array where all locales are canonicalized and duplicates removed.
- * Throws on locales that are not well formed BCP47 tags.
- */
-function initializeLocaleList(locales) {
- var seen = [];
- if (locales === undefined) {
- // Constructor is called without arguments.
- seen = [];
- } else {
- // We allow a single string localeID.
- if (typeof locales === 'string') {
- seen.push(canonicalizeLanguageTag(locales));
- return freezeArray(seen);
- }
-
- var o = toObject(locales);
- // Converts it to Uint32 (>>> is a shift-right on 32-bit integers).
- var len = o.length >>> 0;
-
- for (var k = 0; k < len; k++) {
- if (k in o) {
- var value = o[k];
-
- var tag = canonicalizeLanguageTag(value);
-
- if (seen.indexOf(tag) === -1) {
- seen.push(tag);
- }
- }
- }
- }
-
- return freezeArray(seen);
-}
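-
-// Example (canonical forms assumed from ICU): initializeLocaleList(
-// ['EN-us', 'en-US', 'sr']) canonicalizes both spellings to 'en-US', drops
-// the duplicate, and returns the frozen array ['en-US', 'sr'].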
-
-
-/**
- * Validates the language tag. Section 2.2.9 of the BCP 47 spec
- * defines a valid tag.
- *
- * ICU is too permissive and lets invalid tags, like
- * hant-cmn-cn, through.
- *
- * Returns false if the language tag is invalid.
- */
-function isValidLanguageTag(locale) {
- // Check if it's well-formed, including grandfathered tags.
- if (LANGUAGE_TAG_RE.test(locale) === false) {
- return false;
- }
-
- // Just return if it's an x- form. It's all private.
- if (locale.indexOf('x-') === 0) {
- return true;
- }
-
- // Check if there are any duplicate variants or singletons (extensions).
-
- // Remove private use section.
- locale = locale.split(/-x-/)[0];
-
- // Skip the language subtag since it can match the variant regex, so we
- // start from 1. We are matching i-klingon here, but that's ok, since
- // i-klingon-klingon is not valid and would fail the LANGUAGE_TAG_RE test.
- var variants = [];
- var extensions = [];
- var parts = locale.split(/-/);
- for (var i = 1; i < parts.length; i++) {
- var value = parts[i];
- if (LANGUAGE_VARIANT_RE.test(value) === true && extensions.length === 0) {
- if (variants.indexOf(value) === -1) {
- variants.push(value);
- } else {
- return false;
- }
- }
-
- if (LANGUAGE_SINGLETON_RE.test(value) === true) {
- if (extensions.indexOf(value) === -1) {
- extensions.push(value);
- } else {
- return false;
- }
- }
- }
-
- return true;
-}
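-
-// Examples: 'de-DE-u-co-phonebk' passes; 'de-u-co-u-nu' fails because the
-// singleton 'u' appears twice; 'de-1996-1996' fails because the variant
-// '1996' is duplicated.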
-
-
-/**
- * Builds a regular expression that validates the language tag
- * against the BCP 47 spec.
- * Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
- * Runs on load and initializes the global REs.
- */
-(function() {
- var alpha = '[a-zA-Z]';
- var digit = '[0-9]';
- var alphanum = '(' + alpha + '|' + digit + ')';
- var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
- 'zh-min|zh-min-nan|zh-xiang)';
- var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
- 'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
- 'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
- var grandfathered = '(' + irregular + '|' + regular + ')';
- var privateUse = '(x(-' + alphanum + '{1,8})+)';
-
- var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
- LANGUAGE_SINGLETON_RE = new RegExp('^' + singleton + '$', 'i');
-
- var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
-
- var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
- LANGUAGE_VARIANT_RE = new RegExp('^' + variant + '$', 'i');
-
- var region = '(' + alpha + '{2}|' + digit + '{3})';
- var script = '(' + alpha + '{4})';
- var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
- var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
- alpha + '{5,8})';
- var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
- variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
-
- var languageTag =
- '^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
- LANGUAGE_TAG_RE = new RegExp(languageTag, 'i');
-})();
diff --git a/chromium/v8/src/extensions/i18n/number-format.cc b/chromium/v8/src/extensions/i18n/number-format.cc
deleted file mode 100644
index 136471561c4..00000000000
--- a/chromium/v8/src/extensions/i18n/number-format.cc
+++ /dev/null
@@ -1,418 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "number-format.h"
-
-#include <string.h>
-
-#include "i18n-utils.h"
-#include "unicode/curramt.h"
-#include "unicode/dcfmtsym.h"
-#include "unicode/decimfmt.h"
-#include "unicode/locid.h"
-#include "unicode/numfmt.h"
-#include "unicode/numsys.h"
-#include "unicode/uchar.h"
-#include "unicode/ucurr.h"
-#include "unicode/unum.h"
-#include "unicode/uversion.h"
-
-namespace v8_i18n {
-
-static icu::DecimalFormat* InitializeNumberFormat(v8::Handle<v8::String>,
- v8::Handle<v8::Object>,
- v8::Handle<v8::Object>);
-static icu::DecimalFormat* CreateICUNumberFormat(const icu::Locale&,
- v8::Handle<v8::Object>);
-static void SetResolvedSettings(const icu::Locale&,
- icu::DecimalFormat*,
- v8::Handle<v8::Object>);
-
-icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
- v8::Handle<v8::Object> obj) {
- v8::HandleScope handle_scope;
-
- // v8::ObjectTemplate doesn't have a HasInstance method, so we can't check
- // whether obj is an instance of the NumberFormat class. We'll check for a
- // property that has to be in the object instead. The same applies to other
- // services, like Collator and DateTimeFormat.
- if (obj->HasOwnProperty(v8::String::New("numberFormat"))) {
- return static_cast<icu::DecimalFormat*>(
- obj->GetAlignedPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param) {
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a number formatter.
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> handle = v8::Local<v8::Object>::New(isolate, *object);
- delete UnpackNumberFormat(handle);
-
- // Then dispose of the persistent handle to JS object.
- object->Dispose(isolate);
-}
-
-void NumberFormat::JSInternalFormat(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsNumber()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Formatter and numeric value have to be specified.")));
- return;
- }
-
- icu::DecimalFormat* number_format = UnpackNumberFormat(args[0]->ToObject());
- if (!number_format) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("NumberFormat method called on an object "
- "that is not a NumberFormat.")));
- return;
- }
-
- // ICU handles an actual NaN value properly and returns a NaN string.
- icu::UnicodeString result;
- number_format->format(args[1]->NumberValue(), result);
-
- args.GetReturnValue().Set(v8::String::New(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
-}
-
-void NumberFormat::JSInternalParse(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 2 || !args[0]->IsObject() || !args[1]->IsString()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Formatter and string have to be specified.")));
- return;
- }
-
- icu::DecimalFormat* number_format = UnpackNumberFormat(args[0]->ToObject());
- if (!number_format) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("NumberFormat method called on an object "
- "that is not a NumberFormat.")));
- return;
- }
-
- // Convert the V8 string into an ICU string, falling back to empty.
- icu::UnicodeString string_number;
- if (!Utils::V8StringToUnicodeString(args[1]->ToString(), &string_number)) {
- string_number = "";
- }
-
- UErrorCode status = U_ZERO_ERROR;
- icu::Formattable result;
- // ICU 4.6 doesn't support the parseCurrency call. We need to wait for
- // ICU 49 to be part of Chrome.
- // TODO(cira): Include currency parsing code using parseCurrency call.
- // We need to check if the formatter parses all currencies or only the
- // one it was constructed with (it will impact the API - how to return ISO
- // code and the value).
- number_format->parse(string_number, result, status);
- if (U_FAILURE(status)) {
- return;
- }
-
- switch (result.getType()) {
- case icu::Formattable::kDouble:
- args.GetReturnValue().Set(result.getDouble());
- return;
- case icu::Formattable::kLong:
- args.GetReturnValue().Set(result.getLong());
- return;
- case icu::Formattable::kInt64:
- args.GetReturnValue().Set(static_cast<double>(result.getInt64()));
- return;
- default:
- return;
- }
-}
-
-void NumberFormat::JSCreateNumberFormat(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 3 ||
- !args[0]->IsString() ||
- !args[1]->IsObject() ||
- !args[2]->IsObject()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, wrong parameters.")));
- return;
- }
-
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::ObjectTemplate> number_format_template =
- Utils::GetTemplate(isolate);
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object = number_format_template->NewInstance();
- // The handle can still be empty, though. That happens if there was a
- // stack overflow when creating the object.
- if (local_object.IsEmpty()) {
- args.GetReturnValue().Set(local_object);
- return;
- }
-
- // Set number formatter as internal field of the resulting JS object.
- icu::DecimalFormat* number_format = InitializeNumberFormat(
- args[0]->ToString(), args[1]->ToObject(), args[2]->ToObject());
-
- if (!number_format) {
- v8::ThrowException(v8::Exception::Error(v8::String::New(
- "Internal error. Couldn't create ICU number formatter.")));
- return;
- } else {
- local_object->SetAlignedPointerInInternalField(0, number_format);
-
- v8::TryCatch try_catch;
- local_object->Set(v8::String::New("numberFormat"),
- v8::String::New("valid"));
- if (try_catch.HasCaught()) {
- v8::ThrowException(v8::Exception::Error(
- v8::String::New("Internal error, couldn't set property.")));
- return;
- }
- }
-
- v8::Persistent<v8::Object> wrapper(isolate, local_object);
- // Make the object handle weak so we can delete the formatter once GC
- // kicks in.
- wrapper.MakeWeak<void>(NULL, &DeleteNumberFormat);
- args.GetReturnValue().Set(wrapper);
- wrapper.ClearAndLeak();
-}
-
-static icu::DecimalFormat* InitializeNumberFormat(
- v8::Handle<v8::String> locale,
- v8::Handle<v8::Object> options,
- v8::Handle<v8::Object> resolved) {
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::AsciiValue bcp47_locale(locale);
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return NULL;
- }
- icu_locale = icu::Locale(icu_result);
- }
-
- icu::DecimalFormat* number_format =
- CreateICUNumberFormat(icu_locale, options);
- if (!number_format) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- number_format = CreateICUNumberFormat(no_extension_locale, options);
-
- // Set resolved settings (pattern, numbering system).
- SetResolvedSettings(no_extension_locale, number_format, resolved);
- } else {
- SetResolvedSettings(icu_locale, number_format, resolved);
- }
-
- return number_format;
-}
-
-static icu::DecimalFormat* CreateICUNumberFormat(
- const icu::Locale& icu_locale, v8::Handle<v8::Object> options) {
- // Make a formatter from the options. The numbering system is added
- // to the locale as a Unicode extension (if it was specified at all).
- UErrorCode status = U_ZERO_ERROR;
- icu::DecimalFormat* number_format = NULL;
- icu::UnicodeString style;
- icu::UnicodeString currency;
- if (Utils::ExtractStringSetting(options, "style", &style)) {
- if (style == UNICODE_STRING_SIMPLE("currency")) {
- Utils::ExtractStringSetting(options, "currency", &currency);
-
- icu::UnicodeString display;
- Utils::ExtractStringSetting(options, "currencyDisplay", &display);
-#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
- icu::NumberFormat::EStyles style;
- if (display == UNICODE_STRING_SIMPLE("code")) {
- style = icu::NumberFormat::kIsoCurrencyStyle;
- } else if (display == UNICODE_STRING_SIMPLE("name")) {
- style = icu::NumberFormat::kPluralCurrencyStyle;
- } else {
- style = icu::NumberFormat::kCurrencyStyle;
- }
-#else // ICU version is 4.8 or above (we ignore versions below 4.0).
- UNumberFormatStyle style;
- if (display == UNICODE_STRING_SIMPLE("code")) {
- style = UNUM_CURRENCY_ISO;
- } else if (display == UNICODE_STRING_SIMPLE("name")) {
- style = UNUM_CURRENCY_PLURAL;
- } else {
- style = UNUM_CURRENCY;
- }
-#endif
-
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, style, status));
- } else if (style == UNICODE_STRING_SIMPLE("percent")) {
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createPercentInstance(icu_locale, status));
- if (U_FAILURE(status)) {
- delete number_format;
- return NULL;
- }
- // Make sure a value like 1.1% isn't rounded to a whole percent.
- number_format->setMinimumFractionDigits(1);
- } else {
- // Make a decimal instance by default.
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, status));
- }
- }
-
- if (U_FAILURE(status)) {
- delete number_format;
- return NULL;
- }
-
- // Set all options.
- if (!currency.isEmpty()) {
- number_format->setCurrency(currency.getBuffer(), status);
- }
-
- int32_t digits;
- if (Utils::ExtractIntegerSetting(
- options, "minimumIntegerDigits", &digits)) {
- number_format->setMinimumIntegerDigits(digits);
- }
-
- if (Utils::ExtractIntegerSetting(
- options, "minimumFractionDigits", &digits)) {
- number_format->setMinimumFractionDigits(digits);
- }
-
- if (Utils::ExtractIntegerSetting(
- options, "maximumFractionDigits", &digits)) {
- number_format->setMaximumFractionDigits(digits);
- }
-
- bool significant_digits_used = false;
- if (Utils::ExtractIntegerSetting(
- options, "minimumSignificantDigits", &digits)) {
- number_format->setMinimumSignificantDigits(digits);
- significant_digits_used = true;
- }
-
- if (Utils::ExtractIntegerSetting(
- options, "maximumSignificantDigits", &digits)) {
- number_format->setMaximumSignificantDigits(digits);
- significant_digits_used = true;
- }
-
- number_format->setSignificantDigitsUsed(significant_digits_used);
-
- bool grouping;
- if (Utils::ExtractBooleanSetting(options, "useGrouping", &grouping)) {
- number_format->setGroupingUsed(grouping);
- }
-
- // Set rounding mode.
- number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
-
- return number_format;
-}
-
-static void SetResolvedSettings(const icu::Locale& icu_locale,
- icu::DecimalFormat* number_format,
- v8::Handle<v8::Object> resolved) {
- icu::UnicodeString pattern;
- number_format->toPattern(pattern);
- resolved->Set(v8::String::New("pattern"),
- v8::String::New(reinterpret_cast<const uint16_t*>(
- pattern.getBuffer()), pattern.length()));
-
- // Set resolved currency code in options.currency if not empty.
- icu::UnicodeString currency(number_format->getCurrency());
- if (!currency.isEmpty()) {
- resolved->Set(v8::String::New("currency"),
- v8::String::New(reinterpret_cast<const uint16_t*>(
- currency.getBuffer()), currency.length()));
- }
-
- // Ugly hack. ICU doesn't expose the numbering system in any way, so we
- // have to assume that for a given locale the NumberingSystem constructor
- // produces the same digits as NumberFormat would.
- UErrorCode status = U_ZERO_ERROR;
- icu::NumberingSystem* numbering_system =
- icu::NumberingSystem::createInstance(icu_locale, status);
- if (U_SUCCESS(status)) {
- const char* ns = numbering_system->getName();
- resolved->Set(v8::String::New("numberingSystem"), v8::String::New(ns));
- } else {
- resolved->Set(v8::String::New("numberingSystem"), v8::Undefined());
- }
- delete numbering_system;
-
- resolved->Set(v8::String::New("useGrouping"),
- v8::Boolean::New(number_format->isGroupingUsed()));
-
- resolved->Set(v8::String::New("minimumIntegerDigits"),
- v8::Integer::New(number_format->getMinimumIntegerDigits()));
-
- resolved->Set(v8::String::New("minimumFractionDigits"),
- v8::Integer::New(number_format->getMinimumFractionDigits()));
-
- resolved->Set(v8::String::New("maximumFractionDigits"),
- v8::Integer::New(number_format->getMaximumFractionDigits()));
-
- if (resolved->HasOwnProperty(v8::String::New("minimumSignificantDigits"))) {
- resolved->Set(v8::String::New("minimumSignificantDigits"), v8::Integer::New(
- number_format->getMinimumSignificantDigits()));
- }
-
- if (resolved->HasOwnProperty(v8::String::New("maximumSignificantDigits"))) {
- resolved->Set(v8::String::New("maximumSignificantDigits"), v8::Integer::New(
- number_format->getMaximumSignificantDigits()));
- }
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
- if (U_SUCCESS(status)) {
- resolved->Set(v8::String::New("locale"), v8::String::New(result));
- } else {
- // This should never happen, since we got the locale from ICU.
- resolved->Set(v8::String::New("locale"), v8::String::New("und"));
- }
-}
-
-} // namespace v8_i18n
diff --git a/chromium/v8/src/extensions/i18n/number-format.h b/chromium/v8/src/extensions/i18n/number-format.h
deleted file mode 100644
index d4dbc4d6f3b..00000000000
--- a/chromium/v8/src/extensions/i18n/number-format.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_I18N_NUMBER_FORMAT_H_
-#define V8_EXTENSIONS_I18N_NUMBER_FORMAT_H_
-
-#include "unicode/uversion.h"
-#include "v8.h"
-
-namespace U_ICU_NAMESPACE {
-class DecimalFormat;
-}
-
-namespace v8_i18n {
-
-class NumberFormat {
- public:
- static void JSCreateNumberFormat(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Helper methods for various bindings.
-
- // Unpacks the number format object from the corresponding JavaScript
- // object.
- static icu::DecimalFormat* UnpackNumberFormat(v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the NumberFormat once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteNumberFormat(v8::Isolate* isolate,
- v8::Persistent<v8::Object>* object,
- void* param);
-
- // Formats number and returns corresponding string.
- static void JSInternalFormat(const v8::FunctionCallbackInfo<v8::Value>& args);
-
- // Parses a string and returns a number.
- static void JSInternalParse(const v8::FunctionCallbackInfo<v8::Value>& args);
-
- private:
- NumberFormat();
-};
-
-} // namespace v8_i18n
-
-#endif // V8_EXTENSIONS_I18N_NUMBER_FORMAT_H_
diff --git a/chromium/v8/src/extensions/i18n/number-format.js b/chromium/v8/src/extensions/i18n/number-format.js
deleted file mode 100644
index 1cd3db13554..00000000000
--- a/chromium/v8/src/extensions/i18n/number-format.js
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-/**
- * Verifies that the input is a well-formed ISO 4217 currency code.
- * Don't uppercase before testing; that could convert an invalid code into a
- * valid one. For example, \u00DFP (Eszett+P) becomes SSP.
- */
-function isWellFormedCurrencyCode(currency) {
- return typeof currency == "string" &&
- currency.length == 3 &&
- currency.match(/[^A-Za-z]/) == null;
-}
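-
-// Examples: isWellFormedCurrencyCode('EUR') and ('eur') are true (three
-// letters, any case), while ('EURO') and ('E1R') are false.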
-
-
-/**
- * Returns the valid digit count for a property, or throws RangeError on
- * a value out of range.
- */
-function getNumberOption(options, property, min, max, fallback) {
- var value = options[property];
- if (value !== undefined) {
- value = Number(value);
- if (isNaN(value) || value < min || value > max) {
- throw new RangeError(property + ' value is out of range.');
- }
- return Math.floor(value);
- }
-
- return fallback;
-}
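-
-// Worked example: getNumberOption({maximumFractionDigits: '5.7'},
-// 'maximumFractionDigits', 0, 20, 3) coerces '5.7' to a number, checks the
-// [0, 20] range and returns Math.floor(5.7) === 5; with the property absent
-// it returns the fallback 3.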
-
-
-/**
- * Initializes the given object so it's a valid NumberFormat instance.
- * Useful for subclassing.
- */
-function initializeNumberFormat(numberFormat, locales, options) {
- native function NativeJSCreateNumberFormat();
-
- if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
- throw new TypeError('Trying to re-initialize NumberFormat object.');
- }
-
- if (options === undefined) {
- options = {};
- }
-
- var getOption = getGetOption(options, 'numberformat');
-
- var locale = resolveLocale('numberformat', locales, options);
-
- var internalOptions = {};
- defineWEProperty(internalOptions, 'style', getOption(
- 'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
-
- var currency = getOption('currency', 'string');
- if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
- throw new RangeError('Invalid currency code: ' + currency);
- }
-
- if (internalOptions.style === 'currency' && currency === undefined) {
- throw new TypeError('Currency code is required with currency style.');
- }
-
- var currencyDisplay = getOption(
- 'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
- if (internalOptions.style === 'currency') {
- defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
- defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
- }
-
- // Digit ranges.
- var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
- defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
-
- var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
- defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
-
- var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
- defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
-
- var mnsd = options['minimumSignificantDigits'];
- var mxsd = options['maximumSignificantDigits'];
- if (mnsd !== undefined || mxsd !== undefined) {
- mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
- defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
-
- mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
- defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
- }
-
- // Grouping.
- defineWEProperty(internalOptions, 'useGrouping', getOption(
- 'useGrouping', 'boolean', undefined, true));
-
- // ICU prefers options to be passed using -u- extension key/values for
- // number format, so we need to build that.
- var extensionMap = parseExtension(locale.extension);
- var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
- getOption, internalOptions);
-
- var requestedLocale = locale.locale + extension;
- var resolved = Object.defineProperties({}, {
- currency: {writable: true},
- currencyDisplay: {writable: true},
- locale: {writable: true},
- maximumFractionDigits: {writable: true},
- minimumFractionDigits: {writable: true},
- minimumIntegerDigits: {writable: true},
- numberingSystem: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- style: {value: internalOptions.style, writable: true},
- useGrouping: {writable: true}
- });
- if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
- defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
- }
- if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
- defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
- }
- var formatter = NativeJSCreateNumberFormat(requestedLocale,
- internalOptions,
- resolved);
-
- // We can't get information about number or currency style from ICU, so we
- // assume the user's request was fulfilled.
- if (internalOptions.style === 'currency') {
- Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
- writable: true});
- }
-
- Object.defineProperty(numberFormat, 'formatter', {value: formatter});
- Object.defineProperty(numberFormat, 'resolved', {value: resolved});
- Object.defineProperty(numberFormat, '__initializedIntlObject',
- {value: 'numberformat'});
-
- return numberFormat;
-}
-
-
-/**
- * Constructs Intl.NumberFormat object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-%SetProperty(Intl, 'NumberFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.NumberFormat(locales, options);
- }
-
- return initializeNumberFormat(toObject(this), locales, options);
- },
- ATTRIBUTES.DONT_ENUM
-);
-
-
-/**
- * NumberFormat resolvedOptions method.
- */
-%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!this || typeof this !== 'object' ||
- this.__initializedIntlObject !== 'numberformat') {
- throw new TypeError('resolvedOptions method called on a non-object' +
- ' or on an object that is not Intl.NumberFormat.');
- }
-
- var format = this;
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
-
- var result = {
- locale: locale,
- numberingSystem: format.resolved.numberingSystem,
- style: format.resolved.style,
- useGrouping: format.resolved.useGrouping,
- minimumIntegerDigits: format.resolved.minimumIntegerDigits,
- minimumFractionDigits: format.resolved.minimumFractionDigits,
- maximumFractionDigits: format.resolved.maximumFractionDigits,
- };
-
- if (result.style === 'currency') {
- defineWECProperty(result, 'currency', format.resolved.currency);
- defineWECProperty(result, 'currencyDisplay',
- format.resolved.currencyDisplay);
- }
-
- if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
- defineWECProperty(result, 'minimumSignificantDigits',
- format.resolved.minimumSignificantDigits);
- }
-
- if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
- defineWECProperty(result, 'maximumSignificantDigits',
- format.resolved.maximumSignificantDigits);
- }
-
- return result;
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
- 'resolvedOptions');
-%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
-%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
-
-
-/**
- * Returns the subset of the given locale list for which this locale list
- * has a matching (possibly fallback) locale. Locales appear in the same
- * order in the returned list as in the input list.
- * The options parameter is optional.
- */
-%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- return supportedLocalesOf('numberformat', locales, arguments[1]);
- },
- ATTRIBUTES.DONT_ENUM
-);
-%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
-%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
-%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
-
-
-/**
- * Returns a String value representing the result of calling ToNumber(value)
- * according to the effective locale and the formatting options of this
- * NumberFormat.
- */
-function formatNumber(formatter, value) {
- native function NativeJSInternalNumberFormat();
-
- // Spec treats -0 and +0 as 0.
- var number = Number(value);
- if (number === -0) {
- number = 0;
- }
-
- return NativeJSInternalNumberFormat(formatter.formatter, number);
-}
-
-
-/**
- * Returns a Number that represents the string value that was passed in.
- */
-function parseNumber(formatter, value) {
- native function NativeJSInternalNumberParse();
-
- return NativeJSInternalNumberParse(formatter.formatter, String(value));
-}
-
-
-addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
-addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
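The digit-range block deleted above resolves each option by clamping the requested count into a legal range, falling back to a default when the option is absent; maximumFractionDigits additionally takes minimumFractionDigits as its floor, and significant digits are resolved only when explicitly requested. A minimal standalone C++ sketch of that clamping rule, where GetNumberOption and the -1 sentinel are hypothetical stand-ins for the deleted JS helper:

#include <algorithm>
#include <cstdio>

// Clamp a requested digit count into [min, max]; -1 means "not supplied",
// which yields the fallback, mirroring getNumberOption() above.
static int GetNumberOption(int requested, int min, int max, int fallback) {
  if (requested < 0) return fallback;
  return std::min(std::max(requested, min), max);
}

int main() {
  int mnid = GetNumberOption(-1, 1, 21, 1);    // minimumIntegerDigits -> 1
  int mnfd = GetNumberOption(-1, 0, 20, 0);    // minimumFractionDigits -> 0
  // maximumFractionDigits may not drop below minimumFractionDigits.
  int mxfd = GetNumberOption(2, mnfd, 20, 3);  // clamped request -> 2
  printf("%d %d %d\n", mnid, mnfd, mxfd);      // prints "1 0 2"
  return 0;
}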
diff --git a/chromium/v8/src/extensions/i18n/overrides.js b/chromium/v8/src/extensions/i18n/overrides.js
deleted file mode 100644
index b2d60b3fc67..00000000000
--- a/chromium/v8/src/extensions/i18n/overrides.js
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ECMAScript 402 API implementation is broken into separate files for
-// each service. The build system combines them together into one
-// Intl namespace.
-
-
-// Save references to Intl objects and methods we use, for added security.
-var savedObjects = {
- 'collator': Intl.Collator,
- 'numberformat': Intl.NumberFormat,
- 'dateformatall': Intl.DateTimeFormat,
- 'dateformatdate': Intl.DateTimeFormat,
- 'dateformattime': Intl.DateTimeFormat
-};
-
-
-// Default (created with undefined locales and options parameters) collator,
-// number and date format instances. They'll be created as needed.
-var defaultObjects = {
- 'collator': undefined,
- 'numberformat': undefined,
- 'dateformatall': undefined,
- 'dateformatdate': undefined,
- 'dateformattime': undefined,
-};
-
-
-/**
- * Returns a cached or newly created instance of a given service.
- * We cache only default instances (where no locales or options are provided).
- */
-function cachedOrNewService(service, locales, options, defaults) {
- var useOptions = (defaults === undefined) ? options : defaults;
- if (locales === undefined && options === undefined) {
- if (defaultObjects[service] === undefined) {
- defaultObjects[service] = new savedObjects[service](locales, useOptions);
- }
- return defaultObjects[service];
- }
- return new savedObjects[service](locales, useOptions);
-}
-
-
-/**
- * Compares this and that, and returns a value less than 0, equal to 0,
- * or greater than 0. Overrides the built-in method.
- */
-Object.defineProperty(String.prototype, 'localeCompare', {
- value: function(that) {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (this === undefined || this === null) {
- throw new TypeError('Method invoked on undefined or null value.');
- }
-
- var locales = arguments[1];
- var options = arguments[2];
- var collator = cachedOrNewService('collator', locales, options);
- return compare(collator, this, that);
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(String.prototype.localeCompare, 'localeCompare');
-%FunctionRemovePrototype(String.prototype.localeCompare);
-%SetNativeFlag(String.prototype.localeCompare);
-
-
-/**
- * Formats a Number object (this) using locale and options values.
- * If locale or options are omitted, defaults are used.
- */
-Object.defineProperty(Number.prototype, 'toLocaleString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- if (!(this instanceof Number) && typeof(this) !== 'number') {
- throw new TypeError('Method invoked on an object that is not Number.');
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- var numberFormat = cachedOrNewService('numberformat', locales, options);
- return formatNumber(numberFormat, this);
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Number.prototype.toLocaleString, 'toLocaleString');
-%FunctionRemovePrototype(Number.prototype.toLocaleString);
-%SetNativeFlag(Number.prototype.toLocaleString);
-
-
-/**
- * Returns the formatted date, or fails if the date parameter is invalid.
- */
-function toLocaleDateTime(date, locales, options, required, defaults, service) {
- if (!(date instanceof Date)) {
- throw new TypeError('Method invoked on an object that is not Date.');
- }
-
- if (isNaN(date)) {
- return 'Invalid Date';
- }
-
- var internalOptions = toDateTimeOptions(options, required, defaults);
-
- var dateFormat =
- cachedOrNewService(service, locales, options, internalOptions);
-
- return formatDate(dateFormat, date);
-}
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - both date and time are
- * present in the output.
- */
-Object.defineProperty(Date.prototype, 'toLocaleString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- return toLocaleDateTime(
- this, locales, options, 'any', 'all', 'dateformatall');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Date.prototype.toLocaleString, 'toLocaleString');
-%FunctionRemovePrototype(Date.prototype.toLocaleString);
-%SetNativeFlag(Date.prototype.toLocaleString);
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - only date is present
- * in the output.
- */
-Object.defineProperty(Date.prototype, 'toLocaleDateString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- return toLocaleDateTime(
- this, locales, options, 'date', 'date', 'dateformatdate');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Date.prototype.toLocaleDateString, 'toLocaleDateString');
-%FunctionRemovePrototype(Date.prototype.toLocaleDateString);
-%SetNativeFlag(Date.prototype.toLocaleDateString);
-
-
-/**
- * Formats a Date object (this) using locale and options values.
- * If locale or options are omitted, defaults are used - only time is present
- * in the output.
- */
-Object.defineProperty(Date.prototype, 'toLocaleTimeString', {
- value: function() {
- if (%_IsConstructCall()) {
- throw new TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- return toLocaleDateTime(
- this, locales, options, 'time', 'time', 'dateformattime');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName(Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
-%FunctionRemovePrototype(Date.prototype.toLocaleTimeString);
-%SetNativeFlag(Date.prototype.toLocaleTimeString);
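cachedOrNewService(), deleted above, memoizes only default instances: a service constructed with undefined locales and options is built once and reused, while explicit arguments always produce a fresh object. A standalone C++ sketch of that caching policy; Formatter and CachedOrNewService are hypothetical analogues of the JS code:

#include <cstdio>
#include <map>
#include <memory>
#include <string>

struct Formatter { std::string service; };

// Lazily created default instances, keyed by service name.
static std::map<std::string, std::shared_ptr<Formatter> > default_objects;

std::shared_ptr<Formatter> CachedOrNewService(const std::string& service,
                                              bool has_arguments) {
  if (!has_arguments) {
    std::shared_ptr<Formatter>& slot = default_objects[service];
    if (!slot) slot.reset(new Formatter());  // create the default once
    return slot;
  }
  return std::shared_ptr<Formatter>(new Formatter());  // uncached
}

int main() {
  std::shared_ptr<Formatter> a = CachedOrNewService("numberformat", false);
  std::shared_ptr<Formatter> b = CachedOrNewService("numberformat", false);
  printf("%s\n", a == b ? "same default instance" : "distinct instances");
  return 0;
}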
diff --git a/chromium/v8/src/extensions/statistics-extension.cc b/chromium/v8/src/extensions/statistics-extension.cc
index 32bc07de8bd..651d99d4526 100644
--- a/chromium/v8/src/extensions/statistics-extension.cc
+++ b/chromium/v8/src/extensions/statistics-extension.cc
@@ -60,7 +60,7 @@ static void AddNumber(v8::Local<v8::Object> object,
void StatisticsExtension::GetCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
Heap* heap = isolate->heap();
if (args.Length() > 0) { // GC if first argument evaluates to true.
diff --git a/chromium/v8/src/factory.cc b/chromium/v8/src/factory.cc
index 3ca0efa2107..acbaf3c862c 100644
--- a/chromium/v8/src/factory.cc
+++ b/chromium/v8/src/factory.cc
@@ -130,7 +130,8 @@ Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
- DescriptorArray::Allocate(number_of_descriptors, slack),
+ DescriptorArray::Allocate(
+ isolate(), number_of_descriptors, slack),
DescriptorArray);
}
@@ -140,7 +141,8 @@ Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationInputData::Allocate(deopt_entry_count,
+ DeoptimizationInputData::Allocate(isolate(),
+ deopt_entry_count,
pretenure),
DeoptimizationInputData);
}
@@ -151,7 +153,8 @@ Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
CALL_HEAP_FUNCTION(isolate(),
- DeoptimizationOutputData::Allocate(deopt_entry_count,
+ DeoptimizationOutputData::Allocate(isolate(),
+ deopt_entry_count,
pretenure),
DeoptimizationOutputData);
}
@@ -664,7 +667,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
return result;
}
- if (V8::UseCrankshaft() &&
+ if (isolate()->use_crankshaft() &&
FLAG_always_opt &&
result->is_compiled() &&
!function_info->is_toplevel() &&
@@ -806,7 +809,7 @@ Handle<String> Factory::EmergencyNewError(const char* message,
*p++ = ' ';
space--;
if (space > 0) {
- MaybeObject* maybe_arg = args->GetElement(i);
+ MaybeObject* maybe_arg = args->GetElement(isolate(), i);
Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
const char* arg = *arg_str->ToCString();
Vector<char> v2(p, static_cast<int>(space));
@@ -1023,10 +1026,11 @@ Handle<GlobalObject> Factory::NewGlobalObject(
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure,
+ bool alloc_props) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure),
+ isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure, alloc_props),
JSObject);
}
@@ -1079,13 +1083,6 @@ void Factory::SetContent(Handle<JSArray> array,
}
-void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->EnsureCanContainHeapObjectElements());
-}
-
-
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
@@ -1189,13 +1186,6 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
-void Factory::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- object->SetIdentityHash(hash, ALLOW_CREATION));
-}
-
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
@@ -1215,6 +1205,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
shared->set_num_literals(literals_array_size);
if (is_generator) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
+ shared->DisableOptimization(kGenerator);
}
return shared;
}
@@ -1326,7 +1317,7 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
Handle<Object> Factory::ToObject(Handle<Object> object) {
- CALL_HEAP_FUNCTION(isolate(), object->ToObject(), Object);
+ CALL_HEAP_FUNCTION(isolate(), object->ToObject(isolate()), Object);
}
@@ -1391,8 +1382,10 @@ Handle<JSFunction> Factory::CreateApiFunction(
Smi::cast(instance_template->internal_field_count())->value();
}
+ // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
+ // JSObject::GetHeaderSize.
int instance_size = kPointerSize * internal_field_count;
- InstanceType type = INVALID_TYPE;
+ InstanceType type;
switch (instance_type) {
case JavaScriptObject:
type = JS_OBJECT_TYPE;
@@ -1407,9 +1400,10 @@ Handle<JSFunction> Factory::CreateApiFunction(
instance_size += JSGlobalProxy::kSize;
break;
default:
+ UNREACHABLE();
+ type = JS_OBJECT_TYPE; // Keep the compiler happy.
break;
}
- ASSERT(type != INVALID_TYPE);
Handle<JSFunction> result =
NewFunction(Factory::empty_string(),
@@ -1462,15 +1456,29 @@ Handle<JSFunction> Factory::CreateApiFunction(
result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
- // Recursively copy parent templates' accessors, 'data' may be modified.
+ // Recursively copy parent instance templates' accessors,
+ // 'data' may be modified.
int max_number_of_additional_properties = 0;
+ int max_number_of_static_properties = 0;
FunctionTemplateInfo* info = *obj;
while (true) {
- Object* props = info->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate());
- NeanderArray props_array(props_handle);
- max_number_of_additional_properties += props_array.length();
+ if (!info->instance_template()->IsUndefined()) {
+ Object* props =
+ ObjectTemplateInfo::cast(
+ info->instance_template())->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate());
+ NeanderArray props_array(props_handle);
+ max_number_of_additional_properties += props_array.length();
+ }
+ }
+ if (!info->property_accessors()->IsUndefined()) {
+ Object* props = info->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate());
+ NeanderArray props_array(props_handle);
+ max_number_of_static_properties += props_array.length();
+ }
}
Object* parent = info->parent_template();
if (parent->IsUndefined()) break;
@@ -1479,17 +1487,44 @@ Handle<JSFunction> Factory::CreateApiFunction(
Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
+ // Use a temporary FixedArray to accumulate static accessors
+ int valid_descriptors = 0;
+ Handle<FixedArray> array;
+ if (max_number_of_static_properties > 0) {
+ array = NewFixedArray(max_number_of_static_properties);
+ }
+
while (true) {
- Handle<Object> props = Handle<Object>(obj->property_accessors(),
- isolate());
- if (!props->IsUndefined()) {
- Map::AppendCallbackDescriptors(map, props);
+ // Install instance descriptors
+ if (!obj->instance_template()->IsUndefined()) {
+ Handle<ObjectTemplateInfo> instance =
+ Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->instance_template()), isolate());
+ Handle<Object> props = Handle<Object>(instance->property_accessors(),
+ isolate());
+ if (!props->IsUndefined()) {
+ Map::AppendCallbackDescriptors(map, props);
+ }
+ }
+ // Accumulate static accessors
+ if (!obj->property_accessors()->IsUndefined()) {
+ Handle<Object> props = Handle<Object>(obj->property_accessors(),
+ isolate());
+ valid_descriptors =
+ AccessorInfo::AppendUnique(props, array, valid_descriptors);
}
+ // Climb parent chain
Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
+ // Install accumulated static accessors
+ for (int i = 0; i < valid_descriptors; i++) {
+ Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
+ JSObject::SetAccessor(result, accessor);
+ }
+
ASSERT(result->shared()->IsApiFunction());
return result;
}
@@ -1588,7 +1623,8 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
// instance template.
Handle<Object> instance_template(desc->instance_template(), isolate());
if (!instance_template->IsUndefined()) {
- Execution::ConfigureInstance(instance,
+ Execution::ConfigureInstance(isolate(),
+ instance,
instance_template,
pending_exception);
} else {
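The rewritten loop above makes one pass over the template chain, installing instance-template accessors directly on the map while collecting static accessors into a temporary FixedArray through AccessorInfo::AppendUnique(). Assuming AppendUnique keeps the first occurrence of each name, so a derived template shadows its parent, a standalone C++ sketch of the accumulation:

#include <cstdio>
#include <cstring>

struct Accessor { const char* name; };

// Append accessors, skipping names already present (first one wins).
int AppendUnique(const Accessor* props, int count,
                 Accessor* array, int valid_descriptors) {
  for (int i = 0; i < count; i++) {
    bool duplicate = false;
    for (int j = 0; j < valid_descriptors; j++) {
      if (strcmp(array[j].name, props[i].name) == 0) duplicate = true;
    }
    if (!duplicate) array[valid_descriptors++] = props[i];
  }
  return valid_descriptors;
}

int main() {
  Accessor child[] = { {"x"}, {"y"} };
  Accessor parent[] = { {"y"}, {"z"} };  // "y" is shadowed by the child
  Accessor array[4];
  int valid = AppendUnique(child, 2, array, 0);
  valid = AppendUnique(parent, 2, array, valid);
  for (int i = 0; i < valid; i++) printf("%s ", array[i].name);  // x y z
  printf("\n");
  return 0;
}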
diff --git a/chromium/v8/src/factory.h b/chromium/v8/src/factory.h
index dc7933aa20f..1bdf474337c 100644
--- a/chromium/v8/src/factory.h
+++ b/chromium/v8/src/factory.h
@@ -301,7 +301,11 @@ class Factory {
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED);
+ PretenureFlag pretenure = NOT_TENURED,
+ bool allocate_properties = true);
+
+ Handle<JSObject> NewJSObjectFromMapForDeoptimizer(
+ Handle<Map> map, PretenureFlag pretenure = NOT_TENURED);
// JS modules are pretenured.
Handle<JSModule> NewJSModule(Handle<Context> context,
@@ -324,7 +328,6 @@ class Factory {
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
- void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
@@ -342,8 +345,6 @@ class Factory {
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
- void SetIdentityHash(Handle<JSObject> object, Smi* hash);
-
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@@ -638,6 +639,24 @@ class IdempotentPointerToHandleCodeTrampoline {
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
+ template<typename R, typename P1, typename P2, typename P3, typename P4,
+ typename P5, typename P6, typename P7>
+ MUST_USE_RESULT MaybeObject* CallWithReturnValue(
+ R (*function)(P1, P2, P3, P4, P5, P6, P7),
+ P1 p1,
+ P2 p2,
+ P3 p3,
+ P4 p4,
+ P5 p5,
+ P6 p6,
+ P7 p7) {
+ int collections = isolate_->heap()->gc_count();
+ Handle<Object> result = (*function)(p1, p2, p3, p4, p5, p6, p7);
+ return (collections == isolate_->heap()->gc_count())
+ ? *result
+ : reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
+ }
+
private:
Isolate* isolate_;
};
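The seven-parameter CallWithReturnValue overload added above follows the trampoline's invariant: call the function, and if a garbage collection ran in the meantime, discard the result and report RetryAfterGC so the caller re-executes. A tiny standalone C++11 model of that check, with a fake gc_count standing in for isolate_->heap()->gc_count():

#include <cstdio>

static int gc_count = 0;  // stands in for the heap's GC counter

// Returns false when a collection happened during the call, meaning the
// caller must retry; mirrors the MaybeObject*/RetryAfterGC protocol above.
template <typename R, typename F>
bool CallWithReturnValue(F function, R* result) {
  int collections = gc_count;
  *result = function();
  return collections == gc_count;
}

int main() {
  int value = 0;
  bool ok = CallWithReturnValue<int>([]() { gc_count++; return 42; }, &value);
  printf("%s\n", ok ? "committed" : "retry after GC");  // retry after GC
  return 0;
}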
diff --git a/chromium/v8/src/flag-definitions.h b/chromium/v8/src/flag-definitions.h
index 5fc5d880b3e..08cd8304e4d 100644
--- a/chromium/v8/src/flag-definitions.h
+++ b/chromium/v8/src/flag-definitions.h
@@ -41,15 +41,12 @@
extern ctype FLAG_##nam;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
static ctype const FLAG_##nam = def;
-#define DEFINE_implication(whenflag, thenflag)
// We want to supply the actual storage and value for the flag variable in the
// .cc file. We only do this for writable flags.
#elif defined(FLAG_MODE_DEFINE)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
ctype FLAG_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
// We need to define all of our default values so that the Flag structure can
// access them by pointer. These are just used internally inside of one .cc,
@@ -57,21 +54,18 @@
#elif defined(FLAG_MODE_DEFINE_DEFAULTS)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
static ctype const FLAGDEFAULT_##nam = def;
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
// We want to write entries into our metadata table, for internal parsing and
// printing, etc., in the flag parser code. We only do this for writable flags.
#elif defined(FLAG_MODE_META)
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
{ Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
+#define FLAG_ALIAS(ftype, ctype, alias, nam) \
+ { Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \
+ "alias for --"#nam, false },
// We produce the code to set flags when it is implied by another flag.
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt)
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
#define DEFINE_implication(whenflag, thenflag) \
if (FLAG_##whenflag) FLAG_##thenflag = true;
@@ -79,6 +73,24 @@
#error No mode supplied when including flags.defs
#endif
+// Dummy defines for modes where it is not relevant.
+#ifndef FLAG_FULL
+#define FLAG_FULL(ftype, ctype, nam, def, cmt)
+#endif
+
+#ifndef FLAG_READONLY
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#endif
+
+#ifndef FLAG_ALIAS
+#define FLAG_ALIAS(ftype, ctype, alias, nam)
+#endif
+
+#ifndef DEFINE_implication
+#define DEFINE_implication(whenflag, thenflag)
+#endif
+
+
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
#define JSARGUMENTS_INIT {{}}
@@ -135,11 +147,18 @@ public:
# define ENABLE_32DREGS_DEFAULT false
#endif
-#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
-#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
-#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
+#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
+#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+
+#define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
+#define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
+#define DEFINE_ALIAS_float(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
+#define DEFINE_ALIAS_string(alias, nam) \
+ FLAG_ALIAS(STRING, const char*, alias, nam)
+#define DEFINE_ALIAS_args(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam)
//
// Flags in all modes.
@@ -164,9 +183,9 @@ DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
-DEFINE_bool(harmony_typed_arrays, false,
+DEFINE_bool(harmony_typed_arrays, true,
"enable harmony typed arrays")
-DEFINE_bool(harmony_array_buffer, false,
+DEFINE_bool(harmony_array_buffer, true,
"enable harmony array buffer")
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
@@ -174,6 +193,7 @@ DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
"enable harmony numeric literals (0o77, 0b11)")
DEFINE_bool(harmony_strings, false, "enable harmony string")
+DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
@@ -185,6 +205,7 @@ DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony, harmony_strings)
+DEFINE_implication(harmony, harmony_arrays)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// TODO(dslomov): add harmony => harmony_typed_arrays
@@ -192,8 +213,6 @@ DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
- "generate keyed store stubs")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
@@ -237,6 +256,7 @@ DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
"crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
+DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter")
DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
@@ -262,10 +282,11 @@ DEFINE_int(deopt_every_n_garbage_collections,
"deoptimize every n garbage collections")
DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
+DEFINE_bool(trap_on_stub_deopt, false,
+ "put a break point before deoptimizing a stub")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
-DEFINE_bool(idefs, false, "use informative definitions")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
DEFINE_bool(array_bounds_checks_hoisting, false,
@@ -302,16 +323,17 @@ DEFINE_bool(opt_safe_uint32_operations, true,
"allow uint32 values on optimize frames if they are used only in "
"safe operations")
-DEFINE_bool(parallel_recompilation, true,
+DEFINE_bool(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
-DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
-DEFINE_int(parallel_recompilation_queue_length, 8,
- "the length of the parallel compilation queue")
-DEFINE_int(parallel_recompilation_delay, 0,
+DEFINE_bool(trace_concurrent_recompilation, false,
+ "track concurrent recompilation")
+DEFINE_int(concurrent_recompilation_queue_length, 8,
+ "the length of the concurrent compilation queue")
+DEFINE_int(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
-DEFINE_bool(omit_prototype_checks_for_leaf_maps, true,
- "do not emit prototype checks if all prototypes have leaf maps, "
- "deoptimize the optimized code if the layout of the maps changes.")
+DEFINE_bool(concurrent_osr, false,
+ "concurrent on-stack replacement")
+
DEFINE_bool(omit_map_checks_for_leaf_maps, true,
"do not emit check maps for constant values that have a leaf map, "
"deoptimize the optimized code if the layout of the maps changes.")
@@ -358,8 +380,6 @@ DEFINE_bool(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
DEFINE_bool(enable_cmov, true,
"enable use of CMOV instruction if available")
-DEFINE_bool(enable_rdtsc, true,
- "enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
@@ -381,7 +401,6 @@ DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
// bootstrapper.cc
-DEFINE_bool(enable_i18n, true, "enable i18n extension")
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
@@ -436,6 +455,10 @@ DEFINE_bool(compilation_cache, true, "enable compilation cache")
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+// cpu-profiler.cc
+DEFINE_int(cpu_profiler_sampling_interval, 1000,
+ "CPU profiler sampling interval in microseconds")
+
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(trace_js_array_abuse, false,
@@ -521,6 +544,7 @@ DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
+DEFINE_bool(js_accessor_ics, false, "create ics for js accessors")
// macro-assembler-ia32.cc
DEFINE_bool(native_code_counters, false,
@@ -590,7 +614,7 @@ DEFINE_int(testing_int_flag, 13, "testing_int_flag")
DEFINE_float(testing_float_flag, 2.5, "float-flag")
DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness")
-#ifdef WIN32
+#ifdef _WIN32
DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
"file in which to testing_serialize heap")
#else
@@ -665,13 +689,14 @@ DEFINE_bool(stress_compaction, false,
DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
-// codegen-ia32.cc / codegen-arm.cc
+// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
DEFINE_bool(print_source, false, "pretty print source code")
DEFINE_bool(print_builtin_source, false,
"pretty print source code for builtins")
DEFINE_bool(print_ast, false, "print source AST")
DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
+DEFINE_bool(trap_on_abort, false, "replace aborts by breakpoints")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
@@ -744,9 +769,6 @@ DEFINE_bool(log_snapshot_positions, false,
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
-DEFINE_bool(prof_lazy, false,
- "Used with --prof, only does sampling and logging"
- " when profiler is active.")
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
@@ -809,11 +831,19 @@ DEFINE_implication(print_all_code, trace_codegen)
#undef FLAG_FULL
#undef FLAG_READONLY
#undef FLAG
+#undef FLAG_ALIAS
#undef DEFINE_bool
#undef DEFINE_int
#undef DEFINE_string
+#undef DEFINE_float
+#undef DEFINE_args
#undef DEFINE_implication
+#undef DEFINE_ALIAS_bool
+#undef DEFINE_ALIAS_int
+#undef DEFINE_ALIAS_string
+#undef DEFINE_ALIAS_float
+#undef DEFINE_ALIAS_args
#undef FLAG_MODE_DECLARE
#undef FLAG_MODE_DEFINE
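flag-definitions.h is an X-macro header: the same flag list is re-included under several FLAG_MODE_* settings, and the change above replaces per-mode stub macros with one shared block of dummy fallbacks, adding FLAG_ALIAS so a renamed flag (e.g. --parallel-recompilation) keeps parsing. A self-contained C++ miniature of the pattern; FLAG_LIST and its contents are hypothetical:

#include <cstdio>

#define FLAG_LIST(V, A)                   \
  V(bool, concurrent_recompilation, true) \
  A(bool, parallel_recompilation, concurrent_recompilation)

// Mode 1: define storage; aliases expand to nothing.
#define FLAG_FULL(ctype, nam, def) ctype FLAG_##nam = def;
#define FLAG_ALIAS(ctype, alias, nam)
FLAG_LIST(FLAG_FULL, FLAG_ALIAS)
#undef FLAG_FULL
#undef FLAG_ALIAS

// Mode 2: build the name table; aliases point at the aliased storage.
#define FLAG_FULL(ctype, nam, def) { #nam, &FLAG_##nam },
#define FLAG_ALIAS(ctype, alias, nam) { #alias, &FLAG_##nam },
struct Entry { const char* name; bool* storage; };
static Entry entries[] = { FLAG_LIST(FLAG_FULL, FLAG_ALIAS) };
#undef FLAG_FULL
#undef FLAG_ALIAS

int main() {
  // Both names resolve to the same underlying flag variable.
  printf("%s -> %d\n", entries[1].name, *entries[1].storage);
  return 0;
}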
diff --git a/chromium/v8/src/flags.cc b/chromium/v8/src/flags.cc
index 855e20712c6..4e18cc8c808 100644
--- a/chromium/v8/src/flags.cc
+++ b/chromium/v8/src/flags.cc
@@ -268,6 +268,11 @@ List<const char*>* FlagList::argv() {
}
+inline char NormalizeChar(char ch) {
+ return ch == '_' ? '-' : ch;
+}
+
+
// Helper function to parse flags: Takes an argument arg and splits it into
// a flag name and flag value (or NULL if they are missing). is_bool is set
// if the arg started with "-no" or "--no". The buffer may be used to NUL-
@@ -295,6 +300,7 @@ static void SplitArgument(const char* arg,
}
if (arg[0] == 'n' && arg[1] == 'o') {
arg += 2; // remove "no"
+ if (NormalizeChar(arg[0]) == '-') arg++; // remove dash after "no".
*is_bool = true;
}
*name = arg;
@@ -318,11 +324,6 @@ static void SplitArgument(const char* arg,
}
-inline char NormalizeChar(char ch) {
- return ch == '_' ? '-' : ch;
-}
-
-
static bool EqualNames(const char* a, const char* b) {
for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
if (a[i] == '\0') {
diff --git a/chromium/v8/src/flags.h b/chromium/v8/src/flags.h
index f0b239b6f20..fe182e5221c 100644
--- a/chromium/v8/src/flags.h
+++ b/chromium/v8/src/flags.h
@@ -24,9 +24,12 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
+#include "atomicops.h"
+
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/frames.cc b/chromium/v8/src/frames.cc
index c17a9d5f82e..167277f7996 100644
--- a/chromium/v8/src/frames.cc
+++ b/chromium/v8/src/frames.cc
@@ -489,7 +489,7 @@ Address StackFrame::UnpaddedFP() const {
Code* EntryFrame::unchecked_code() const {
- return HEAP->js_entry_code();
+ return isolate()->heap()->js_entry_code();
}
@@ -512,7 +512,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
Code* EntryConstructFrame::unchecked_code() const {
- return HEAP->js_construct_entry_code();
+ return isolate()->heap()->js_construct_entry_code();
}
@@ -814,8 +814,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
PrintF("+%d", code_offset);
SharedFunctionInfo* shared = fun->shared();
if (print_line_number) {
- Code* code = Code::cast(
- v8::internal::Isolate::Current()->FindCodeObject(pc));
+ Code* code = Code::cast(isolate->FindCodeObject(pc));
int source_pos = code->SourcePosition(pc);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
diff --git a/chromium/v8/src/full-codegen.cc b/chromium/v8/src/full-codegen.cc
index 6d802e965d2..91a51731a58 100644
--- a/chromium/v8/src/full-codegen.cc
+++ b/chromium/v8/src/full-codegen.cc
@@ -333,7 +333,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
- !info->function()->flags()->Contains(kDontOptimize) &&
+ !info->function()->dont_optimize() &&
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
@@ -350,21 +350,17 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_back_edge_table_offset(table_offset);
code->set_back_edges_patched_for_osr(false);
CodeGenerator::PrintCode(code, info);
- info->SetCode(code); // May be an empty handle.
+ info->SetCode(code);
#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && !code.is_null()) {
+ if (FLAG_gdbjit) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
-
GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif
- if (!code.is_null()) {
- void* line_info =
- masm.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
- }
- return !code.is_null();
+ void* line_info = masm.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
+ return true;
}
@@ -419,7 +415,7 @@ void FullCodeGenerator::Initialize() {
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
- InitializeAstVisitor();
+ InitializeAstVisitor(info_->isolate());
}
@@ -512,7 +508,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
- __ push(reg);
+ __ Push(reg);
}
@@ -530,7 +526,7 @@ void FullCodeGenerator::EffectContext::PlugTOS() const {
void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
- __ pop(result_register());
+ __ Pop(result_register());
}
@@ -540,7 +536,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
- __ pop(result_register());
+ __ Pop(result_register());
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -834,7 +830,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
} else {
// Check if the statement will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker;
+ BreakableStatementChecker checker(isolate());
checker.Check(stmt);
// Record the statement position right here if the statement is not
// breakable. For breakable statements the actual recording of the
@@ -860,7 +856,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
} else {
// Check if the expression will be breakable without adding a debug break
// slot.
- BreakableStatementChecker checker;
+ BreakableStatementChecker checker(isolate());
checker.Check(expr);
// Record a statement position right here if the expression is not
// breakable. For breakable expressions the actual recording of the
@@ -1006,7 +1002,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
VisitForAccumulatorValue(left);
// We want the value in the accumulator for the test, and on the stack in
// case we need it.
- __ push(result_register());
+ __ Push(result_register());
Label discard, restore;
if (is_logical_and) {
DoTest(left, &discard, &restore, &restore);
@@ -1014,7 +1010,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
DoTest(left, &restore, &discard, &restore);
}
__ bind(&restore);
- __ pop(result_register());
+ __ Pop(result_register());
__ jmp(&done);
__ bind(&discard);
__ Drop(1);
@@ -1024,7 +1020,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
VisitForAccumulatorValue(left);
// We want the value in the accumulator for the test, and on the stack in
// case we need it.
- __ push(result_register());
+ __ Push(result_register());
Label discard;
if (is_logical_and) {
DoTest(left, &discard, &done, &discard);
@@ -1416,7 +1412,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
__ Push(stmt->variable()->name());
- __ push(result_register());
+ __ Push(result_register());
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushCatchContext, 3);
StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1481,7 +1477,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// preserved by the finally block. Call the finally block and then
// rethrow the exception if it returns.
__ Call(&finally_entry);
- __ push(result_register());
+ __ Push(result_register());
__ CallRuntime(Runtime::kReThrow, 1);
// Finally block implementation.
diff --git a/chromium/v8/src/full-codegen.h b/chromium/v8/src/full-codegen.h
index a9db54e32c1..5580cb3e86c 100644
--- a/chromium/v8/src/full-codegen.h
+++ b/chromium/v8/src/full-codegen.h
@@ -31,11 +31,14 @@
#include "v8.h"
#include "allocation.h"
+#include "assert-scope.h"
#include "ast.h"
#include "code-stubs.h"
#include "codegen.h"
#include "compiler.h"
#include "data-flow.h"
+#include "globals.h"
+#include "objects.h"
namespace v8 {
namespace internal {
@@ -49,8 +52,8 @@ class JumpPatchSite;
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- BreakableStatementChecker() : is_breakable_(false) {
- InitializeAstVisitor();
+ explicit BreakableStatementChecker(Isolate* isolate) : is_breakable_(false) {
+ InitializeAstVisitor(isolate);
}
void Check(Statement* stmt);
@@ -136,7 +139,64 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
- static const int kBackEdgeEntrySize = 3 * kIntSize;
+ class BackEdgeTableIterator {
+ public:
+ explicit BackEdgeTableIterator(Code* unoptimized,
+ DisallowHeapAllocation* required) {
+ ASSERT(unoptimized->kind() == Code::FUNCTION);
+ instruction_start_ = unoptimized->instruction_start();
+ cursor_ = instruction_start_ + unoptimized->back_edge_table_offset();
+ ASSERT(cursor_ < instruction_start_ + unoptimized->instruction_size());
+ table_length_ = Memory::uint32_at(cursor_);
+ cursor_ += kTableLengthSize;
+ end_ = cursor_ + table_length_ * kEntrySize;
+ }
+
+ bool Done() { return cursor_ >= end_; }
+
+ void Next() {
+ ASSERT(!Done());
+ cursor_ += kEntrySize;
+ }
+
+ BailoutId ast_id() {
+ ASSERT(!Done());
+ return BailoutId(static_cast<int>(
+ Memory::uint32_at(cursor_ + kAstIdOffset)));
+ }
+
+ uint32_t loop_depth() {
+ ASSERT(!Done());
+ return Memory::uint32_at(cursor_ + kLoopDepthOffset);
+ }
+
+ uint32_t pc_offset() {
+ ASSERT(!Done());
+ return Memory::uint32_at(cursor_ + kPcOffsetOffset);
+ }
+
+ Address pc() {
+ ASSERT(!Done());
+ return instruction_start_ + pc_offset();
+ }
+
+ uint32_t table_length() { return table_length_; }
+
+ private:
+ static const int kTableLengthSize = kIntSize;
+ static const int kAstIdOffset = 0 * kIntSize;
+ static const int kPcOffsetOffset = 1 * kIntSize;
+ static const int kLoopDepthOffset = 2 * kIntSize;
+ static const int kEntrySize = 3 * kIntSize;
+
+ Address cursor_;
+ Address end_;
+ Address instruction_start_;
+ uint32_t table_length_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator);
+ };
+
private:
class Breakable;
@@ -625,8 +685,6 @@ class FullCodeGenerator: public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
-
void VisitComma(BinaryOperation* expr);
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
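The BackEdgeTableIterator added above walks a packed table emitted into unoptimized code: a uint32 entry count followed by three-word records at kAstIdOffset, kPcOffsetOffset, and kLoopDepthOffset. A standalone C++ walk over a hand-built table with that layout (the entry values are made up):

#include <cstdint>
#include <cstdio>

int main() {
  // {length, then per entry: ast id, pc offset, loop depth}
  const uint32_t table[] = {
    2,
    7, 0x40, 1,
    9, 0x7c, 2,
  };
  uint32_t table_length = table[0];
  const uint32_t* cursor = table + 1;  // skip kTableLengthSize
  for (uint32_t i = 0; i < table_length; i++, cursor += 3) {
    printf("ast=%u pc_offset=0x%x depth=%u\n",
           cursor[0], cursor[1], cursor[2]);
  }
  return 0;
}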
diff --git a/chromium/v8/src/gdb-jit.cc b/chromium/v8/src/gdb-jit.cc
index 74db807fb35..21cfd223349 100644
--- a/chromium/v8/src/gdb-jit.cc
+++ b/chromium/v8/src/gdb-jit.cc
@@ -1872,7 +1872,7 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
static void RegisterCodeEntry(JITCodeEntry* entry,
bool dump_if_enabled,
const char* name_hint) {
-#if defined(DEBUG) && !defined(WIN32)
+#if defined(DEBUG) && !V8_OS_WIN
static int file_num = 0;
if (FLAG_gdbjit_dump && dump_if_enabled) {
static const int kMaxFileNameSize = 64;
@@ -2063,7 +2063,7 @@ void GDBJITInterface::AddCode(const char* name,
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -2149,7 +2149,7 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
@@ -2187,7 +2187,7 @@ void GDBJITInterface::RemoveCodeRange(Address start, Address end) {
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
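The gdb-jit.cc hunks swap ScopedLock for LockGuard<Mutex>, the same RAII shape as std::lock_guard: the constructor acquires, scope exit releases, and early returns cannot leak the lock. A standard-library analogue of the new call sites:

#include <cstdio>
#include <mutex>

static std::mutex mutex;

void AddCode(bool enabled) {
  if (!enabled) return;  // nothing held yet, so early return is safe
  std::lock_guard<std::mutex> lock_guard(mutex);
  printf("registered under lock\n");
}  // lock released here

int main() {
  AddCode(true);
  return 0;
}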
diff --git a/chromium/v8/src/global-handles.cc b/chromium/v8/src/global-handles.cc
index 41771e6db41..1a98e49ff37 100644
--- a/chromium/v8/src/global-handles.cc
+++ b/chromium/v8/src/global-handles.cc
@@ -90,7 +90,7 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
- weak_reference_callback_ = NULL;
+ weak_callback_ = NULL;
}
#endif
@@ -111,21 +111,19 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
- weak_reference_callback_ = NULL;
+ weak_callback_ = NULL;
IncreaseBlockUses();
}
void Release() {
ASSERT(state() != FREE);
set_state(FREE);
-#ifdef ENABLE_EXTRA_CHECKS
// Zap the values for eager trapping.
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
set_partially_dependent(false);
- weak_reference_callback_ = NULL;
-#endif
+ weak_callback_ = NULL;
DecreaseBlockUses();
}
@@ -171,6 +169,13 @@ class GlobalHandles::Node {
flags_ = IsInNewSpaceList::update(flags_, v);
}
+ bool is_revivable_callback() {
+ return IsRevivableCallback::decode(flags_);
+ }
+ void set_revivable_callback(bool v) {
+ flags_ = IsRevivableCallback::update(flags_, v);
+ }
+
bool IsNearDeath() const {
// Check for PENDING to ensure correct answer when processing callbacks.
return state() == PENDING || state() == NEAR_DEATH;
@@ -230,11 +235,20 @@ class GlobalHandles::Node {
}
void MakeWeak(void* parameter,
- RevivableCallback weak_reference_callback) {
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback) {
+ ASSERT((weak_callback == NULL) != (revivable_callback == NULL));
ASSERT(state() != FREE);
set_state(WEAK);
set_parameter(parameter);
- weak_reference_callback_ = weak_reference_callback;
+ if (weak_callback != NULL) {
+ weak_callback_ = weak_callback;
+ set_revivable_callback(false);
+ } else {
+ weak_callback_ =
+ reinterpret_cast<WeakCallback>(revivable_callback);
+ set_revivable_callback(true);
+ }
}
void ClearWeakness() {
@@ -245,7 +259,7 @@ class GlobalHandles::Node {
bool PostGarbageCollectionProcessing(Isolate* isolate) {
if (state() != Node::PENDING) return false;
- if (weak_reference_callback_ == NULL) {
+ if (weak_callback_ == NULL) {
Release();
return false;
}
@@ -264,9 +278,20 @@ class GlobalHandles::Node {
// Leaving V8.
VMState<EXTERNAL> state(isolate);
HandleScope handle_scope(isolate);
- weak_reference_callback_(reinterpret_cast<v8::Isolate*>(isolate),
- reinterpret_cast<Persistent<Value>*>(&object),
- par);
+ if (is_revivable_callback()) {
+ RevivableCallback revivable =
+ reinterpret_cast<RevivableCallback>(weak_callback_);
+ revivable(reinterpret_cast<v8::Isolate*>(isolate),
+ reinterpret_cast<Persistent<Value>*>(&object),
+ par);
+ } else {
+ Handle<Object> handle(*object, isolate);
+ v8::WeakCallbackData<v8::Value, void> data(
+ reinterpret_cast<v8::Isolate*>(isolate),
+ v8::Utils::ToLocal(handle),
+ par);
+ weak_callback_(data);
+ }
}
// Absence of explicit cleanup or revival of weak handle
// in most of the cases would lead to memory leak.
@@ -274,9 +299,10 @@ class GlobalHandles::Node {
return true;
}
+ inline GlobalHandles* GetGlobalHandles();
+
private:
inline NodeBlock* FindBlock();
- inline GlobalHandles* GetGlobalHandles();
inline void IncreaseBlockUses();
inline void DecreaseBlockUses();
@@ -299,11 +325,12 @@ class GlobalHandles::Node {
class IsIndependent: public BitField<bool, 4, 1> {};
class IsPartiallyDependent: public BitField<bool, 5, 1> {};
class IsInNewSpaceList: public BitField<bool, 6, 1> {};
+ class IsRevivableCallback: public BitField<bool, 7, 1> {};
uint8_t flags_;
// Handle specific callback - might be a weak reference in disguise.
- RevivableCallback weak_reference_callback_;
+ WeakCallback weak_callback_;
// Provided data for callback. In FREE state, this is used for
// the free list link.
@@ -482,6 +509,12 @@ Handle<Object> GlobalHandles::Create(Object* value) {
}
+Handle<Object> GlobalHandles::CopyGlobal(Object** location) {
+ ASSERT(location != NULL);
+ return Node::FromLocation(location)->GetGlobalHandles()->Create(*location);
+}
+
+
void GlobalHandles::Destroy(Object** location) {
if (location != NULL) Node::FromLocation(location)->Release();
}
@@ -489,9 +522,10 @@ void GlobalHandles::Destroy(Object** location) {
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
- RevivableCallback weak_reference_callback) {
- ASSERT(weak_reference_callback != NULL);
- Node::FromLocation(location)->MakeWeak(parameter, weak_reference_callback);
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback) {
+ Node::FromLocation(location)->MakeWeak(
+ parameter, weak_callback, revivable_callback);
}
@@ -1019,4 +1053,68 @@ void GlobalHandles::ComputeObjectGroupsAndImplicitReferences() {
}
+EternalHandles::EternalHandles() : size_(0) {
+ for (unsigned i = 0; i < ARRAY_SIZE(singleton_handles_); i++) {
+ singleton_handles_[i] = kInvalidIndex;
+ }
+}
+
+
+EternalHandles::~EternalHandles() {
+ for (int i = 0; i < blocks_.length(); i++) delete[] blocks_[i];
+}
+
+
+void EternalHandles::IterateAllRoots(ObjectVisitor* visitor) {
+ int limit = size_;
+ for (int i = 0; i < blocks_.length(); i++) {
+ ASSERT(limit > 0);
+ Object** block = blocks_[i];
+ visitor->VisitPointers(block, block + Min(limit, kSize));
+ limit -= kSize;
+ }
+}
+
+
+void EternalHandles::IterateNewSpaceRoots(ObjectVisitor* visitor) {
+ for (int i = 0; i < new_space_indices_.length(); i++) {
+ visitor->VisitPointer(GetLocation(new_space_indices_[i]));
+ }
+}
+
+
+void EternalHandles::PostGarbageCollectionProcessing(Heap* heap) {
+ int last = 0;
+ for (int i = 0; i < new_space_indices_.length(); i++) {
+ int index = new_space_indices_[i];
+ if (heap->InNewSpace(*GetLocation(index))) {
+ new_space_indices_[last++] = index;
+ }
+ }
+ new_space_indices_.Rewind(last);
+}
+
+
+void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
+ ASSERT_EQ(kInvalidIndex, *index);
+ if (object == NULL) return;
+ ASSERT_NE(isolate->heap()->the_hole_value(), object);
+ int block = size_ >> kShift;
+ int offset = size_ & kMask;
+ // Need a new block: the current one is full.
+ if (offset == 0) {
+ Object** next_block = new Object*[kSize];
+ Object* the_hole = isolate->heap()->the_hole_value();
+ MemsetPointer(next_block, the_hole, kSize);
+ blocks_.Add(next_block);
+ }
+ ASSERT_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]);
+ blocks_[block][offset] = object;
+ if (isolate->heap()->InNewSpace(object)) {
+ new_space_indices_.Add(size_);
+ }
+ *index = size_++;
+}
+
+
} } // namespace v8::internal
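EternalHandles::Create() above grows storage in fixed 256-slot blocks: an index splits into a block number (index >> kShift) and an offset (index & kMask), and a fresh block is allocated exactly when the next offset wraps to 0, so existing slots never move. A standalone C++ illustration of the arithmetic:

#include <cstdio>

static const int kShift = 8;
static const int kSize = 1 << kShift;  // 256 slots per block
static const int kMask = 0xff;

int main() {
  int index = 517;
  printf("block=%d offset=%d\n", index >> kShift, index & kMask);  // 2, 5
  // A new block is needed whenever size_ & kMask == 0, i.e. at every
  // multiple of kSize: indices 0, 256, 512, ...
  for (int size = 0; size <= 2 * kSize; size += kSize) {
    printf("index %d starts block %d\n", size, size >> kShift);
  }
  return 0;
}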
diff --git a/chromium/v8/src/global-handles.h b/chromium/v8/src/global-handles.h
index cd75133a243..4b46aac05d0 100644
--- a/chromium/v8/src/global-handles.h
+++ b/chromium/v8/src/global-handles.h
@@ -31,6 +31,7 @@
#include "../include/v8.h"
#include "../include/v8-profiler.h"
+#include "handles.h"
#include "list.h"
#include "v8utils.h"
@@ -127,9 +128,13 @@ class GlobalHandles {
// Creates a new global handle that is alive until Destroy is called.
Handle<Object> Create(Object* value);
+ // Copy a global handle
+ static Handle<Object> CopyGlobal(Object** location);
+
// Destroy a global handle.
static void Destroy(Object** location);
+ typedef WeakCallbackData<v8::Value, void>::Callback WeakCallback;
typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
// Make the global handle weak and set the callback parameter for the
@@ -140,7 +145,14 @@ class GlobalHandles {
// reason is that Smi::FromInt(0) does not change during garbage collection.
static void MakeWeak(Object** location,
void* parameter,
- RevivableCallback weak_reference_callback);
+ WeakCallback weak_callback,
+ RevivableCallback revivable_callback);
+
+ static inline void MakeWeak(Object** location,
+ void* parameter,
+ RevivableCallback revivable_callback) {
+ MakeWeak(location, parameter, NULL, revivable_callback);
+ }
void RecordStats(HeapStats* stats);
@@ -331,6 +343,75 @@ class GlobalHandles {
};
+class EternalHandles {
+ public:
+ enum SingletonHandle {
+ I18N_TEMPLATE_ONE,
+ I18N_TEMPLATE_TWO,
+
+ NUMBER_OF_SINGLETON_HANDLES
+ };
+
+ EternalHandles();
+ ~EternalHandles();
+
+ int NumberOfHandles() { return size_; }
+
+ // Create an EternalHandle, overwriting the index.
+ void Create(Isolate* isolate, Object* object, int* index);
+
+ // Grab the handle for an existing EternalHandle.
+ inline Handle<Object> Get(int index) {
+ return Handle<Object>(GetLocation(index));
+ }
+
+ // Grab the handle for an existing SingletonHandle.
+ inline Handle<Object> GetSingleton(SingletonHandle singleton) {
+ ASSERT(Exists(singleton));
+ return Get(singleton_handles_[singleton]);
+ }
+
+ // Checks whether a SingletonHandle has been assigned.
+ inline bool Exists(SingletonHandle singleton) {
+ return singleton_handles_[singleton] != kInvalidIndex;
+ }
+
+ // Assign a SingletonHandle to an empty slot and return the handle.
+ Handle<Object> CreateSingleton(Isolate* isolate,
+ Object* object,
+ SingletonHandle singleton) {
+ Create(isolate, object, &singleton_handles_[singleton]);
+ return Get(singleton_handles_[singleton]);
+ }
+
+ // Iterates over all handles.
+ void IterateAllRoots(ObjectVisitor* visitor);
+ // Iterates over all handles which might be in new space.
+ void IterateNewSpaceRoots(ObjectVisitor* visitor);
+ // Rebuilds new space list.
+ void PostGarbageCollectionProcessing(Heap* heap);
+
+ private:
+ static const int kInvalidIndex = -1;
+ static const int kShift = 8;
+ static const int kSize = 1 << kShift;
+ static const int kMask = 0xff;
+
+ // Gets the slot for an index
+ inline Object** GetLocation(int index) {
+ ASSERT(index >= 0 && index < size_);
+ return &blocks_[index >> kShift][index & kMask];
+ }
+
+ int size_;
+ List<Object**> blocks_;
+ List<int> new_space_indices_;
+ int singleton_handles_[NUMBER_OF_SINGLETON_HANDLES];
+
+ DISALLOW_COPY_AND_ASSIGN(EternalHandles);
+};
+
+
} } // namespace v8::internal
#endif // V8_GLOBAL_HANDLES_H_
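MakeWeak() above stores both callback flavors in the single weak_callback_ field, with the IsRevivableCallback bit recording which cast to undo when the handle dies. A minimal standalone C++ model of that tagged dispatch; Node, WeakCallback, and RevivableCallback here are simplified stand-ins for the real types:

#include <cstdio>

typedef void (*WeakCallback)(void* parameter);
typedef void (*RevivableCallback)(void* parameter, int extra);

struct Node {
  WeakCallback weak_callback;
  bool is_revivable;

  // Exactly one of the two callbacks may be supplied.
  void MakeWeak(WeakCallback wc, RevivableCallback rc) {
    if (wc != NULL) {
      weak_callback = wc;
      is_revivable = false;
    } else {
      weak_callback = reinterpret_cast<WeakCallback>(rc);
      is_revivable = true;
    }
  }

  void Invoke(void* parameter) {
    if (is_revivable) {  // undo the cast before calling
      reinterpret_cast<RevivableCallback>(weak_callback)(parameter, 0);
    } else {
      weak_callback(parameter);
    }
  }
};

static void OnWeak(void* p) {
  printf("weak: %s\n", static_cast<const char*>(p));
}

int main() {
  Node node;
  node.MakeWeak(OnWeak, NULL);
  node.Invoke(const_cast<char*>("handle died"));
  return 0;
}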
diff --git a/chromium/v8/src/globals.h b/chromium/v8/src/globals.h
index 26fd53114c6..1977e68c82e 100644
--- a/chromium/v8/src/globals.h
+++ b/chromium/v8/src/globals.h
@@ -28,37 +28,22 @@
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
-// Define V8_INFINITY
-#define V8_INFINITY INFINITY
-
-// GCC specific stuff
-#ifdef __GNUC__
-
-#define __GNUC_VERSION_FOR_INFTY__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
+#include "../include/v8stdint.h"
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
// For now, we use the more involved template-based version from <limits>, but
// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
-// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
-#if __GNUC_VERSION_FOR_INFTY__ >= 29600 && __GNUC_VERSION_FOR_INFTY__ < 40100
-#include <limits>
-#undef V8_INFINITY
-#define V8_INFINITY std::numeric_limits<double>::infinity()
-#endif
-#undef __GNUC_VERSION_FOR_INFTY__
-
-#endif // __GNUC__
-
-#ifdef _MSC_VER
-#undef V8_INFINITY
-#define V8_INFINITY HUGE_VAL
+#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
+# include <limits> // NOLINT
+# define V8_INFINITY std::numeric_limits<double>::infinity()
+#elif V8_CC_MSVC
+# define V8_INFINITY HUGE_VAL
+#else
+# define V8_INFINITY INFINITY
#endif
-
-#include "../include/v8stdint.h"
-
namespace v8 {
namespace internal {
@@ -186,27 +171,32 @@ typedef byte* Address;
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
-#if V8_HOST_ARCH_64_BIT
-#if defined(_MSC_VER)
-#define V8_UINT64_C(x) (x ## UI64)
-#define V8_INT64_C(x) (x ## I64)
-#define V8_INTPTR_C(x) (x ## I64)
-#define V8_PTR_PREFIX "ll"
-#elif defined(__MINGW64__)
-#define V8_UINT64_C(x) (x ## ULL)
-#define V8_INT64_C(x) (x ## LL)
-#define V8_INTPTR_C(x) (x ## LL)
-#define V8_PTR_PREFIX "I64"
+#if V8_CC_MSVC
+# define V8_UINT64_C(x) (x ## UI64)
+# define V8_INT64_C(x) (x ## I64)
+# if V8_HOST_ARCH_64_BIT
+# define V8_INTPTR_C(x) (x ## I64)
+# define V8_PTR_PREFIX "ll"
+# else
+# define V8_INTPTR_C(x) (x)
+# define V8_PTR_PREFIX ""
+# endif // V8_HOST_ARCH_64_BIT
+#elif V8_CC_MINGW64
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# define V8_INTPTR_C(x) (x ## LL)
+# define V8_PTR_PREFIX "I64"
+#elif V8_HOST_ARCH_64_BIT
+# define V8_UINT64_C(x) (x ## UL)
+# define V8_INT64_C(x) (x ## L)
+# define V8_INTPTR_C(x) (x ## L)
+# define V8_PTR_PREFIX "l"
#else
-#define V8_UINT64_C(x) (x ## UL)
-#define V8_INT64_C(x) (x ## L)
-#define V8_INTPTR_C(x) (x ## L)
-#define V8_PTR_PREFIX "l"
+# define V8_UINT64_C(x) (x ## ULL)
+# define V8_INT64_C(x) (x ## LL)
+# define V8_INTPTR_C(x) (x)
+# define V8_PTR_PREFIX ""
#endif
-#else // V8_HOST_ARCH_64_BIT
-#define V8_INTPTR_C(x) (x)
-#define V8_PTR_PREFIX ""
-#endif // V8_HOST_ARCH_64_BIT
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
@@ -292,6 +282,10 @@ const int kOneByteSize = kCharSize;
const int kUC16Size = sizeof(uc16); // NOLINT
+// Round up n to be a multiple of sz, where sz is a power of 2.
+#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
+
+
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
@@ -330,18 +324,11 @@ F FUNCTION_CAST(Address addr) {
}
-#if __cplusplus >= 201103L
-#define DISALLOW_BY_DELETE = delete
-#else
-#define DISALLOW_BY_DELETE
-#endif
-
-
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&) DISALLOW_BY_DELETE; \
- void operator=(const TypeName&) DISALLOW_BY_DELETE
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) V8_DELETE; \
+ void operator=(const TypeName&) V8_DELETE
// A macro to disallow all the implicit constructors, namely the
@@ -350,36 +337,18 @@ F FUNCTION_CAST(Address addr) {
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName() DISALLOW_BY_DELETE; \
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() V8_DELETE; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
-// Define used for helping GCC to make better inlining. Don't bother for debug
-// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
-// errors in debug build.
-#if defined(__GNUC__) && !defined(DEBUG)
-#if (__GNUC__ >= 4)
-#define INLINE(header) inline header __attribute__((always_inline))
-#define NO_INLINE(header) header __attribute__((noinline))
-#else
-#define INLINE(header) inline __attribute__((always_inline)) header
-#define NO_INLINE(header) __attribute__((noinline)) header
-#endif
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define INLINE(header) __forceinline header
-#define NO_INLINE(header) header
-#else
-#define INLINE(header) inline header
-#define NO_INLINE(header) header
-#endif
+// Newly written code should use V8_INLINE and V8_NOINLINE directly.
+#define INLINE(declarator) V8_INLINE declarator
+#define NO_INLINE(declarator) V8_NOINLINE declarator
-#if defined(__GNUC__) && __GNUC__ >= 4
-#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
-#else
-#define MUST_USE_RESULT
-#endif
+// Newly written code should use V8_WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
// Define DISABLE_ASAN macros.
@@ -424,18 +393,6 @@ enum LanguageMode {
};
-// A simple Maybe type, that can be passed by value.
-template<class T>
-struct Maybe {
- Maybe() : has_value(false) {}
- explicit Maybe(T t) : has_value(true), value(t) {}
- Maybe(bool has, T t) : has_value(has), value(t) {}
-
- bool has_value;
- T value;
-};
-
-
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
//
// This flag is used in the backend to represent the language mode. So far
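
The ROUND_UP macro added to globals.h above only works when sz is a power of two: adding sz - 1 and then clearing the low bits rounds n up to the next multiple of sz. A small self-contained check of that behavior (the macro body is copied from the hunk above):

    #include <cassert>

    #define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))

    int main() {
      assert(ROUND_UP(13, 8) == 16);      // (13 + 7) & ~7 == 16
      assert(ROUND_UP(16, 8) == 16);      // aligned values are unchanged
      assert(ROUND_UP(1, 4096) == 4096);  // e.g. rounding up to a page size
      return 0;
    }
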
diff --git a/chromium/v8/src/handles-inl.h b/chromium/v8/src/handles-inl.h
index 4f4490b75bf..5b879d8f088 100644
--- a/chromium/v8/src/handles-inl.h
+++ b/chromium/v8/src/handles-inl.h
@@ -32,6 +32,7 @@
#include "api.h"
#include "apiutils.h"
#include "handles.h"
+#include "heap.h"
#include "isolate.h"
namespace v8 {
@@ -85,11 +86,13 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
- Isolate* isolate = heap_object->GetIsolate();
+ Heap* heap = heap_object->GetHeap();
Object** handle = reinterpret_cast<Object**>(location_);
- Object** roots_array_start = isolate->heap()->roots_array_start();
+ Object** roots_array_start = heap->roots_array_start();
if (roots_array_start <= handle &&
- handle < roots_array_start + Heap::kStrongRootListLength) {
+ handle < roots_array_start + Heap::kStrongRootListLength &&
+ heap->RootCanBeTreatedAsConstant(
+ static_cast<Heap::RootListIndex>(handle - roots_array_start))) {
return true;
}
if (!AllowHandleDereference::IsAllowed()) return false;
@@ -98,7 +101,7 @@ bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
// Accessing maps and internalized strings is safe.
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
- return !isolate->IsDeferredHandle(handle);
+ return !heap->isolate()->IsDeferredHandle(handle);
}
return true;
}
diff --git a/chromium/v8/src/handles.cc b/chromium/v8/src/handles.cc
index 48114d91a71..b3704df6989 100644
--- a/chromium/v8/src/handles.cc
+++ b/chromium/v8/src/handles.cc
@@ -208,17 +208,6 @@ Handle<String> FlattenGetString(Handle<String> string) {
}
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype) {
- ASSERT(function->should_have_prototype());
- CALL_HEAP_FUNCTION(function->GetIsolate(),
- Accessors::FunctionSetPrototype(*function,
- *prototype,
- NULL),
- Object);
-}
-
-
Handle<Object> SetProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -320,11 +309,6 @@ Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
}
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
-}
-
-
// Wrappers for scripts are kept alive and cached in weak global
// handles referred from foreign objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage
@@ -543,8 +527,9 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
- v8::NamedPropertyEnumerator enum_fun =
- v8::ToCData<v8::NamedPropertyEnumerator>(interceptor->enumerator());
+ v8::NamedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::NamedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
result = args.Call(enum_fun);
}
@@ -565,8 +550,9 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
args(isolate, interceptor->data(), *receiver, *object);
v8::Handle<v8::Array> result;
if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumerator enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumerator>(interceptor->enumerator());
+ v8::IndexedPropertyEnumeratorCallback enum_fun =
+ v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
+ interceptor->enumerator());
LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
result = args.Call(enum_fun);
#if ENABLE_EXTRA_CHECKS
@@ -629,8 +615,12 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
if (p->IsJSProxy()) {
Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
Handle<Object> args[] = { proxy };
- Handle<Object> names = Execution::Call(
- isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
+ Handle<Object> names = Execution::Call(isolate,
+ isolate->proxy_enumerate(),
+ object,
+ ARRAY_SIZE(args),
+ args,
+ threw);
if (*threw) return content;
content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
break;
diff --git a/chromium/v8/src/handles.h b/chromium/v8/src/handles.h
index 90db7d12121..c3e4dca1a6e 100644
--- a/chromium/v8/src/handles.h
+++ b/chromium/v8/src/handles.h
@@ -259,8 +259,6 @@ Handle<JSObject> Copy(Handle<JSObject> obj);
Handle<JSObject> DeepCopy(Handle<JSObject> obj);
-Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
-
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
Handle<JSArray> array);
@@ -322,9 +320,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<JSFunction> constructor,
Handle<JSGlobalProxy> global);
-Handle<Object> SetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype);
-
Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
Handle<Object> key);
diff --git a/chromium/v8/src/harmony-array.js b/chromium/v8/src/harmony-array.js
new file mode 100644
index 00000000000..e440299ff61
--- /dev/null
+++ b/chromium/v8/src/harmony-array.js
@@ -0,0 +1,124 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+'use strict';
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+// -------------------------------------------------------------------
+
+// ES6 draft 07-15-13, section 15.4.3.23
+function ArrayFind(predicate /* thisArg */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.find"]);
+ }
+
+ var array = ToObject(this);
+ var length = ToInteger(array.length);
+
+ if (!IS_SPEC_FUNCTION(predicate)) {
+ throw MakeTypeError('called_non_callable', [predicate]);
+ }
+
+ var thisArg;
+ if (%_ArgumentsLength() > 1) {
+ thisArg = %_Arguments(1);
+ }
+
+ if (IS_NULL_OR_UNDEFINED(thisArg)) {
+ thisArg = %GetDefaultReceiver(predicate) || thisArg;
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ thisArg = ToObject(thisArg);
+ }
+
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ if (%_CallFunction(thisArg, element, i, array, predicate)) {
+ return element;
+ }
+ }
+ }
+
+ return;
+}
+
+
+// ES6 draft 07-15-13, section 15.4.3.24
+function ArrayFindIndex(predicate /* thisArg */) { // length == 1
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Array.prototype.findIndex"]);
+ }
+
+ var array = ToObject(this);
+ var length = ToInteger(array.length);
+
+ if (!IS_SPEC_FUNCTION(predicate)) {
+ throw MakeTypeError('called_non_callable', [predicate]);
+ }
+
+ var thisArg;
+ if (%_ArgumentsLength() > 1) {
+ thisArg = %_Arguments(1);
+ }
+
+ if (IS_NULL_OR_UNDEFINED(thisArg)) {
+ thisArg = %GetDefaultReceiver(predicate) || thisArg;
+ } else if (!IS_SPEC_OBJECT(thisArg) && %IsClassicModeFunction(predicate)) {
+ thisArg = ToObject(thisArg);
+ }
+
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ if (%_CallFunction(thisArg, element, i, array, predicate)) {
+ return i;
+ }
+ }
+ }
+
+ return -1;
+}
+
+
+// -------------------------------------------------------------------
+
+function HarmonyArrayExtendArrayPrototype() {
+ %CheckIsBootstrapping();
+
+ // Set up the non-enumerable functions on the Array prototype object.
+ InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+ "find", ArrayFind,
+ "findIndex", ArrayFindIndex
+ ));
+}
+
+HarmonyArrayExtendArrayPrototype();
\ No newline at end of file
diff --git a/chromium/v8/src/heap-inl.h b/chromium/v8/src/heap-inl.h
index 3c1d4d274ba..4f1960386a5 100644
--- a/chromium/v8/src/heap-inl.h
+++ b/chromium/v8/src/heap-inl.h
@@ -69,7 +69,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
*(--rear_) = size;
// Assert no overflow into live objects.
#ifdef DEBUG
- SemiSpace::AssertValidRange(HEAP->new_space()->top(),
+ SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
reinterpret_cast<Address>(rear_));
#endif
}
@@ -439,6 +439,43 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
+bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
+ // Object migration is governed by the following rules:
+ //
+ // 1) Objects in new-space can be migrated to one of the old spaces
+ // that matches their target space or they stay in new-space.
+ // 2) Objects in old-space stay in the same space when migrating.
+ // 3) Fillers (two or more words) can migrate due to left-trimming of
+ // fixed arrays in new-space, old-data-space and old-pointer-space.
+  // 4) Fillers (one word) can never migrate; they are skipped by
+  //    incremental marking explicitly to prevent an invalid pattern.
+ //
+ // Since this function is used for debugging only, we do not place
+ // asserts here, but check everything explicitly.
+ if (object->map() == one_pointer_filler_map()) return false;
+ InstanceType type = object->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ AllocationSpace src = chunk->owner()->identity();
+ switch (src) {
+ case NEW_SPACE:
+ return dst == src || dst == TargetSpaceId(type);
+ case OLD_POINTER_SPACE:
+ return dst == src && (dst == TargetSpaceId(type) || object->IsFiller());
+ case OLD_DATA_SPACE:
+ return dst == src && dst == TargetSpaceId(type);
+ case CODE_SPACE:
+ return dst == src && type == CODE_TYPE;
+ case MAP_SPACE:
+ case CELL_SPACE:
+ case PROPERTY_CELL_SPACE:
+ case LO_SPACE:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
@@ -471,7 +508,7 @@ void Heap::ScavengePointer(HeapObject** p) {
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
+ ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
@@ -483,11 +520,13 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// copied.
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
- ASSERT(HEAP->InFromSpace(*p));
+ ASSERT(object->GetIsolate()->heap()->InFromSpace(*p));
*p = dest;
return;
}
+ // AllocationMementos are unrooted and shouldn't survive a scavenge
+ ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
// Call the slow part of scavenge object.
return ScavengeObjectSlow(p, object);
}
@@ -576,10 +615,10 @@ Isolate* Heap::isolate() {
#ifdef DEBUG
-#define GC_GREEDY_CHECK() \
- if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
+#define GC_GREEDY_CHECK(ISOLATE) \
+ if (FLAG_gc_greedy) (ISOLATE)->heap()->GarbageCollectionGreedyCheck()
#else
-#define GC_GREEDY_CHECK() { }
+#define GC_GREEDY_CHECK(ISOLATE) { }
#endif
// Calls the FUNCTION_CALL function and retries it up to three times
@@ -591,7 +630,7 @@ Isolate* Heap::isolate() {
#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\
do { \
- GC_GREEDY_CHECK(); \
+ GC_GREEDY_CHECK(ISOLATE); \
MaybeObject* __maybe_object__ = FUNCTION_CALL; \
Object* __object__ = NULL; \
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
@@ -599,7 +638,7 @@ Isolate* Heap::isolate() {
OOM; \
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
+ (ISOLATE)->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
allocation_space(), \
"allocation failure"); \
__maybe_object__ = FUNCTION_CALL; \
@@ -608,8 +647,8 @@ Isolate* Heap::isolate() {
OOM; \
} \
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
- ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
- ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
+ (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
+ (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
{ \
AlwaysAllocateScope __scope__; \
__maybe_object__ = FUNCTION_CALL; \
@@ -681,15 +720,13 @@ void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
+ ASSERT(obj != heap_->the_hole_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
+ ASSERT(obj != heap_->the_hole_value());
}
#endif
}
@@ -731,7 +768,7 @@ void Heap::CompletelyClearInstanceofCache() {
MaybeObject* TranscendentalCache::Get(Type type, double input) {
SubCache* cache = caches_[type];
if (cache == NULL) {
- caches_[type] = cache = new SubCache(type);
+ caches_[type] = cache = new SubCache(isolate_, type);
}
return cache->Get(input);
}
@@ -796,25 +833,29 @@ AlwaysAllocateScope::AlwaysAllocateScope() {
// non-handle code to call handle code. The code still works but
// performance will degrade, so we want to catch this situation
// in debug mode.
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
- HEAP->always_allocate_scope_depth_++;
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
+ isolate->heap()->always_allocate_scope_depth_++;
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
- HEAP->always_allocate_scope_depth_--;
- ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->always_allocate_scope_depth_--;
+ ASSERT(isolate->heap()->always_allocate_scope_depth_ == 0);
}
#ifdef VERIFY_HEAP
NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() {
- HEAP->no_weak_embedded_maps_verification_scope_depth_++;
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->no_weak_embedded_maps_verification_scope_depth_++;
}
NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() {
- HEAP->no_weak_embedded_maps_verification_scope_depth_--;
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->no_weak_embedded_maps_verification_scope_depth_--;
}
#endif
@@ -823,7 +864,7 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->Contains(object));
+ CHECK(object->GetIsolate()->heap()->Contains(object));
CHECK(object->map()->IsMap());
}
}
@@ -831,21 +872,23 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
double GCTracer::SizeOfHeapObjects() {
- return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
+ return (static_cast<double>(heap_->SizeOfObjects())) / MB;
}
DisallowAllocationFailure::DisallowAllocationFailure() {
#ifdef DEBUG
- old_state_ = HEAP->disallow_allocation_failure_;
- HEAP->disallow_allocation_failure_ = true;
+ Isolate* isolate = Isolate::Current();
+ old_state_ = isolate->heap()->disallow_allocation_failure_;
+ isolate->heap()->disallow_allocation_failure_ = true;
#endif
}
DisallowAllocationFailure::~DisallowAllocationFailure() {
#ifdef DEBUG
- HEAP->disallow_allocation_failure_ = old_state_;
+ Isolate* isolate = Isolate::Current();
+ isolate->heap()->disallow_allocation_failure_ = old_state_;
#endif
}
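
Several hunks above parenthesize the ISOLATE macro argument, e.g. (ISOLATE)->heap() in GC_GREEDY_CHECK and CALL_AND_RETRY. Without the parentheses, an argument that is itself an expression can parse incorrectly after expansion. A contrived, self-contained illustration (the Heap/Isolate stand-ins and the macros are invented for this example):

    struct Heap { int id; };
    struct Isolate {
      Heap heap_;
      Heap* heap() { return &heap_; }
    };

    #define HEAP_ID_UNSAFE(ISOLATE) ISOLATE->heap()->id
    #define HEAP_ID_SAFE(ISOLATE) (ISOLATE)->heap()->id

    int main() {
      Isolate a = {{1}};
      Isolate b = {{2}};
      bool use_b = true;
      // HEAP_ID_UNSAFE(use_b ? &b : &a) would expand to
      //   use_b ? &b : &a->heap()->id
      // which applies -> to `a` alone and fails to compile.
      return HEAP_ID_SAFE(use_b ? &b : &a) == 2 ? 0 : 1;  // exits with 0
    }
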
diff --git a/chromium/v8/src/heap-snapshot-generator.cc b/chromium/v8/src/heap-snapshot-generator.cc
index 9f9f84a01dc..bd47eec63b3 100644
--- a/chromium/v8/src/heap-snapshot-generator.cc
+++ b/chromium/v8/src/heap-snapshot-generator.cc
@@ -175,6 +175,8 @@ const char* HeapEntry::TypeAsString() {
case kHeapNumber: return "/number/";
case kNative: return "/native/";
case kSynthetic: return "/synthetic/";
+ case kConsString: return "/concatenated string/";
+ case kSlicedString: return "/sliced string/";
default: return "???";
}
}
@@ -369,6 +371,12 @@ const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
HeapObjectsMap::kGcRootsFirstSubrootId +
VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
+
+static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
HeapObjectsMap::HeapObjectsMap(Heap* heap)
: next_id_(kFirstAvailableObjectId),
entries_map_(AddressesMatch),
@@ -393,19 +401,20 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
ASSERT(to != NULL);
ASSERT(from != NULL);
if (from == to) return;
- void* from_value = entries_map_.Remove(from, AddressHash(from));
+ void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
if (from_value == NULL) {
// It may occur that some untracked object moves to an address X and there
// is a tracked object at that address. In this case we should remove the
// entry as we know that the object has died.
- void* to_value = entries_map_.Remove(to, AddressHash(to));
+ void* to_value = entries_map_.Remove(to, ComputePointerHash(to));
if (to_value != NULL) {
int to_entry_info_index =
static_cast<int>(reinterpret_cast<intptr_t>(to_value));
entries_.at(to_entry_info_index).addr = NULL;
}
} else {
- HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
+ HashMap::Entry* to_entry = entries_map_.Lookup(to, ComputePointerHash(to),
+ true);
if (to_entry->value != NULL) {
// We found the existing entry with to address for an old object.
// Without this operation we will have two EntryInfo's with the same
@@ -425,7 +434,8 @@ void HeapObjectsMap::MoveObject(Address from, Address to) {
SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
+ HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
+ false);
if (entry == NULL) return 0;
int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
@@ -437,7 +447,8 @@ SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
unsigned int size) {
ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
+ HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
+ true);
if (entry->value != NULL) {
int entry_index =
static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
@@ -461,7 +472,7 @@ void HeapObjectsMap::StopHeapObjectsTracking() {
void HeapObjectsMap::UpdateHeapObjectsMap() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::UpdateHeapObjectsMap");
HeapIterator iterator(heap_);
for (HeapObject* obj = iterator.next();
@@ -532,13 +543,14 @@ void HeapObjectsMap::RemoveDeadEntries() {
}
entries_.at(first_free_entry).accessed = false;
HashMap::Entry* entry = entries_map_.Lookup(
- entry_info.addr, AddressHash(entry_info.addr), false);
+ entry_info.addr, ComputePointerHash(entry_info.addr), false);
ASSERT(entry);
entry->value = reinterpret_cast<void*>(first_free_entry);
++first_free_entry;
} else {
if (entry_info.addr) {
- entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr));
+ entries_map_.Remove(entry_info.addr,
+ ComputePointerHash(entry_info.addr));
}
}
}
@@ -548,12 +560,13 @@ void HeapObjectsMap::RemoveDeadEntries() {
}
-SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
+SnapshotObjectId HeapObjectsMap::GenerateId(Heap* heap,
+ v8::RetainedObjectInfo* info) {
SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
const char* label = info->GetLabel();
id ^= StringHasher::HashSequentialString(label,
static_cast<int>(strlen(label)),
- HEAP->HashSeed());
+ heap->HashSeed());
intptr_t element_count = info->GetElementCount();
if (element_count != -1)
id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
@@ -573,6 +586,7 @@ size_t HeapObjectsMap::GetUsedMemorySize() const {
HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
: is_tracking_objects_(false),
+ names_(heap),
ids_(heap) {
}
@@ -611,7 +625,7 @@ void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
SnapshotObjectId id) {
// First perform a full GC in order to avoid dead objects.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::FindHeapObjectById");
DisallowHeapAllocation no_allocation;
HeapObject* object = NULL;
@@ -722,7 +736,7 @@ V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress,
v8::HeapProfiler::ObjectNameResolver* resolver)
- : heap_(Isolate::Current()->heap()),
+ : heap_(snapshot->collection()->heap()),
snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
@@ -772,6 +786,15 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
}
return AddEntry(object, HeapEntry::kObject, name);
} else if (object->IsString()) {
+ String* string = String::cast(object);
+ if (string->IsConsString())
+ return AddEntry(object,
+ HeapEntry::kConsString,
+ "(concatenated string)");
+ if (string->IsSlicedString())
+ return AddEntry(object,
+ HeapEntry::kSlicedString,
+ "(sliced string)");
return AddEntry(object,
HeapEntry::kString,
collection_->names()->GetName(String::cast(object)));
@@ -1842,7 +1865,7 @@ class GlobalObjectsEnumerator : public ObjectVisitor {
// Modifies heap. Must not be run during heap traversal.
void V8HeapExplorer::TagGlobalObjects() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = heap_->isolate();
HandleScope scope(isolate);
GlobalObjectsEnumerator enumerator;
isolate->global_handles()->IterateAllRoots(&enumerator);
@@ -1911,14 +1934,16 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
return snapshot_->AddEntry(
entries_type_,
name,
- HeapObjectsMap::GenerateId(info),
+ HeapObjectsMap::GenerateId(collection_->heap(), info),
size != -1 ? static_cast<int>(size) : 0);
}
NativeObjectsExplorer::NativeObjectsExplorer(
- HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
+ HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress)
+ : isolate_(snapshot->collection()->heap()->isolate()),
+ snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
embedder_queried_(false),
@@ -1963,7 +1988,7 @@ int NativeObjectsExplorer::EstimateObjectsCount() {
void NativeObjectsExplorer::FillRetainedObjects() {
if (embedder_queried_) return;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = isolate_;
const GCType major_gc_type = kGCTypeMarkSweepCompact;
// Record objects that are joined into ObjectGroups.
isolate->heap()->CallGCPrologueCallbacks(
@@ -1990,7 +2015,7 @@ void NativeObjectsExplorer::FillRetainedObjects() {
void NativeObjectsExplorer::FillImplicitReferences() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = isolate_;
List<ImplicitRefGroup*>* groups =
isolate->global_handles()->implicit_ref_groups();
for (int i = 0; i < groups->length(); ++i) {
@@ -2085,7 +2110,7 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
uint32_t hash = StringHasher::HashSequentialString(
label_copy,
static_cast<int>(strlen(label_copy)),
- HEAP->HashSeed());
+ isolate_->heap()->HashSeed());
HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
hash, true);
if (entry->value == NULL) {
@@ -2147,7 +2172,7 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
if (in_groups_.Contains(*p)) return;
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = isolate_;
v8::RetainedObjectInfo* info =
isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
if (info == NULL) return;
@@ -2233,15 +2258,15 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
// full GC is reachable from the root when computing dominators.
// This is not true for weakly reachable objects.
// As a temporary solution we call GC twice.
- Isolate::Current()->heap()->CollectAllGarbage(
+ heap_->CollectAllGarbage(
Heap::kMakeHeapIterableMask,
"HeapSnapshotGenerator::GenerateSnapshot");
- Isolate::Current()->heap()->CollectAllGarbage(
+ heap_->CollectAllGarbage(
Heap::kMakeHeapIterableMask,
"HeapSnapshotGenerator::GenerateSnapshot");
#ifdef VERIFY_HEAP
- Heap* debug_heap = Isolate::Current()->heap();
+ Heap* debug_heap = heap_;
CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
CHECK(!debug_heap->code_space()->was_swept_conservatively());
@@ -2573,7 +2598,9 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("regexp") ","
JSON_S("number") ","
JSON_S("native") ","
- JSON_S("synthetic")) ","
+ JSON_S("synthetic") ","
+ JSON_S("concatenated string") ","
+ JSON_S("sliced string")) ","
JSON_S("string") ","
JSON_S("number") ","
JSON_S("number") ","
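
The HeapObjectsMap changes above swap the class-local AddressHash helper for the shared ComputePointerHash utility; the removed helper (still visible in the heap-snapshot-generator.h hunk below) hashed the low 32 bits of the address with a zero seed. A sketch of the equivalent computation, assuming ComputePointerHash keeps that formula; the mixing function here is a stand-in, not the library's definition:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for v8::internal::ComputeIntegerHash (a Wang-style mixer).
    static uint32_t ComputeIntegerHashStandIn(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

    static uint32_t PointerHashSketch(void* addr) {
      // Truncate the pointer to 32 bits, then mix -- the same shape as the
      // removed HeapObjectsMap::AddressHash.
      return ComputeIntegerHashStandIn(
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)), 0);
    }

    int main() {
      int dummy = 0;
      std::printf("%08x\n", PointerHashSketch(&dummy));
      return 0;
    }
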
diff --git a/chromium/v8/src/heap-snapshot-generator.h b/chromium/v8/src/heap-snapshot-generator.h
index 31d808856d1..7b0cf8f021e 100644
--- a/chromium/v8/src/heap-snapshot-generator.h
+++ b/chromium/v8/src/heap-snapshot-generator.h
@@ -100,7 +100,9 @@ class HeapEntry BASE_EMBEDDED {
kRegExp = v8::HeapGraphNode::kRegExp,
kHeapNumber = v8::HeapGraphNode::kHeapNumber,
kNative = v8::HeapGraphNode::kNative,
- kSynthetic = v8::HeapGraphNode::kSynthetic
+ kSynthetic = v8::HeapGraphNode::kSynthetic,
+ kConsString = v8::HeapGraphNode::kConsString,
+ kSlicedString = v8::HeapGraphNode::kSlicedString
};
static const int kNoEntry;
@@ -235,7 +237,7 @@ class HeapObjectsMap {
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
size_t GetUsedMemorySize() const;
- static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
+ static SnapshotObjectId GenerateId(Heap* heap, v8::RetainedObjectInfo* info);
static inline SnapshotObjectId GetNthGcSubrootId(int delta);
static const int kObjectIdStep = 2;
@@ -266,16 +268,6 @@ class HeapObjectsMap {
void UpdateHeapObjectsMap();
void RemoveDeadEntries();
- static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t AddressHash(Address addr) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
- v8::internal::kZeroHashSeed);
- }
-
SnapshotObjectId next_id_;
HashMap entries_map_;
List<EntryInfo> entries_;
@@ -548,7 +540,7 @@ class NativeGroupRetainedObjectInfo;
class NativeObjectsExplorer {
public:
NativeObjectsExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
+ SnapshottingProgressReportingInterface* progress);
virtual ~NativeObjectsExplorer();
void AddRootEntries(SnapshotFillerInterface* filler);
int EstimateObjectsCount();
@@ -580,6 +572,7 @@ class NativeObjectsExplorer {
NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
+ Isolate* isolate_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
SnapshottingProgressReportingInterface* progress_;
diff --git a/chromium/v8/src/heap.cc b/chromium/v8/src/heap.cc
index c2a2707602e..f4cc421b079 100644
--- a/chromium/v8/src/heap.cc
+++ b/chromium/v8/src/heap.cc
@@ -38,6 +38,7 @@
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
+#include "isolate-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
@@ -47,6 +48,7 @@
#include "scopeinfo.h"
#include "snapshot.h"
#include "store-buffer.h"
+#include "utils/random-number-generator.h"
#include "v8threads.h"
#include "v8utils.h"
#include "vm-state-inl.h"
@@ -703,6 +705,17 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
+int Heap::NotifyContextDisposed() {
+ if (FLAG_concurrent_recompilation) {
+ // Flush the queued recompilation tasks.
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
+ flush_monomorphic_ics_ = true;
+ AgeInlineCaches();
+ return ++contexts_disposed_;
+}
+
+
void Heap::PerformScavenge() {
GCTracer tracer(this, NULL, NULL);
if (incremental_marking()->IsStopped()) {
@@ -719,7 +732,7 @@ void Heap::MoveElements(FixedArray* array,
int len) {
if (len == 0) return;
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
OS::MemMove(dst_objects,
array->data_start() + src_index,
@@ -753,9 +766,9 @@ class StringTableVerifier : public ObjectVisitor {
};
-static void VerifyStringTable() {
+static void VerifyStringTable(Heap* heap) {
StringTableVerifier verifier;
- HEAP->string_table()->IterateElements(&verifier);
+ heap->string_table()->IterateElements(&verifier);
}
#endif // VERIFY_HEAP
@@ -910,7 +923,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyStringTable();
+ VerifyStringTable(this);
}
#endif
@@ -1014,8 +1027,10 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
}
gc_post_processing_depth_--;
+ isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
+
// Update relocatables.
- Relocatable::PostGarbageCollectionProcessing();
+ Relocatable::PostGarbageCollectionProcessing(isolate_);
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
@@ -1032,7 +1047,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyStringTable();
+ VerifyStringTable(this);
}
#endif
@@ -1083,8 +1098,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
isolate_->counters()->objs_since_last_full()->Set(0);
- contexts_disposed_ = 0;
-
flush_monomorphic_ics_ = false;
}
@@ -1140,29 +1153,33 @@ class ScavengeVisitor: public ObjectVisitor {
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
public:
+ explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
void VisitPointers(Object** start, Object**end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
- CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
+ CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
}
}
}
+
+ private:
+ Heap* heap_;
};
-static void VerifyNonPointerSpacePointers() {
+static void VerifyNonPointerSpacePointers(Heap* heap) {
// Verify that there are no pointers to new space in spaces where we
// do not expect them.
- VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(HEAP->code_space());
+ VerifyNonPointerSpacePointersVisitor v(heap);
+ HeapObjectIterator code_it(heap->code_space());
for (HeapObject* object = code_it.Next();
object != NULL; object = code_it.Next())
object->Iterate(&v);
// The old data space is normally swept conservatively, so the iterator
// doesn't work; in that case we skip the next bit.
- if (!HEAP->old_data_space()->was_swept_conservatively()) {
- HeapObjectIterator data_it(HEAP->old_data_space());
+ if (!heap->old_data_space()->was_swept_conservatively()) {
+ HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next();
object != NULL; object = data_it.Next())
object->Iterate(&v);
@@ -1309,7 +1326,7 @@ void Heap::Scavenge() {
RelocationLock relocation_lock(this);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
+ if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
gc_state_ = SCAVENGE;
@@ -1604,6 +1621,29 @@ struct WeakListVisitor<JSFunction> {
template<>
+struct WeakListVisitor<Code> {
+ static void SetWeakNext(Code* code, Object* next) {
+ code->set_next_code_link(next);
+ }
+
+ static Object* WeakNext(Code* code) {
+ return code->next_code_link();
+ }
+
+ static int WeakNextOffset() {
+ return Code::kNextCodeLinkOffset;
+ }
+
+ static void VisitLiveObject(Heap*, Code*,
+ WeakObjectRetainer*, bool) {
+ }
+
+ static void VisitPhantomObject(Heap*, Code*) {
+ }
+};
+
+
+template<>
struct WeakListVisitor<Context> {
static void SetWeakNext(Context* context, Object* next) {
context->set(Context::NEXT_CONTEXT_LINK,
@@ -1619,22 +1659,34 @@ struct WeakListVisitor<Context> {
Context* context,
WeakObjectRetainer* retainer,
bool record_slots) {
- // Process the weak list of optimized functions for the context.
- Object* function_list_head =
- VisitWeakList<JSFunction>(
- heap,
- context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- retainer,
- record_slots);
- context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
- function_list_head,
- UPDATE_WRITE_BARRIER);
+ // Process the three weak lists linked off the context.
+ DoWeakList<JSFunction>(heap, context, retainer, record_slots,
+ Context::OPTIMIZED_FUNCTIONS_LIST);
+ DoWeakList<Code>(heap, context, retainer, record_slots,
+ Context::OPTIMIZED_CODE_LIST);
+ DoWeakList<Code>(heap, context, retainer, record_slots,
+ Context::DEOPTIMIZED_CODE_LIST);
+ }
+
+ template<class T>
+ static void DoWeakList(Heap* heap,
+ Context* context,
+ WeakObjectRetainer* retainer,
+ bool record_slots,
+ int index) {
+ // Visit the weak list, removing dead intermediate elements.
+ Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
+ record_slots);
+
+ // Update the list head.
+ context->set(index, list_head, UPDATE_WRITE_BARRIER);
+
if (record_slots) {
- Object** optimized_functions =
- HeapObject::RawField(
- context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Record the updated slot if necessary.
+ Object** head_slot = HeapObject::RawField(
+ context, FixedArray::SizeFor(index));
heap->mark_compact_collector()->RecordSlot(
- optimized_functions, optimized_functions, function_list_head);
+ head_slot, head_slot, list_head);
}
}
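
The DoWeakList helper above is trait-driven: VisitWeakList<T> walks a singly linked weak list, and the WeakListVisitor<T> specialization only has to say how to read and write T's "next" link (as the new WeakListVisitor<Code> specialization does through next_code_link). A self-contained miniature of the same pattern, with invented names and a boolean liveness flag standing in for the retainer callback:

    #include <cstdio>

    struct Node {
      bool alive;
      Node* next_link;
    };

    template<class T> struct WeakListVisitor;

    template<>
    struct WeakListVisitor<Node> {
      static void SetWeakNext(Node* n, Node* next) { n->next_link = next; }
      static Node* WeakNext(Node* n) { return n->next_link; }
    };

    // Walks the list, unlinking dead intermediate elements and returning
    // the new head -- the same shape as VisitWeakList<T> above.
    template<class T>
    T* VisitWeakList(T* head) {
      T* new_head = NULL;
      T* last_alive = NULL;
      for (T* cur = head; cur != NULL;
           cur = WeakListVisitor<T>::WeakNext(cur)) {
        if (!cur->alive) continue;          // drop dead elements
        if (last_alive == NULL) {
          new_head = cur;
        } else {
          WeakListVisitor<T>::SetWeakNext(last_alive, cur);
        }
        last_alive = cur;
      }
      if (last_alive != NULL) WeakListVisitor<T>::SetWeakNext(last_alive, NULL);
      return new_head;
    }

    int main() {
      Node c = {true, NULL};
      Node b = {false, &c};   // dead middle element
      Node a = {true, &b};
      Node* head = VisitWeakList(&a);
      std::printf("%d\n", head == &a && a.next_link == &c);  // prints 1
      return 0;
    }
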
@@ -2006,7 +2058,6 @@ class ScavengingVisitor : public StaticVisitorBase {
private:
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
- enum SizeRestriction { SMALL, UNKNOWN_SIZE };
static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
@@ -2058,15 +2109,12 @@ class ScavengingVisitor : public StaticVisitorBase {
}
- template<ObjectContents object_contents,
- SizeRestriction size_restriction,
- int alignment>
+ template<ObjectContents object_contents, int alignment>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
HeapObject* object,
int object_size) {
- SLOW_ASSERT((size_restriction != SMALL) ||
- (object_size <= Page::kMaxNonCodeHeapObjectSize));
+ SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size;
@@ -2079,17 +2127,14 @@ class ScavengingVisitor : public StaticVisitorBase {
if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
- if ((size_restriction != SMALL) &&
- (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
- NOT_EXECUTABLE);
+ if (object_contents == DATA_OBJECT) {
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+ maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- if (object_contents == DATA_OBJECT) {
- maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
- } else {
- maybe_result =
- heap->old_pointer_space()->AllocateRaw(allocation_size);
- }
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
Object* result = NULL; // Initialization to please compiler.
@@ -2119,6 +2164,8 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
@@ -2163,10 +2210,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
- slot,
- object,
- object_size);
+ EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
+ map, slot, object, object_size);
}
@@ -2175,11 +2220,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
- map,
- slot,
- object,
- object_size);
+ EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
+ map, slot, object, object_size);
}
@@ -2187,7 +2229,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -2197,7 +2239,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqOneByteString::cast(object)->
SeqOneByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -2207,7 +2249,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ EvacuateObject<DATA_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -2251,7 +2293,7 @@ class ScavengingVisitor : public StaticVisitorBase {
}
int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
+ EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -2262,7 +2304,7 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void VisitSpecialized(Map* map,
HeapObject** slot,
HeapObject* object) {
- EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+ EvacuateObject<object_contents, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -2270,7 +2312,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = map->instance_size();
- EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+ EvacuateObject<object_contents, kObjectAlignment>(
map, slot, object, object_size);
}
};
@@ -2338,7 +2380,7 @@ void Heap::SelectScavengingVisitorsTable() {
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- SLOW_ASSERT(HEAP->InFromSpace(object));
+ SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
MapWord first_word = object->map_word();
SLOW_ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
@@ -2427,6 +2469,7 @@ MaybeObject* Heap::AllocateAccessorPair() {
}
accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
+ accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
return accessors;
}
@@ -2935,7 +2978,7 @@ MaybeObject* Heap::CreateOddball(const char* to_string,
{ MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- return Oddball::cast(result)->Initialize(to_string, to_number, kind);
+ return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
}
@@ -3039,15 +3082,16 @@ bool Heap::CreateInitialObjects() {
// Finish initializing oddballs after creating the string table.
{ MaybeObject* maybe_obj =
- undefined_value()->Initialize("undefined",
+ undefined_value()->Initialize(this,
+ "undefined",
nan_value(),
Oddball::kUndefined);
if (!maybe_obj->ToObject(&obj)) return false;
}
// Initialize the null_value.
- { MaybeObject* maybe_obj =
- null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
+ { MaybeObject* maybe_obj = null_value()->Initialize(
+ this, "null", Smi::FromInt(0), Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -3218,9 +3262,6 @@ bool Heap::CreateInitialObjects() {
}
set_observed_symbol(Symbol::cast(obj));
- set_i18n_template_one(the_hole_value());
- set_i18n_template_two(the_hole_value());
-
// Handling of script id generation is in Factory::NewScript.
set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
@@ -3269,6 +3310,12 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
}
+bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
+ return !RootCanBeWrittenAfterInitialization(root_index) &&
+ !InNewSpace(roots_array_start()[root_index]);
+}
+
+
Object* RegExpResultsCache::Lookup(Heap* heap,
String* key_string,
Object* key_pattern,
@@ -3408,7 +3455,7 @@ void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(this, i);
+ number_string_cache()->set_undefined(i);
}
}
@@ -3639,7 +3686,7 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_function_token_position(0);
// All compiler hints default to false or 0.
share->set_compiler_hints(0);
- share->set_opt_count(0);
+ share->set_opt_count_and_bailout_reason(0);
return share;
}
@@ -4013,10 +4060,10 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
return AllocateByteArray(length);
}
int size = ByteArray::SizeFor(length);
+ AllocationSpace space =
+ (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE;
Object* result;
- { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
- ? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4323,7 +4370,7 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
if (!function->shared()->is_generator()) {
MaybeObject* maybe_failure =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+ JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributesTrampoline(
constructor_string(), function, DONT_ENUM);
if (maybe_failure->IsFailure()) return maybe_failure;
}
@@ -4481,7 +4528,8 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
}
-MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
+MaybeObject* Heap::AllocateJSObjectFromMap(
+ Map* map, PretenureFlag pretenure, bool allocate_properties) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4492,11 +4540,15 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
- ASSERT(prop_size >= 0);
- Object* properties;
- { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+ FixedArray* properties;
+ if (allocate_properties) {
+ int prop_size = map->InitialPropertiesLength();
+ ASSERT(prop_size >= 0);
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
+ if (!maybe_properties->To(&properties)) return maybe_properties;
+ }
+ } else {
+ properties = empty_fixed_array();
}
// Allocate the JSObject.
@@ -4508,17 +4560,15 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
if (!maybe_obj->To(&obj)) return maybe_obj;
// Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj),
- FixedArray::cast(properties),
- map);
+ InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
ASSERT(JSObject::cast(obj)->HasFastElements() ||
JSObject::cast(obj)->HasExternalArrayElements());
return obj;
}
-MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
- Handle<AllocationSite> allocation_site) {
+MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
+ Map* map, Handle<AllocationSite> allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4531,9 +4581,9 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
// Allocate the backing storage for the properties.
int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
- Object* properties;
+ FixedArray* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
- if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+ if (!maybe_properties->To(&properties)) return maybe_properties;
}
// Allocate the JSObject.
@@ -4545,9 +4595,7 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
if (!maybe_obj->To(&obj)) return maybe_obj;
// Initialize the JSObject.
- InitializeJSObjectFromMap(JSObject::cast(obj),
- FixedArray::cast(properties),
- map);
+ InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -4964,7 +5012,7 @@ MaybeObject* Heap::CopyJSObjectWithAllocationSite(
int object_size = map->instance_size();
Object* clone;
- ASSERT(map->CanTrackAllocationSite());
+ ASSERT(AllocationSite::CanTrack(map->instance_type()));
ASSERT(map->instance_type() == JS_ARRAY_TYPE);
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
@@ -5719,7 +5767,7 @@ MaybeObject* Heap::AllocateSymbol() {
int hash;
int attempts = 0;
do {
- hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
+ hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
attempts++;
} while (hash == 0 && attempts < 30);
if (hash == 0) hash = 1; // never return 0
@@ -5951,12 +5999,7 @@ bool Heap::IdleNotification(int hint) {
size_factor * IncrementalMarking::kAllocatedThreshold;
if (contexts_disposed_ > 0) {
- if (hint >= kMaxHint) {
- // The embedder is requesting a lot of GC work after context disposal,
- // we age inline caches so that they don't keep objects from
- // the old context alive.
- AgeInlineCaches();
- }
+ contexts_disposed_ = 0;
int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
if (hint >= mark_sweep_time && !FLAG_expose_gc &&
incremental_marking()->IsStopped()) {
@@ -5965,8 +6008,8 @@ bool Heap::IdleNotification(int hint) {
"idle notification: contexts disposed");
} else {
AdvanceIdleIncrementalMarking(step_size);
- contexts_disposed_ = 0;
}
+
// After context disposal there is likely a lot of garbage remaining; reset
// the idle notification counters in order to trigger more incremental GCs
// on subsequent idle notifications.
@@ -6108,12 +6151,12 @@ void Heap::Print() {
void Heap::ReportCodeStatistics(const char* title) {
PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
- PagedSpace::ResetCodeStatistics();
+ PagedSpace::ResetCodeStatistics(isolate());
// We do not look for code in new space, map space, or old space. If code
// somehow ends up in those spaces, we would miss it here.
code_space_->CollectCodeStatistics();
lo_space_->CollectCodeStatistics();
- PagedSpace::ReportCodeStatistics();
+ PagedSpace::ReportCodeStatistics(isolate());
}
@@ -6161,7 +6204,7 @@ bool Heap::Contains(HeapObject* value) {
bool Heap::Contains(Address addr) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
return HasBeenSetUp() &&
(new_space_.ToSpaceContains(addr) ||
old_pointer_space_->Contains(addr) ||
@@ -6180,7 +6223,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
bool Heap::InSpace(Address addr, AllocationSpace space) {
- if (OS::IsOutsideAllocatedSpace(addr)) return false;
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
if (!HasBeenSetUp()) return false;
switch (space) {
@@ -6567,7 +6610,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kBootstrapper);
isolate_->Iterate(v);
v->Synchronize(VisitorSynchronization::kTop);
- Relocatable::Iterate(v);
+ Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -6608,6 +6651,14 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
}
v->Synchronize(VisitorSynchronization::kGlobalHandles);
+ // Iterate over eternal handles.
+ if (mode == VISIT_ALL_IN_SCAVENGE) {
+ isolate_->eternal_handles()->IterateNewSpaceRoots(v);
+ } else {
+ isolate_->eternal_handles()->IterateAllRoots(v);
+ }
+ v->Synchronize(VisitorSynchronization::kEternalHandles);
+
// Iterate over pointers being held by inactive threads.
isolate_->thread_manager()->Iterate(v);
v->Synchronize(VisitorSynchronization::kThreadManager);
@@ -6620,7 +6671,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// serialization this does nothing, since the partial snapshot cache is
// empty. However the next thing we do is create the partial snapshot,
// filling up the partial snapshot cache with objects it needs as we go.
- SerializerDeserializer::Iterate(v);
+ SerializerDeserializer::Iterate(isolate_, v);
// We don't do a v->Synchronize call here, because in debug mode that will
// output a flag to the snapshot. However at this point the serializer and
// deserializer are deliberately a little unsynchronized (see above) so the
@@ -6701,6 +6752,12 @@ bool Heap::ConfigureHeap(int max_semispace_size,
RoundUp(max_old_generation_size_,
Page::kPageSize));
+ // We rely on being able to allocate new arrays in paged spaces.
+ ASSERT(MaxRegularSpaceAllocationSize() >=
+ (JSArray::kSize +
+ FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+ AllocationMemento::kSize));
+
configured_ = true;
return true;
}
@@ -6870,8 +6927,8 @@ bool Heap::SetUp() {
ASSERT(hash_seed() == 0);
if (FLAG_randomize_hashes) {
if (FLAG_hash_seed == 0) {
- set_hash_seed(
- Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
+ int rnd = isolate()->random_number_generator()->NextInt();
+ set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
} else {
set_hash_seed(Smi::FromInt(FLAG_hash_seed));
}
@@ -6882,7 +6939,7 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
- if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+ if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
#ifdef DEBUG
relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
@@ -7218,12 +7275,12 @@ class HeapObjectsFilter {
class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
- UnreachableObjectsFilter() {
+ explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
MarkReachableObjects();
}
~UnreachableObjectsFilter() {
- Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
+ heap_->mark_compact_collector()->ClearMarkbits();
}
bool SkipObject(HeapObject* object) {
@@ -7260,12 +7317,12 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
};
void MarkReachableObjects() {
- Heap* heap = Isolate::Current()->heap();
MarkingVisitor visitor;
- heap->IterateRoots(&visitor, VISIT_ALL);
+ heap_->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
}
+ Heap* heap_;
DisallowHeapAllocation no_allocation_;
};
@@ -7297,7 +7354,7 @@ void HeapIterator::Init() {
space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
case kFilterUnreachable:
- filter_ = new UnreachableObjectsFilter;
+ filter_ = new UnreachableObjectsFilter(heap_);
break;
default:
break;
@@ -7782,7 +7839,7 @@ int KeyedLookupCache::Lookup(Map* map, Name* name) {
void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
if (!name->IsUniqueName()) {
String* internalized_string;
- if (!HEAP->InternalizeStringIfExists(
+ if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
String::cast(name), &internalized_string)) {
return;
}
@@ -7790,7 +7847,7 @@ void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
}
// This cache is cleared only between mark compact passes, so we expect the
// cache to only contain old space names.
- ASSERT(!HEAP->InNewSpace(name));
+ ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
int index = (Hash(map, name) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
@@ -7842,9 +7899,9 @@ void Heap::GarbageCollectionGreedyCheck() {
#endif
-TranscendentalCache::SubCache::SubCache(Type t)
+TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
: type_(t),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
uint32_t in1 = 0xffffffffu; // generated by the FPU.
for (int i = 0; i < kCacheSize; i++) {
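The 0xffffffff/0xffffffff word pair above is a deliberate cache sentinel: reinterpreted as a double it has an all-ones exponent and a non-zero mantissa, so it is a NaN, but not the default quiet NaN that hardware arithmetic produces. A minimal standalone check of that claim (illustrative, not part of the patch):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  // Assemble the cache sentinel from the two 32-bit halves used above.
  std::uint32_t words[2] = { 0xffffffffu, 0xffffffffu };
  double d;
  std::memcpy(&d, words, sizeof(d));
  // All exponent bits set and a non-zero mantissa: a NaN...
  assert(std::isnan(d));
  // ...but not the default quiet NaN (0x7ff8000000000000) that FPU
  // operations produce, so a real cached input can never collide with it.
  std::uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  assert(bits == 0xffffffffffffffffull);
  return 0;
}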
@@ -7871,6 +7928,7 @@ void ExternalStringTable::CleanUp() {
if (new_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(new_space_strings_[i]->IsExternalString());
if (heap_->InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
@@ -7885,6 +7943,7 @@ void ExternalStringTable::CleanUp() {
if (old_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(old_space_strings_[i]->IsExternalString());
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
@@ -7987,7 +8046,7 @@ static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
void Heap::CheckpointObjectStats() {
- ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+ LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
counters->count_of_##name()->Increment( \
@@ -8033,7 +8092,7 @@ void Heap::CheckpointObjectStats() {
Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_parallel_recompilation) {
+ if (FLAG_concurrent_recompilation) {
heap_->relocation_mutex_->Lock();
#ifdef DEBUG
heap_->relocation_mutex_locked_by_optimizer_thread_ =
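The ScopedLock-to-LockGuard migration in the hunks above swaps one RAII wrapper for another; both follow the acquire-in-constructor, release-in-destructor idiom. A minimal sketch of that idiom using the standard library (names here are illustrative, not V8's):

#include <mutex>

static std::mutex stats_mutex;

void CheckpointStats() {
  // Acquire in the constructor, release in the destructor: the mutex is
  // unlocked on every exit path, including early returns and exceptions.
  std::lock_guard<std::mutex> guard(stats_mutex);
  // ... mutate shared counters while the lock is held ...
}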
diff --git a/chromium/v8/src/heap.h b/chromium/v8/src/heap.h
index 672b8c16325..4dfa076ebd7 100644
--- a/chromium/v8/src/heap.h
+++ b/chromium/v8/src/heap.h
@@ -78,9 +78,9 @@ namespace internal {
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
- /* The first 32 roots above this line should be boring from a GC point of */ \
- /* view. This means they are never in new space and never on a page that */ \
- /* is being compacted. */ \
+ /* The roots above this line should be boring from a GC point of view. */ \
+ /* This means they are never in new space and never on a page that is */ \
+ /* being compacted. */ \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -189,9 +189,7 @@ namespace internal {
V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
- V(Symbol, observed_symbol, ObservedSymbol) \
- V(HeapObject, i18n_template_one, I18nTemplateOne) \
- V(HeapObject, i18n_template_two, I18nTemplateTwo)
+ V(Symbol, observed_symbol, ObservedSymbol)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -201,7 +199,6 @@ namespace internal {
V(Array_string, "Array") \
V(Object_string, "Object") \
V(proto_string, "__proto__") \
- V(StringImpl_string, "StringImpl") \
V(arguments_string, "arguments") \
V(Arguments_string, "Arguments") \
V(call_string, "call") \
@@ -211,12 +208,10 @@ namespace internal {
V(Boolean_string, "Boolean") \
V(callee_string, "callee") \
V(constructor_string, "constructor") \
- V(code_string, ".code") \
V(result_string, ".result") \
V(dot_for_string, ".for.") \
- V(catch_var_string, ".catch-var") \
- V(empty_string, "") \
V(eval_string, "eval") \
+ V(empty_string, "") \
V(function_string, "function") \
V(length_string, "length") \
V(module_string, "module") \
@@ -235,12 +230,10 @@ namespace internal {
V(index_string, "index") \
V(last_index_string, "lastIndex") \
V(object_string, "object") \
- V(payload_string, "payload") \
V(literals_string, "literals") \
V(prototype_string, "prototype") \
V(string_string, "string") \
V(String_string, "String") \
- V(unknown_field_string, "unknownField") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
V(Date_string, "Date") \
@@ -259,7 +252,6 @@ namespace internal {
"KeyedStoreElementMonomorphic") \
V(stack_overflow_string, "kStackOverflowBoilerplate") \
V(illegal_access_string, "illegal access") \
- V(out_of_memory_string, "out-of-memory") \
V(illegal_execution_state_string, "illegal execution state") \
V(get_string, "get") \
V(set_string, "set") \
@@ -268,8 +260,6 @@ namespace internal {
V(length_field_string, "%length") \
V(cell_value_string, "%cell_value") \
V(function_class_string, "Function") \
- V(properties_field_symbol, "%properties") \
- V(payload_field_symbol, "%payload") \
V(illegal_argument_string, "illegal argument") \
V(MakeReferenceError_string, "MakeReferenceError") \
V(MakeSyntaxError_string, "MakeSyntaxError") \
@@ -283,7 +273,6 @@ namespace internal {
V(illegal_continue_string, "illegal_continue") \
V(unknown_label_string, "unknown_label") \
V(redeclaration_string, "redeclaration") \
- V(failure_string, "<failure>") \
V(space_string, " ") \
V(exec_string, "exec") \
V(zero_string, "0") \
@@ -525,7 +514,7 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
- int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }
+ int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 4/5; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -738,7 +727,7 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure = NOT_TENURED);
+ Map* map, PretenureFlag pretenure = NOT_TENURED, bool alloc_props = true);
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
Map* map, Handle<AllocationSite> allocation_site);
@@ -1256,10 +1245,7 @@ class Heap {
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
- int NotifyContextDisposed() {
- flush_monomorphic_ics_ = true;
- return ++contexts_disposed_;
- }
+ int NotifyContextDisposed();
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
@@ -1302,12 +1288,6 @@ class Heap {
ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
global_gc_epilogue_callback_ = callback;
}
- void SetI18nTemplateOne(ObjectTemplateInfo* tmpl) {
- set_i18n_template_one(tmpl);
- }
- void SetI18nTemplateTwo(ObjectTemplateInfo* tmpl) {
- set_i18n_template_two(tmpl);
- }
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
@@ -1402,6 +1382,10 @@ class Heap {
inline OldSpace* TargetSpace(HeapObject* object);
static inline AllocationSpace TargetSpaceId(InstanceType type);
+  // Checks whether the given object is allowed to be migrated from its
+ // current space into the given destination space. Used for debugging.
+ inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+
// Sets the stub_cache_ (only used when expanding the dictionary).
void public_set_code_stubs(UnseededNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
@@ -1501,6 +1485,10 @@ class Heap {
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
#ifdef DEBUG
+ void set_allocation_timeout(int timeout) {
+ allocation_timeout_ = timeout;
+ }
+
bool disallow_allocation_failure() {
return disallow_allocation_failure_;
}
@@ -1626,6 +1614,8 @@ class Heap {
// Generated code can embed direct references to non-writable roots if
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+ // Generated code can treat direct references to this root as constant.
+ bool RootCanBeTreatedAsConstant(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true,
@@ -1872,14 +1862,14 @@ class Heap {
void CheckpointObjectStats();
- // We don't use a ScopedLock here since we want to lock the heap
- // only when FLAG_parallel_recompilation is true.
+ // We don't use a LockGuard here since we want to lock the heap
+ // only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
explicit RelocationLock(Heap* heap);
~RelocationLock() {
- if (FLAG_parallel_recompilation) {
+ if (FLAG_concurrent_recompilation) {
#ifdef DEBUG
heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
@@ -2882,7 +2872,7 @@ class TranscendentalCache {
class SubCache {
static const int kCacheSize = 512;
- explicit SubCache(Type t);
+ explicit SubCache(Isolate* isolate, Type t);
MUST_USE_RESULT inline MaybeObject* Get(double input);
@@ -2919,7 +2909,7 @@ class TranscendentalCache {
DISALLOW_COPY_AND_ASSIGN(SubCache);
};
- TranscendentalCache() {
+ explicit TranscendentalCache(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
}
@@ -2937,6 +2927,7 @@ class TranscendentalCache {
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
+ Isolate* isolate_;
SubCache* caches_[kNumberOfCaches];
DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
};
diff --git a/chromium/v8/src/hydrogen-alias-analysis.h b/chromium/v8/src/hydrogen-alias-analysis.h
new file mode 100644
index 00000000000..73e116e63e1
--- /dev/null
+++ b/chromium/v8/src/hydrogen-alias-analysis.h
@@ -0,0 +1,105 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_ALIAS_ANALYSIS_H_
+#define V8_HYDROGEN_ALIAS_ANALYSIS_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+enum HAliasing {
+ kMustAlias,
+ kMayAlias,
+ kNoAlias
+};
+
+
+// Defines the interface to alias analysis for the rest of the compiler.
+// A simple implementation can use only local reasoning, but a more powerful
+// analysis might employ points-to analysis.
+class HAliasAnalyzer : public ZoneObject {
+ public:
+ // Simple alias analysis distinguishes allocations, parameters,
+ // and constants using only local reasoning.
+ HAliasing Query(HValue* a, HValue* b) {
+ // The same SSA value always references the same object.
+ if (a == b) return kMustAlias;
+
+ if (a->IsAllocate() || a->IsInnerAllocatedObject()) {
+ // Two non-identical allocations can never be aliases.
+ if (b->IsAllocate()) return kNoAlias;
+ if (b->IsInnerAllocatedObject()) return kNoAlias;
+ // An allocation can never alias a parameter or a constant.
+ if (b->IsParameter()) return kNoAlias;
+ if (b->IsConstant()) return kNoAlias;
+ }
+ if (b->IsAllocate() || b->IsInnerAllocatedObject()) {
+ // An allocation can never alias a parameter or a constant.
+ if (a->IsParameter()) return kNoAlias;
+ if (a->IsConstant()) return kNoAlias;
+ }
+
+ // Constant objects can be distinguished statically.
+ if (a->IsConstant()) {
+ // TODO(titzer): DataEquals() is more efficient, but that's protected.
+ return a->Equals(b) ? kMustAlias : kNoAlias;
+ }
+ return kMayAlias;
+ }
+
+ // Checks whether the objects referred to by the given instructions may
+ // ever be aliases. Note that this is more conservative than checking
+ // {Query(a, b) == kMayAlias}, since this method considers kMustAlias
+ // objects to also be may-aliasing.
+ inline bool MayAlias(HValue* a, HValue* b) {
+ return Query(a, b) != kNoAlias;
+ }
+
+ inline bool MustAlias(HValue* a, HValue* b) {
+ return Query(a, b) == kMustAlias;
+ }
+
+ inline bool NoAlias(HValue* a, HValue* b) {
+ return Query(a, b) == kNoAlias;
+ }
+
+ // Returns the actual value of an instruction. In the case of a chain
+  // of informative definitions, returns the root of the chain.
+ HValue* ActualValue(HValue* obj) {
+ while (obj->IsInformativeDefinition()) { // Walk a chain of idefs.
+ obj = obj->RedefinedOperand();
+ }
+ return obj;
+ }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_ALIAS_ANALYSIS_H_
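A sketch of how a pass might consume the new HAliasAnalyzer interface; the surrounding function and value names are hypothetical, and only the analyzer calls come from the header above:

// Hypothetical pass body (sketch only).
void ClassifyPair(HAliasAnalyzer* aliasing, HValue* x, HValue* y) {
  // Strip informative definitions so both queries see the real object.
  HValue* a = aliasing->ActualValue(x);
  HValue* b = aliasing->ActualValue(y);
  if (aliasing->MustAlias(a, b)) {
    // Same object: a load from b can be forwarded from a store to a.
  } else if (aliasing->NoAlias(a, b)) {
    // Provably distinct: a store to a cannot affect a load from b.
  } else {
    // kMayAlias: stay conservative and keep both memory operations.
  }
}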
diff --git a/chromium/v8/src/hydrogen-bce.cc b/chromium/v8/src/hydrogen-bce.cc
index 7c81ec145cb..869db54a2f4 100644
--- a/chromium/v8/src/hydrogen-bce.cc
+++ b/chromium/v8/src/hydrogen-bce.cc
@@ -318,12 +318,54 @@ void BoundsCheckTable::Delete(BoundsCheckKey* key) {
}
+class HBoundsCheckEliminationState {
+ public:
+ HBasicBlock* block_;
+ BoundsCheckBbData* bb_data_list_;
+ int index_;
+};
+
+
// Eliminates checks in bb and recursively in the dominated blocks.
// Also replaces the results of check instructions with the original value, if
// the result is used. This is safe now, since we don't do code motion after
// this point. It enables better register allocation since the value produced
// by check instructions is really a copy of the original value.
void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
+ HBasicBlock* entry) {
+ // Allocate the stack.
+ HBoundsCheckEliminationState* stack =
+ zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());
+
+ // Explicitly push the entry block.
+ stack[0].block_ = entry;
+ stack[0].bb_data_list_ = PreProcessBlock(entry);
+ stack[0].index_ = 0;
+ int stack_depth = 1;
+
+ // Implement depth-first traversal with a stack.
+ while (stack_depth > 0) {
+ int current = stack_depth - 1;
+ HBoundsCheckEliminationState* state = &stack[current];
+ const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();
+
+ if (state->index_ < children->length()) {
+ // Recursively visit children blocks.
+ HBasicBlock* child = children->at(state->index_++);
+ int next = stack_depth++;
+ stack[next].block_ = child;
+ stack[next].bb_data_list_ = PreProcessBlock(child);
+ stack[next].index_ = 0;
+ } else {
+ // Finished with all children; post process the block.
+ PostProcessBlock(state->block_, state->bb_data_list_);
+ stack_depth--;
+ }
+ }
+}
+
+
+BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
HBasicBlock* bb) {
BoundsCheckBbData* bb_data_list = NULL;
@@ -375,19 +417,20 @@ void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
}
}
- for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
- EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i));
- }
+ return bb_data_list;
+}
+
- for (BoundsCheckBbData* data = bb_data_list;
- data != NULL;
- data = data->NextInBasicBlock()) {
+void HBoundsCheckEliminationPhase::PostProcessBlock(
+ HBasicBlock* block, BoundsCheckBbData* data) {
+ while (data != NULL) {
data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
table_.Delete(data->Key());
}
+ data = data->NextInBasicBlock();
}
}
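The rewrite above trades recursion over the dominator tree for an explicit stack, bounding native stack usage on deeply nested dominator trees. The same transformation in a self-contained form, with stand-ins for PreProcessBlock/PostProcessBlock (illustrative types, not V8's):

#include <cstddef>
#include <vector>

struct Node {
  std::vector<Node*> children;
};

// pre/post mirror PreProcessBlock/PostProcessBlock: pre runs before a
// node's children are visited, post after all of them are finished.
template <typename Pre, typename Post>
void DepthFirst(Node* root, Pre pre, Post post) {
  struct Frame { Node* node; std::size_t index; };
  std::vector<Frame> stack;
  stack.push_back({root, 0});
  pre(root);
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.index < top.node->children.size()) {
      Node* child = top.node->children[top.index++];
      pre(child);
      stack.push_back({child, 0});  // descend into the child
    } else {
      post(top.node);  // all children done; safe to post-process
      stack.pop_back();
    }
  }
}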
diff --git a/chromium/v8/src/hydrogen-bce.h b/chromium/v8/src/hydrogen-bce.h
index d91997bda01..c55dea7b7a5 100644
--- a/chromium/v8/src/hydrogen-bce.h
+++ b/chromium/v8/src/hydrogen-bce.h
@@ -60,6 +60,8 @@ class HBoundsCheckEliminationPhase : public HPhase {
private:
void EliminateRedundantBoundsChecks(HBasicBlock* bb);
+ BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb);
+ void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data);
BoundsCheckTable table_;
diff --git a/chromium/v8/src/hydrogen-bch.cc b/chromium/v8/src/hydrogen-bch.cc
index 137d6295477..a0a0fee7105 100644
--- a/chromium/v8/src/hydrogen-bch.cc
+++ b/chromium/v8/src/hydrogen-bch.cc
@@ -102,10 +102,11 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
int current_dominated_block_;
};
- HGraph* graph() { return graph_; }
- HBasicBlock* loop_header() { return loop_header_; }
- Element* at(int index) { return &(elements_.at(index)); }
- Element* at(HBasicBlock* block) { return at(block->block_id()); }
+ HGraph* graph() const { return graph_; }
+ Counters* counters() const { return graph()->isolate()->counters(); }
+ HBasicBlock* loop_header() const { return loop_header_; }
+ Element* at(int index) const { return &(elements_.at(index)); }
+ Element* at(HBasicBlock* block) const { return at(block->block_id()); }
void AddCheckAt(HBasicBlock* block) {
at(block->block_id())->set_has_check();
@@ -258,23 +259,17 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
// all checks are done on constants: if all checks are done against the same
// constant limit, we will use that instead of the induction limit.
bool has_upper_constant_limit = true;
- InductionVariableData::InductionVariableCheck* current_check = check;
int32_t upper_constant_limit =
- current_check != NULL && current_check->HasUpperLimit() ?
- current_check->upper_limit() : 0;
- while (current_check != NULL) {
- if (check->HasUpperLimit()) {
- if (check->upper_limit() != upper_constant_limit) {
- has_upper_constant_limit = false;
- }
- } else {
- has_upper_constant_limit = false;
- }
-
- current_check->check()->block()->graph()->isolate()->counters()->
- bounds_checks_eliminated()->Increment();
+ check != NULL && check->HasUpperLimit() ? check->upper_limit() : 0;
+ for (InductionVariableData::InductionVariableCheck* current_check = check;
+ current_check != NULL;
+ current_check = current_check->next()) {
+ has_upper_constant_limit =
+ has_upper_constant_limit &&
+ check->HasUpperLimit() &&
+ check->upper_limit() == upper_constant_limit;
+ counters()->bounds_checks_eliminated()->Increment();
current_check->check()->set_skip_check();
- current_check = current_check->next();
}
// Choose the appropriate limit.
@@ -303,8 +298,7 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
zone, context, limit, check->check()->length());
hoisted_check->InsertBefore(pre_header->end());
hoisted_check->set_allow_equality(true);
- hoisted_check->block()->graph()->isolate()->counters()->
- bounds_checks_hoisted()->Increment();
+ counters()->bounds_checks_hoisted()->Increment();
}
void CollectInductionVariableData(HBasicBlock* bb) {
@@ -341,8 +335,7 @@ class InductionVariableBlocksTable BASE_EMBEDDED {
// TODO(mmassi): skip OSR values for check->length().
if (check->length() == data->limit() ||
check->length() == data->additional_upper_limit()) {
- check->block()->graph()->isolate()->counters()->
- bounds_checks_eliminated()->Increment();
+ counters()->bounds_checks_eliminated()->Increment();
check->set_skip_check();
continue;
}
@@ -407,4 +400,3 @@ void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
}
} } // namespace v8::internal
-
diff --git a/chromium/v8/src/hydrogen-canonicalize.cc b/chromium/v8/src/hydrogen-canonicalize.cc
index 643234392d0..4d96415e6a8 100644
--- a/chromium/v8/src/hydrogen-canonicalize.cc
+++ b/chromium/v8/src/hydrogen-canonicalize.cc
@@ -48,6 +48,10 @@ void HCanonicalizePhase::Run() {
if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToSmi)) {
instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
+ } else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+ HInstruction::kTruncatingToInt32)) {
+ // Avoid redundant minus zero check
+ instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
}
}
}
diff --git a/chromium/v8/src/hydrogen-dehoist.cc b/chromium/v8/src/hydrogen-dehoist.cc
index 67e67189983..bdf2cfb2584 100644
--- a/chromium/v8/src/hydrogen-dehoist.cc
+++ b/chromium/v8/src/hydrogen-dehoist.cc
@@ -53,7 +53,7 @@ static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
int32_t value = constant->Integer32Value() * sign;
// We limit offset values to MaxIndexOffsetBits() bits because we want to
// avoid the risk of overflows when the offset is added to the object
// header size.
- if (value >= 1 << 30 || value < 0) return;
+ if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return;
array_operation->SetKey(subexpression);
if (binary_operation->HasNoUses()) {
binary_operation->DeleteAndReplaceWith(NULL);
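The safety argument behind the (now per-operation) offset bound: the dehoisted index offset is later scaled to bytes and added to the object header size, and that sum must not overflow a signed 32-bit offset. A rough standalone model of the headroom check (illustrative numbers and names, not V8's actual computation):

#include <cstdint>

// Illustrative: an index offset below 2^(31 - element_shift - 1) cannot
// overflow int32 once it is scaled to bytes and the header is added.
bool OffsetIsSafe(int32_t index_offset, int element_shift,
                  int32_t header_size) {
  int max_bits = 31 - element_shift - 1;  // one spare bit for the header
  if (index_offset < 0 || index_offset >= (INT32_C(1) << max_bits)) {
    return false;
  }
  int64_t byte_offset =
      (static_cast<int64_t>(index_offset) << element_shift) + header_size;
  return byte_offset <= INT32_MAX;
}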
diff --git a/chromium/v8/src/hydrogen-environment-liveness.cc b/chromium/v8/src/hydrogen-environment-liveness.cc
index 9efa47bd34d..fad9755e5c7 100644
--- a/chromium/v8/src/hydrogen-environment-liveness.cc
+++ b/chromium/v8/src/hydrogen-environment-liveness.cc
@@ -163,11 +163,7 @@ void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
live->Clear();
for (int i = 0; i < enter->return_targets()->length(); ++i) {
int return_id = enter->return_targets()->at(i)->block_id();
- // When an AbnormalExit is involved, it can happen that the return
- // target block doesn't actually exist.
- if (return_id < live_at_block_start_.length()) {
- live->Union(*live_at_block_start_[return_id]);
- }
+ live->Union(*live_at_block_start_[return_id]);
}
last_simulate_ = NULL;
break;
diff --git a/chromium/v8/src/hydrogen-escape-analysis.cc b/chromium/v8/src/hydrogen-escape-analysis.cc
index 961bb94e9c1..997e4f9445f 100644
--- a/chromium/v8/src/hydrogen-escape-analysis.cc
+++ b/chromium/v8/src/hydrogen-escape-analysis.cc
@@ -31,21 +31,33 @@ namespace v8 {
namespace internal {
-void HEscapeAnalysisPhase::CollectIfNoEscapingUses(HInstruction* instr) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value, int size) {
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->HasEscapingOperandAt(it.index())) {
if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) escapes through #%d (%s) @%d\n", instr->id(),
- instr->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
}
- return;
+ return false;
+ }
+ if (use->HasOutOfBoundsAccess(size)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) out of bounds at #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
+ }
+ int redefined_index = use->RedefinedOperandIndex();
+ if (redefined_index == it.index() && !HasNoEscapingUses(use, size)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
}
}
- if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) is being captured\n", instr->id(), instr->Mnemonic());
- }
- captured_.Add(instr, zone());
+ return true;
}
@@ -55,12 +67,263 @@ void HEscapeAnalysisPhase::CollectCapturedValues() {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- if (instr->IsAllocate()) {
- CollectIfNoEscapingUses(instr);
+ if (!instr->IsAllocate()) continue;
+ HAllocate* allocate = HAllocate::cast(instr);
+ if (!allocate->size()->IsInteger32Constant()) continue;
+ int size_in_bytes = allocate->size()->GetInteger32Constant();
+ if (HasNoEscapingUses(instr, size_in_bytes)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) is being captured\n", instr->id(),
+ instr->Mnemonic());
+ }
+ captured_.Add(instr, zone());
+ }
+ }
+ }
+}
+
+
+HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) {
+ Zone* zone = graph()->zone();
+ HCapturedObject* state =
+ new(zone) HCapturedObject(number_of_values_, number_of_objects_, zone);
+ state->InsertAfter(previous);
+ return state;
+}
+
+
+// Create a new state for replacing HAllocate instructions.
+HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation(
+ HInstruction* previous) {
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HCapturedObject* state = NewState(previous);
+ for (int index = 0; index < number_of_values_; index++) {
+ state->SetOperandAt(index, undefined);
+ }
+ return state;
+}
+
+
+// Create a new state full of phis for loop header entries.
+HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
+ HInstruction* previous,
+ HCapturedObject* old_state) {
+ HBasicBlock* block = previous->block();
+ HCapturedObject* state = NewState(previous);
+ for (int index = 0; index < number_of_values_; index++) {
+ HValue* operand = old_state->OperandAt(index);
+ HPhi* phi = NewPhiAndInsert(block, operand, index);
+ state->SetOperandAt(index, phi);
+ }
+ return state;
+}
+
+
+// Create a new state by copying an existing one.
+HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
+ HInstruction* previous,
+ HCapturedObject* old_state) {
+ HCapturedObject* state = NewState(previous);
+ for (int index = 0; index < number_of_values_; index++) {
+ HValue* operand = old_state->OperandAt(index);
+ state->SetOperandAt(index, operand);
+ }
+ return state;
+}
+
+
+// Insert a newly created phi into the given block and fill all incoming
+// edges with the given value.
+HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block,
+ HValue* incoming_value,
+ int index) {
+ Zone* zone = graph()->zone();
+ HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
+ for (int i = 0; i < block->predecessors()->length(); i++) {
+ phi->AddInput(incoming_value);
+ }
+ block->AddPhi(phi);
+ return phi;
+}
+
+
+// Insert a newly created value check as a replacement for map checks.
+HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
+ HCheckMaps* mapcheck) {
+ Zone* zone = graph()->zone();
+ HValue* value = state->map_value();
+ // TODO(mstarzinger): This will narrow a map check against a set of maps
+ // down to the first element in the set. Revisit and fix this.
+ Handle<Map> map_object = mapcheck->map_set()->first();
+ UniqueValueId map_id = mapcheck->map_unique_ids()->first();
+ HCheckValue* check = HCheckValue::New(zone, NULL, value, map_object, map_id);
+ check->InsertBefore(mapcheck);
+ return check;
+}
+
+
+// Performs a forward data-flow analysis of all loads and stores on the
+// given captured allocation. This uses a reverse post-order iteration
+// over affected basic blocks. All non-escaping instructions are handled
+// and replaced during the analysis.
+void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
+ HBasicBlock* allocate_block = allocate->block();
+ block_states_.AddBlock(NULL, graph()->blocks()->length(), zone());
+
+ // Iterate all blocks starting with the allocation block, since the
+ // allocation cannot dominate blocks that come before.
+ int start = allocate_block->block_id();
+ for (int i = start; i < graph()->blocks()->length(); i++) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ HCapturedObject* state = StateAt(block);
+
+ // Skip blocks that are not dominated by the captured allocation.
+ if (!allocate_block->Dominates(block) && allocate_block != block) continue;
+ if (FLAG_trace_escape_analysis) {
+ PrintF("Analyzing data-flow in B%d\n", block->block_id());
+ }
+
+ // Go through all instructions of the current block.
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ switch (instr->opcode()) {
+ case HValue::kAllocate: {
+ if (instr != allocate) continue;
+ state = NewStateForAllocation(allocate);
+ break;
+ }
+ case HValue::kLoadNamedField: {
+ HLoadNamedField* load = HLoadNamedField::cast(instr);
+ int index = load->access().offset() / kPointerSize;
+ if (load->object() != allocate) continue;
+ ASSERT(load->access().IsInobject());
+ HValue* replacement = state->OperandAt(index);
+ load->DeleteAndReplaceWith(replacement);
+ if (FLAG_trace_escape_analysis) {
+ PrintF("Replacing load #%d with #%d (%s)\n", instr->id(),
+ replacement->id(), replacement->Mnemonic());
+ }
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ HStoreNamedField* store = HStoreNamedField::cast(instr);
+ int index = store->access().offset() / kPointerSize;
+ if (store->object() != allocate) continue;
+ ASSERT(store->access().IsInobject());
+ state = NewStateCopy(store->previous(), state);
+ state->SetOperandAt(index, store->value());
+ if (store->has_transition()) {
+ state->SetOperandAt(0, store->transition());
+ }
+ if (store->HasObservableSideEffects()) {
+ state->ReuseSideEffectsFromStore(store);
+ }
+ store->DeleteAndReplaceWith(store->ActualValue());
+ if (FLAG_trace_escape_analysis) {
+ PrintF("Replacing store #%d%s\n", instr->id(),
+ store->has_transition() ? " (with transition)" : "");
+ }
+ break;
+ }
+ case HValue::kArgumentsObject:
+ case HValue::kCapturedObject:
+ case HValue::kSimulate: {
+ for (int i = 0; i < instr->OperandCount(); i++) {
+ if (instr->OperandAt(i) != allocate) continue;
+ instr->SetOperandAt(i, state);
+ }
+ break;
+ }
+ case HValue::kCheckHeapObject: {
+ HCheckHeapObject* check = HCheckHeapObject::cast(instr);
+ if (check->value() != allocate) continue;
+ check->DeleteAndReplaceWith(check->ActualValue());
+ break;
+ }
+ case HValue::kCheckMaps: {
+ HCheckMaps* mapcheck = HCheckMaps::cast(instr);
+ if (mapcheck->value() != allocate) continue;
+ NewMapCheckAndInsert(state, mapcheck);
+ mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue());
+ break;
+ }
+ default:
+ // Nothing to see here, move along ...
+ break;
}
}
+
+ // Propagate the block state forward to all successor blocks.
+ for (int i = 0; i < block->end()->SuccessorCount(); i++) {
+ HBasicBlock* succ = block->end()->SuccessorAt(i);
+ if (!allocate_block->Dominates(succ)) continue;
+ if (succ->predecessors()->length() == 1) {
+ // Case 1: This is the only predecessor, just reuse state.
+ SetStateAt(succ, state);
+ } else if (StateAt(succ) == NULL && succ->IsLoopHeader()) {
+        // Case 2: This is a state that enters a loop header; be
+        // pessimistic about loop headers and add phis for all values.
+ SetStateAt(succ, NewStateForLoopHeader(succ->first(), state));
+ } else if (StateAt(succ) == NULL) {
+ // Case 3: This is the first state propagated forward to the
+        // successor; leave a copy of the current state.
+ SetStateAt(succ, NewStateCopy(succ->first(), state));
+ } else {
+ // Case 4: This is a state that needs merging with previously
+ // propagated states, potentially introducing new phis lazily or
+ // adding values to existing phis.
+ HCapturedObject* succ_state = StateAt(succ);
+ for (int index = 0; index < number_of_values_; index++) {
+ HValue* operand = state->OperandAt(index);
+ HValue* succ_operand = succ_state->OperandAt(index);
+ if (succ_operand->IsPhi() && succ_operand->block() == succ) {
+ // Phi already exists, add operand.
+ HPhi* phi = HPhi::cast(succ_operand);
+ phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
+ } else if (succ_operand != operand) {
+ // Phi does not exist, introduce one.
+ HPhi* phi = NewPhiAndInsert(succ, succ_operand, index);
+ phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
+ succ_state->SetOperandAt(index, phi);
+ }
+ }
+ }
+ }
+ }
+
+ // All uses have been handled.
+ ASSERT(allocate->HasNoUses());
+ allocate->DeleteAndReplaceWith(NULL);
+}
+
+
+void HEscapeAnalysisPhase::PerformScalarReplacement() {
+ for (int i = 0; i < captured_.length(); i++) {
+ HAllocate* allocate = HAllocate::cast(captured_.at(i));
+
+    // Compute the number of scalar values and start with a clean slate.
+ int size_in_bytes = allocate->size()->GetInteger32Constant();
+ number_of_values_ = size_in_bytes / kPointerSize;
+ number_of_objects_++;
+ block_states_.Clear();
+
+ // Perform actual analysis steps.
+ AnalyzeDataFlow(allocate);
+
+ cumulative_values_ += number_of_values_;
+ ASSERT(allocate->HasNoUses());
+ ASSERT(!allocate->IsLinked());
}
}
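To make the bookkeeping concrete: a captured 16-byte allocation on a 32-bit target yields number_of_values_ == 4 tracked in-object slots (the map plus three fields), and each HCapturedObject is effectively a row recording the current SSA value of every slot. A toy model of the state-per-store discipline used above (illustrative types, not V8's):

#include <cstddef>
#include <vector>

// One SSA value per in-object slot of the captured allocation; index 0
// plays the role of the map slot.
struct ScalarState {
  std::vector<int> slot_values;  // SSA value ids
};

// Mirrors NewStateCopy + SetOperandAt: a store yields a fresh state
// differing from its predecessor in exactly one slot, leaving earlier
// states untouched for other control-flow paths.
ScalarState ApplyStore(const ScalarState& before, std::size_t slot,
                       int stored_value_id) {
  ScalarState after = before;
  after.slot_values[slot] = stored_value_id;
  return after;
}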
+void HEscapeAnalysisPhase::Run() {
+ // TODO(mstarzinger): We disable escape analysis with OSR for now, because
+ // spill slots might be uninitialized. Needs investigation.
+ if (graph()->has_osr()) return;
+ CollectCapturedValues();
+ PerformScalarReplacement();
+}
+
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/hydrogen-escape-analysis.h b/chromium/v8/src/hydrogen-escape-analysis.h
index 6ba6e823c54..3e27cc1b48b 100644
--- a/chromium/v8/src/hydrogen-escape-analysis.h
+++ b/chromium/v8/src/hydrogen-escape-analysis.h
@@ -38,17 +38,51 @@ namespace internal {
class HEscapeAnalysisPhase : public HPhase {
public:
explicit HEscapeAnalysisPhase(HGraph* graph)
- : HPhase("H_Escape analysis", graph), captured_(0, zone()) { }
+ : HPhase("H_Escape analysis", graph),
+ captured_(0, zone()),
+ number_of_objects_(0),
+ number_of_values_(0),
+ cumulative_values_(0),
+ block_states_(graph->blocks()->length(), zone()) { }
- void Run() {
- CollectCapturedValues();
- }
+ void Run();
private:
void CollectCapturedValues();
- void CollectIfNoEscapingUses(HInstruction* instr);
+ bool HasNoEscapingUses(HValue* value, int size);
+ void PerformScalarReplacement();
+ void AnalyzeDataFlow(HInstruction* instr);
+
+ HCapturedObject* NewState(HInstruction* prev);
+ HCapturedObject* NewStateForAllocation(HInstruction* prev);
+ HCapturedObject* NewStateForLoopHeader(HInstruction* prev, HCapturedObject*);
+ HCapturedObject* NewStateCopy(HInstruction* prev, HCapturedObject* state);
+
+ HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
+
+ HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
+
+ HCapturedObject* StateAt(HBasicBlock* block) {
+ return block_states_.at(block->block_id());
+ }
+
+ void SetStateAt(HBasicBlock* block, HCapturedObject* state) {
+ block_states_.Set(block->block_id(), state);
+ }
+
+ // List of allocations captured during collection phase.
+ ZoneList<HInstruction*> captured_;
+
+ // Number of captured objects on which scalar replacement was done.
+ int number_of_objects_;
+
+ // Number of scalar values tracked during scalar replacement phase.
+ int number_of_values_;
+ int cumulative_values_;
- ZoneList<HValue*> captured_;
+ // Map of block IDs to the data-flow state at block entry during the
+ // scalar replacement phase.
+ ZoneList<HCapturedObject*> block_states_;
};
diff --git a/chromium/v8/src/hydrogen-infer-representation.cc b/chromium/v8/src/hydrogen-infer-representation.cc
index 95c341285cc..f61649a68f4 100644
--- a/chromium/v8/src/hydrogen-infer-representation.cc
+++ b/chromium/v8/src/hydrogen-infer-representation.cc
@@ -82,24 +82,36 @@ void HInferRepresentationPhase::Run() {
if (done.Contains(i)) continue;
// Check if all uses of all connected phis in this group are truncating.
- bool all_uses_everywhere_truncating = true;
+ bool all_uses_everywhere_truncating_int32 = true;
+ bool all_uses_everywhere_truncating_smi = true;
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
- all_uses_everywhere_truncating &=
+ all_uses_everywhere_truncating_int32 &=
phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+ all_uses_everywhere_truncating_smi &=
+ phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi);
done.Add(index);
}
- if (all_uses_everywhere_truncating) {
- continue; // Great, nothing to do.
+
+ if (!all_uses_everywhere_truncating_int32) {
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ }
}
- // Clear truncation flag of this group of connected phis.
- for (BitVector::Iterator it(connected_phis[i]);
- !it.Done();
- it.Advance()) {
- int index = it.Current();
- phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ if (!all_uses_everywhere_truncating_smi) {
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis[i]);
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi);
+ }
}
}
}
@@ -140,8 +152,8 @@ void HInferRepresentationPhase::Run() {
// Do a fixed point iteration, trying to improve representations
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
- in_worklist_.Remove(current->id());
current->InferRepresentation(this);
+ in_worklist_.Remove(current->id());
}
// Lastly: any instruction that we don't have representation information
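The reordering in the last hunk is subtle: the value now stays in in_worklist_ while InferRepresentation() runs, so attempts to re-enqueue it during its own processing are ignored instead of growing the worklist. A generic sketch of that worklist discipline (illustrative, not V8 code):

#include <unordered_set>
#include <vector>

// Keeping an item in the membership set while it is processed means
// Enqueue() calls for that same item during process() are no-ops --
// the effect of moving Remove() after InferRepresentation() above.
template <typename T>
struct Worklist {
  std::vector<T> items;
  std::unordered_set<T> members;

  void Enqueue(T value) {
    if (members.insert(value).second) items.push_back(value);
  }

  template <typename ProcessFn>
  void RunToFixedPoint(ProcessFn process) {
    while (!items.empty()) {
      T current = items.back();
      items.pop_back();
      process(current);        // may Enqueue() further items
      members.erase(current);  // only removed after processing
    }
  }
};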
diff --git a/chromium/v8/src/hydrogen-instructions.cc b/chromium/v8/src/hydrogen-instructions.cc
index d2f16f46acc..833f00b1b91 100644
--- a/chromium/v8/src/hydrogen-instructions.cc
+++ b/chromium/v8/src/hydrogen-instructions.cc
@@ -149,116 +149,6 @@ void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) {
}
-// This method is recursive but it is guaranteed to terminate because
-// RedefinedOperand() always dominates "this".
-bool HValue::IsRelationTrue(NumericRelation relation,
- HValue* other,
- int offset,
- int scale) {
- if (this == other) {
- return scale == 0 && relation.IsExtendable(offset);
- }
-
- // Test the direct relation.
- if (IsRelationTrueInternal(relation, other, offset, scale)) return true;
-
- // If scale is 0 try the reversed relation.
- if (scale == 0 &&
- // TODO(mmassi): do we need the full, recursive IsRelationTrue?
- other->IsRelationTrueInternal(relation.Reversed(), this, -offset)) {
- return true;
- }
-
- // Try decomposition (but do not accept scaled compounds).
- DecompositionResult decomposition;
- if (TryDecompose(&decomposition) &&
- decomposition.scale() == 0 &&
- decomposition.base()->IsRelationTrue(relation, other,
- offset + decomposition.offset(),
- scale)) {
- return true;
- }
-
- // Pass the request to the redefined value.
- HValue* redefined = RedefinedOperand();
- return redefined != NULL && redefined->IsRelationTrue(relation, other,
- offset, scale);
-}
-
-
-bool HValue::TryGuaranteeRange(HValue* upper_bound) {
- RangeEvaluationContext context = RangeEvaluationContext(this, upper_bound);
- TryGuaranteeRangeRecursive(&context);
- bool result = context.is_range_satisfied();
- if (result) {
- context.lower_bound_guarantee()->SetResponsibilityForRange(DIRECTION_LOWER);
- context.upper_bound_guarantee()->SetResponsibilityForRange(DIRECTION_UPPER);
- }
- return result;
-}
-
-
-void HValue::TryGuaranteeRangeRecursive(RangeEvaluationContext* context) {
- // Check if we already know that this value satisfies the lower bound.
- if (context->lower_bound_guarantee() == NULL) {
- if (IsRelationTrueInternal(NumericRelation::Ge(), context->lower_bound(),
- context->offset(), context->scale())) {
- context->set_lower_bound_guarantee(this);
- }
- }
-
- // Check if we already know that this value satisfies the upper bound.
- if (context->upper_bound_guarantee() == NULL) {
- if (IsRelationTrueInternal(NumericRelation::Lt(), context->upper_bound(),
- context->offset(), context->scale()) ||
- (context->scale() == 0 &&
- context->upper_bound()->IsRelationTrue(NumericRelation::Gt(),
- this, -context->offset()))) {
- context->set_upper_bound_guarantee(this);
- }
- }
-
- if (context->is_range_satisfied()) return;
-
- // See if our RedefinedOperand() satisfies the constraints.
- if (RedefinedOperand() != NULL) {
- RedefinedOperand()->TryGuaranteeRangeRecursive(context);
- }
- if (context->is_range_satisfied()) return;
-
- // See if the constraints can be satisfied by decomposition.
- DecompositionResult decomposition;
- if (TryDecompose(&decomposition)) {
- context->swap_candidate(&decomposition);
- context->candidate()->TryGuaranteeRangeRecursive(context);
- context->swap_candidate(&decomposition);
- }
- if (context->is_range_satisfied()) return;
-
- // Try to modify this to satisfy the constraint.
-
- TryGuaranteeRangeChanging(context);
-}
-
-
-RangeEvaluationContext::RangeEvaluationContext(HValue* value, HValue* upper)
- : lower_bound_(upper->block()->graph()->GetConstant0()),
- lower_bound_guarantee_(NULL),
- candidate_(value),
- upper_bound_(upper),
- upper_bound_guarantee_(NULL),
- offset_(0),
- scale_(0) {
-}
-
-
-HValue* RangeEvaluationContext::ConvertGuarantee(HValue* guarantee) {
- return guarantee->IsBoundsCheckBaseIndexInformation()
- ? HBoundsCheckBaseIndexInformation::cast(guarantee)->bounds_check()
- : guarantee;
-}
-
-
static int32_t ConvertAndSetOverflow(Representation r,
int64_t result,
bool* overflow) {
@@ -484,55 +374,6 @@ HType HType::TypeFromValue(Handle<Object> value) {
}
-bool HValue::Dominates(HValue* dominator, HValue* dominated) {
- if (dominator->block() != dominated->block()) {
- // If they are in different blocks we can use the dominance relation
- // between the blocks.
- return dominator->block()->Dominates(dominated->block());
- } else {
- // Otherwise we must see which instruction comes first, considering
- // that phis always precede regular instructions.
- if (dominator->IsInstruction()) {
- if (dominated->IsInstruction()) {
- for (HInstruction* next = HInstruction::cast(dominator)->next();
- next != NULL;
- next = next->next()) {
- if (next == dominated) return true;
- }
- return false;
- } else if (dominated->IsPhi()) {
- return false;
- } else {
- UNREACHABLE();
- }
- } else if (dominator->IsPhi()) {
- if (dominated->IsInstruction()) {
- return true;
- } else {
- // We cannot compare which phi comes first.
- UNREACHABLE();
- }
- } else {
- UNREACHABLE();
- }
- return false;
- }
-}
-
-
-bool HValue::TestDominanceUsingProcessedFlag(HValue* dominator,
- HValue* dominated) {
- if (dominator->block() != dominated->block()) {
- return dominator->block()->Dominates(dominated->block());
- } else {
- // If both arguments are in the same block we check if dominator is a phi
- // or if dominated has not already been processed: in either case we know
- // that dominator precedes dominated.
- return dominator->IsPhi() || !dominated->CheckFlag(kIDefsProcessingDone);
- }
-}
-
-
bool HValue::IsDefinedAfter(HBasicBlock* other) const {
return block()->block_id() > other->block_id();
}
@@ -547,7 +388,7 @@ HUseListNode* HUseListNode::tail() {
}
-bool HValue::CheckUsesForFlag(Flag f) {
+bool HValue::CheckUsesForFlag(Flag f) const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
@@ -556,7 +397,19 @@ bool HValue::CheckUsesForFlag(Flag f) {
}
-bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) {
+bool HValue::CheckUsesForFlag(Flag f, HValue** value) const {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
+ if (!it.value()->CheckFlag(f)) {
+ *value = it.value();
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
bool return_value = false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (it.value()->IsSimulate()) continue;
@@ -960,58 +813,6 @@ void HInstruction::Verify() {
#endif
-HNumericConstraint* HNumericConstraint::AddToGraph(
- HValue* constrained_value,
- NumericRelation relation,
- HValue* related_value,
- HInstruction* insertion_point) {
- if (insertion_point == NULL) {
- if (constrained_value->IsInstruction()) {
- insertion_point = HInstruction::cast(constrained_value);
- } else if (constrained_value->IsPhi()) {
- insertion_point = constrained_value->block()->first();
- } else {
- UNREACHABLE();
- }
- }
- HNumericConstraint* result =
- new(insertion_point->block()->zone()) HNumericConstraint(
- constrained_value, relation, related_value);
- result->InsertAfter(insertion_point);
- return result;
-}
-
-
-void HNumericConstraint::PrintDataTo(StringStream* stream) {
- stream->Add("(");
- constrained_value()->PrintNameTo(stream);
- stream->Add(" %s ", relation().Mnemonic());
- related_value()->PrintNameTo(stream);
- stream->Add(")");
-}
-
-
-HInductionVariableAnnotation* HInductionVariableAnnotation::AddToGraph(
- HPhi* phi,
- NumericRelation relation,
- int operand_index) {
- HInductionVariableAnnotation* result =
- new(phi->block()->zone()) HInductionVariableAnnotation(phi, relation,
- operand_index);
- result->InsertAfter(phi->block()->first());
- return result;
-}
-
-
-void HInductionVariableAnnotation::PrintDataTo(StringStream* stream) {
- stream->Add("(");
- RedefinedOperand()->PrintNameTo(stream);
- stream->Add(" %s ", relation().Mnemonic());
- induction_base()->PrintNameTo(stream);
- stream->Add(")");
-}
-
-
void HDummyUse::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@@ -1038,40 +839,6 @@ void HBinaryCall::PrintDataTo(StringStream* stream) {
}
-void HBoundsCheck::TryGuaranteeRangeChanging(RangeEvaluationContext* context) {
- if (context->candidate()->ActualValue() != base()->ActualValue() ||
- context->scale() < scale()) {
- return;
- }
-
- // TODO(mmassi)
- // Instead of checking for "same basic block" we should check for
- // "dominates and postdominates".
- if (context->upper_bound() == length() &&
- context->lower_bound_guarantee() != NULL &&
- context->lower_bound_guarantee() != this &&
- context->lower_bound_guarantee()->block() != block() &&
- offset() < context->offset() &&
- index_can_increase() &&
- context->upper_bound_guarantee() == NULL) {
- offset_ = context->offset();
- SetResponsibilityForRange(DIRECTION_UPPER);
- context->set_upper_bound_guarantee(this);
- isolate()->counters()->bounds_checks_eliminated()->Increment();
- } else if (context->upper_bound_guarantee() != NULL &&
- context->upper_bound_guarantee() != this &&
- context->upper_bound_guarantee()->block() != block() &&
- offset() > context->offset() &&
- index_can_decrease() &&
- context->lower_bound_guarantee() == NULL) {
- offset_ = context->offset();
- SetResponsibilityForRange(DIRECTION_LOWER);
- context->set_lower_bound_guarantee(this);
- isolate()->counters()->bounds_checks_eliminated()->Increment();
- }
-}
-
-
void HBoundsCheck::ApplyIndexChange() {
if (skip_check()) return;
@@ -1119,40 +886,6 @@ void HBoundsCheck::ApplyIndexChange() {
base_ = NULL;
offset_ = 0;
scale_ = 0;
- responsibility_direction_ = DIRECTION_NONE;
-}
-
-
-void HBoundsCheck::AddInformativeDefinitions() {
- // TODO(mmassi): Executing this code during AddInformativeDefinitions
- // is a hack. Move it to some other HPhase.
- if (FLAG_array_bounds_checks_elimination) {
- if (index()->TryGuaranteeRange(length())) {
- set_skip_check();
- }
- if (DetectCompoundIndex()) {
- HBoundsCheckBaseIndexInformation* base_index_info =
- new(block()->graph()->zone())
- HBoundsCheckBaseIndexInformation(this);
- base_index_info->InsertAfter(this);
- }
- }
-}
-
-
-bool HBoundsCheck::IsRelationTrueInternal(NumericRelation relation,
- HValue* related_value,
- int offset,
- int scale) {
- if (related_value == length()) {
- // A HBoundsCheck is smaller than the length it compared against.
- return NumericRelation::Lt().CompoundImplies(relation, 0, 0, offset, scale);
- } else if (related_value == block()->graph()->GetConstant0()) {
- // A HBoundsCheck is greater than or equal to zero.
- return NumericRelation::Ge().CompoundImplies(relation, 0, 0, offset, scale);
- } else {
- return false;
- }
}
@@ -1195,25 +928,6 @@ void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
}
-bool HBoundsCheckBaseIndexInformation::IsRelationTrueInternal(
- NumericRelation relation,
- HValue* related_value,
- int offset,
- int scale) {
- if (related_value == bounds_check()->length()) {
- return NumericRelation::Lt().CompoundImplies(
- relation,
- bounds_check()->offset(), bounds_check()->scale(), offset, scale);
- } else if (related_value == block()->graph()->GetConstant0()) {
- return NumericRelation::Ge().CompoundImplies(
- relation,
- bounds_check()->offset(), bounds_check()->scale(), offset, scale);
- } else {
- return false;
- }
-}
-
-
void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
stream->Add("base: ");
base_index()->PrintNameTo(stream);
@@ -1453,6 +1167,29 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
}
+static bool MatchLeftIsOnes(HValue* l, HValue* r, HValue** negated) {
+ if (!l->EqualsInteger32Constant(~0)) return false;
+ *negated = r;
+ return true;
+}
+
+
+static bool MatchNegationViaXor(HValue* instr, HValue** negated) {
+ if (!instr->IsBitwise()) return false;
+ HBitwise* b = HBitwise::cast(instr);
+ return (b->op() == Token::BIT_XOR) &&
+ (MatchLeftIsOnes(b->left(), b->right(), negated) ||
+ MatchLeftIsOnes(b->right(), b->left(), negated));
+}
+
+
+static bool MatchDoubleNegation(HValue* instr, HValue** arg) {
+ HValue* negated;
+ return MatchNegationViaXor(instr, &negated) &&
+ MatchNegationViaXor(negated, arg);
+}
+
+
HValue* HBitwise::Canonicalize() {
if (!representation().IsSmiOrInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
@@ -1465,18 +1202,10 @@ HValue* HBitwise::Canonicalize() {
!left()->CheckFlag(kUint32)) {
return left();
}
- return this;
-}
-
-
-HValue* HBitNot::Canonicalize() {
- // Optimize ~~x, a common pattern used for ToInt32(x).
- if (value()->IsBitNot()) {
- HValue* result = HBitNot::cast(value())->value();
- ASSERT(result->representation().IsInteger32());
- if (!result->CheckFlag(kUint32)) {
- return result;
- }
+ // Optimize double negation, a common pattern used for ToInt32(x).
+ HValue* arg;
+ if (MatchDoubleNegation(this, &arg) && !arg->CheckFlag(kUint32)) {
+ return arg;
}
return this;
}
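With HBitNot gone, the ~~x idiom (a common JavaScript ToInt32 pattern) is now recognized structurally: ~x lowers to x ^ -1, so double negation is two chained XORs with all-ones that cancel. A standalone check of the underlying two's-complement identity:

#include <cassert>
#include <cstdint>

int main() {
  // On two's-complement integers ~x == x ^ -1, so two chained XORs
  // with all-ones cancel; this is what MatchDoubleNegation recognizes.
  const std::int32_t samples[] = {0, 1, -1, 42, INT32_MIN, INT32_MAX};
  for (std::int32_t x : samples) {
    assert((x ^ -1) == ~x);
    assert(((x ^ -1) ^ -1) == x);
  }
  return 0;
}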
@@ -1489,8 +1218,15 @@ static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
HValue* HAdd::Canonicalize() {
- if (IsIdentityOperation(left(), right(), 0)) return left();
- if (IsIdentityOperation(right(), left(), 0)) return right();
+  // Adding 0 is an identity operation except in the case of -0: -0 + 0 = +0
+ if (IsIdentityOperation(left(), right(), 0) &&
+ !left()->representation().IsDouble()) { // Left could be -0.
+ return left();
+ }
+ if (IsIdentityOperation(right(), left(), 0) &&
+ !left()->representation().IsDouble()) { // Right could be -0.
+ return right();
+ }
return this;
}
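The new IsDouble() guards encode an IEEE-754 corner case: -0 + (+0) evaluates to +0, so folding x + 0 to x is only sound when x cannot be -0. A quick demonstration (plain C++, assuming standard IEEE-754 doubles):

#include <cassert>
#include <cmath>

int main() {
  double x = -0.0;
  double folded_away = x + 0.0;
  // IEEE-754 addition rounds -0 + (+0) to +0, so rewriting (x + 0) as
  // x would flip the sign of zero whenever x is -0.
  assert(std::signbit(x));             // x is -0
  assert(!std::signbit(folded_away));  // x + 0 is +0
  return 0;
}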
@@ -1514,6 +1250,7 @@ HValue* HMod::Canonicalize() {
HValue* HDiv::Canonicalize() {
+ if (IsIdentityOperation(left(), right(), 1)) return left();
return this;
}
@@ -1568,16 +1305,16 @@ static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
HValue* HUnaryMathOperation::Canonicalize() {
- if (op() == kMathRound) {
+ if (op() == kMathRound || op() == kMathFloor) {
HValue* val = value();
if (val->IsChange()) val = HChange::cast(val)->value();
- // If the input is integer32 then we replace the round instruction
- // with its input.
+ // If the input is smi or integer32 then we replace the instruction with its
+ // input.
if (val->representation().IsSmiOrInteger32()) {
if (!val->representation().Equals(representation())) {
HChange* result = new(block()->zone()) HChange(
- val, representation(), false, false, false);
+ val, representation(), false, false);
result->InsertBefore(this);
return result;
}
@@ -1588,19 +1325,6 @@ HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathFloor) {
HValue* val = value();
if (val->IsChange()) val = HChange::cast(val)->value();
-
- // If the input is integer32 then we replace the floor instruction
- // with its input.
- if (val->representation().IsSmiOrInteger32()) {
- if (!val->representation().Equals(representation())) {
- HChange* result = new(block()->zone()) HChange(
- val, representation(), false, false, false);
- result->InsertBefore(this);
- return result;
- }
- return val;
- }
-
if (val->IsDiv() && (val->UseCount() == 1)) {
HDiv* hdiv = HDiv::cast(val);
HValue* left = hdiv->left();
@@ -1610,7 +1334,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
if (new_left == NULL &&
hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
new_left = new(block()->zone()) HChange(
- left, Representation::Integer32(), false, false, false);
+ left, Representation::Integer32(), false, false);
HChange::cast(new_left)->InsertBefore(this);
}
HValue* new_right =
@@ -1621,7 +1345,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
#endif
hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
new_right = new(block()->zone()) HChange(
- right, Representation::Integer32(), false, false, false);
+ right, Representation::Integer32(), false, false);
HChange::cast(new_right)->InsertBefore(this);
}
@@ -1712,10 +1436,10 @@ void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect,
// for which the map is known.
if (HasNoUses() && dominator->IsStoreNamedField()) {
HStoreNamedField* store = HStoreNamedField::cast(dominator);
- UniqueValueId map_unique_id = store->transition_unique_id();
- if (!map_unique_id.IsInitialized() || store->object() != value()) return;
+ if (!store->has_transition() || store->object() != value()) return;
+ HConstant* transition = HConstant::cast(store->transition());
for (int i = 0; i < map_set()->length(); i++) {
- if (map_unique_id == map_unique_ids_.at(i)) {
+ if (transition->UniqueValueIdsMatch(map_unique_ids_.at(i))) {
DeleteAndReplaceWith(NULL);
return;
}
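// Concrete pattern this optimization removes: a dominating transitioning
// store such as "obj.f = v" leaves obj carrying the transition's target map,
// so a following HCheckMaps on obj whose map set includes that target map
// can never fail and is deleted.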
@@ -1734,15 +1458,16 @@ void HCheckMaps::PrintDataTo(StringStream* stream) {
}
-void HCheckFunction::PrintDataTo(StringStream* stream) {
+void HCheckValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" %p", *target());
+ stream->Add(" ");
+ object()->ShortPrint(stream);
}
-HValue* HCheckFunction::Canonicalize() {
+HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->UniqueValueIdsMatch(target_unique_id_))
+ HConstant::cast(value())->UniqueValueIdsMatch(object_unique_id_))
? NULL
: this;
}
@@ -1766,13 +1491,6 @@ void HCheckInstanceType::PrintDataTo(StringStream* stream) {
}
-void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
- stream->Add("[receiver_prototype=%p,holder=%p]%s",
- *prototypes_.first(), *prototypes_.last(),
- CanOmitPrototypeChecks() ? " (omitted)" : "");
-}
-
-
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@@ -1780,6 +1498,15 @@ void HCallStub::PrintDataTo(StringStream* stream) {
}
+void HUnknownOSRValue::PrintDataTo(StringStream* stream) {
+ const char* type = "expression";
+ if (environment_->is_local_index(index_)) type = "local";
+ if (environment_->is_special_index(index_)) type = "special";
+ if (environment_->is_parameter_index(index_)) type = "parameter";
+ stream->Add("%s @ %d", type, index_);
+}
+
+
void HInstanceOf::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
@@ -1975,60 +1702,6 @@ Range* HMod::InferRange(Zone* zone) {
}
-void HPhi::AddInformativeDefinitions() {
- if (OperandCount() == 2) {
- // If one of the operands is an OSR block give up (this cannot be an
- // induction variable).
- if (OperandAt(0)->block()->is_osr_entry() ||
- OperandAt(1)->block()->is_osr_entry()) return;
-
- for (int operand_index = 0; operand_index < 2; operand_index++) {
- int other_operand_index = (operand_index + 1) % 2;
-
- static NumericRelation relations[] = {
- NumericRelation::Ge(),
- NumericRelation::Le()
- };
-
- // Check if this phi is an induction variable. If, e.g., we know that
- // its first input is greater than the phi itself, then that must be
- // the back edge, and the phi is always greater than its second input.
- for (int relation_index = 0; relation_index < 2; relation_index++) {
- if (OperandAt(operand_index)->IsRelationTrue(relations[relation_index],
- this)) {
- HInductionVariableAnnotation::AddToGraph(this,
- relations[relation_index],
- other_operand_index);
- }
- }
- }
- }
-}
-
-
-bool HPhi::IsRelationTrueInternal(NumericRelation relation,
- HValue* other,
- int offset,
- int scale) {
- if (CheckFlag(kNumericConstraintEvaluationInProgress)) return false;
-
- SetFlag(kNumericConstraintEvaluationInProgress);
- bool result = true;
- for (int i = 0; i < OperandCount(); i++) {
- // Skip OSR entry blocks
- if (OperandAt(i)->block()->is_osr_entry()) continue;
-
- if (!OperandAt(i)->IsRelationTrue(relation, other, offset, scale)) {
- result = false;
- break;
- }
- }
- ClearFlag(kNumericConstraintEvaluationInProgress);
-
- return result;
-}
-
-
InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
if (phi->block()->loop_information() == NULL) return NULL;
if (phi->OperandCount() != 2) return NULL;
@@ -2646,6 +2319,38 @@ void HSimulate::PrintDataTo(StringStream* stream) {
}
+void HSimulate::ReplayEnvironment(HEnvironment* env) {
+ ASSERT(env != NULL);
+ env->set_ast_id(ast_id());
+ env->Drop(pop_count());
+ for (int i = values()->length() - 1; i >= 0; --i) {
+ HValue* value = values()->at(i);
+ if (HasAssignedIndexAt(i)) {
+ env->Bind(GetAssignedIndexAt(i), value);
+ } else {
+ env->Push(value);
+ }
+ }
+}
+
+
+// Replay captured objects by replacing all captured objects with the
+// same capture id in the current and all outer environments.
+void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
+ ASSERT(env != NULL);
+ while (env != NULL) {
+ for (int i = 0; i < env->length(); ++i) {
+ HValue* value = env->values()->at(i);
+ if (value->IsCapturedObject() &&
+ HCapturedObject::cast(value)->capture_id() == this->capture_id()) {
+ env->SetValueAt(i, this);
+ }
+ }
+ env = env->outer();
+ }
+}
+
+
void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
Zone* zone) {
ASSERT(return_target->IsInlineReturnTarget());
@@ -2723,6 +2428,24 @@ HConstant::HConstant(Handle<Object> handle,
}
+HConstant::HConstant(Handle<Map> handle,
+ UniqueValueId unique_id)
+ : HTemplateInstruction<0>(HType::Tagged()),
+ handle_(handle),
+ unique_id_(unique_id),
+ has_smi_value_(false),
+ has_int32_value_(false),
+ has_double_value_(false),
+ has_external_reference_value_(false),
+ is_internalized_string_(false),
+ is_not_in_new_space_(true),
+ is_cell_(false),
+ boolean_value_(false) {
+ ASSERT(!handle.is_null());
+ Initialize(Representation::Tagged());
+}
+
+
HConstant::HConstant(int32_t integer_value,
Representation r,
bool is_not_in_new_space,
@@ -2780,9 +2503,17 @@ HConstant::HConstant(ExternalReference reference)
}
+static void PrepareConstant(Handle<Object> object) {
+ if (!object->IsJSObject()) return;
+ Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ if (!js_object->map()->is_deprecated()) return;
+ JSObject::TryMigrateInstance(js_object);
+}
+
+
void HConstant::Initialize(Representation r) {
if (r.IsNone()) {
- if (has_smi_value_ && kSmiValueSize == 31) {
+ if (has_smi_value_ && SmiValuesAre31Bits()) {
r = Representation::Smi();
} else if (has_int32_value_) {
r = Representation::Integer32();
@@ -2791,6 +2522,7 @@ void HConstant::Initialize(Representation r) {
} else if (has_external_reference_value_) {
r = Representation::External();
} else {
+ PrepareConstant(handle_);
r = Representation::Tagged();
}
}
@@ -2859,12 +2591,13 @@ Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Zone* zone) {
HConstant* res = NULL;
- if (handle()->IsBoolean()) {
- res = handle()->BooleanValue() ?
+ Handle<Object> handle = this->handle(zone->isolate());
+ if (handle->IsBoolean()) {
+ res = handle->BooleanValue() ?
new(zone) HConstant(1) : new(zone) HConstant(0);
- } else if (handle()->IsUndefined()) {
+ } else if (handle->IsUndefined()) {
res = new(zone) HConstant(OS::nan_value());
- } else if (handle()->IsNull()) {
+ } else if (handle->IsNull()) {
res = new(zone) HConstant(0);
}
return Maybe<HConstant*>(res != NULL, res);
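// The constants above mirror ES5.1 ToNumber for these cases:
// true -> 1, false -> 0, undefined -> NaN, null -> 0.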
@@ -2880,7 +2613,7 @@ void HConstant::PrintDataTo(StringStream* stream) {
stream->Add("%p ", reinterpret_cast<void*>(
external_reference_value_.address()));
} else {
- handle()->ShortPrint(stream);
+ handle(Isolate::Current())->ShortPrint(stream);
}
}
@@ -3115,16 +2848,6 @@ void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
}
-void HCompareNumericAndBranch::AddInformativeDefinitions() {
- NumericRelation r = NumericRelation::FromToken(token());
- if (r.IsNone()) return;
-
- HNumericConstraint::AddToGraph(left(), r, right(), SuccessorAt(0)->first());
- HNumericConstraint::AddToGraph(
- left(), r.Negated(), right(), SuccessorAt(1)->first());
-}
-
-
void HCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
@@ -3143,6 +2866,18 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
}
+void HCompareHoleAndBranch::PrintDataTo(StringStream* stream) {
+ object()->PrintNameTo(stream);
+ HControlInstruction::PrintDataTo(stream);
+}
+
+
+void HCompareHoleAndBranch::InferRepresentation(
+ HInferRepresentationPhase* h_infer) {
+ ChangeRepresentation(object()->representation());
+}
+
+
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@@ -3195,123 +2930,6 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
access_.PrintTo(stream);
- if (HasTypeCheck()) {
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
- }
-}
-
-
-// Returns true if an instance of this map can never find a property with this
-// name in its prototype chain. This means all prototypes up to the top are
-// fast and don't have the name in them. It would be good if we could optimize
-// polymorphic loads where the property is sometimes found in the prototype
-// chain.
-static bool PrototypeChainCanNeverResolve(
- Handle<Map> map, Handle<String> name) {
- Isolate* isolate = map->GetIsolate();
- Object* current = map->prototype();
- while (current != isolate->heap()->null_value()) {
- if (current->IsJSGlobalProxy() ||
- current->IsGlobalObject() ||
- !current->IsJSObject() ||
- JSObject::cast(current)->map()->has_named_interceptor() ||
- JSObject::cast(current)->IsAccessCheckNeeded() ||
- !JSObject::cast(current)->HasFastProperties()) {
- return false;
- }
-
- LookupResult lookup(isolate);
- Map* map = JSObject::cast(current)->map();
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) return false;
- if (!lookup.IsCacheable()) return false;
- current = JSObject::cast(current)->GetPrototype();
- }
- return true;
-}
-
-
-HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
- HValue* object,
- SmallMapList* types,
- Handle<String> name,
- Zone* zone)
- : types_(Min(types->length(), kMaxLoadPolymorphism), zone),
- name_(name),
- types_unique_ids_(0, zone),
- name_unique_id_(),
- need_generic_(false) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetGVNFlag(kDependsOnMaps);
- SmallMapList negative_lookups;
- for (int i = 0;
- i < types->length() && types_.length() < kMaxLoadPolymorphism;
- ++i) {
- Handle<Map> map = types->at(i);
- // Deprecated maps are updated to the current map in the type oracle.
- ASSERT(!map->is_deprecated());
- LookupResult lookup(map->GetIsolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) {
- switch (lookup.type()) {
- case FIELD: {
- int index = lookup.GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- SetGVNFlag(kDependsOnInobjectFields);
- } else {
- SetGVNFlag(kDependsOnBackingStoreFields);
- }
- if (FLAG_track_double_fields &&
- lookup.representation().IsDouble()) {
- // Since the value needs to be boxed, use a generic handler for
- // loading doubles.
- continue;
- }
- types_.Add(types->at(i), zone);
- break;
- }
- case CONSTANT:
- types_.Add(types->at(i), zone);
- break;
- case CALLBACKS:
- break;
- case TRANSITION:
- case INTERCEPTOR:
- case NONEXISTENT:
- case NORMAL:
- case HANDLER:
- UNREACHABLE();
- break;
- }
- } else if (lookup.IsCacheable() &&
- // For dicts the lookup on the map will fail, but the object may
- // contain the property so we cannot generate a negative lookup
- // (which would just be a map check and return undefined).
- !map->is_dictionary_map() &&
- !map->has_named_interceptor() &&
- PrototypeChainCanNeverResolve(map, name)) {
- negative_lookups.Add(types->at(i), zone);
- }
- }
-
- bool need_generic =
- (types->length() != negative_lookups.length() + types_.length());
- if (!need_generic && FLAG_deoptimize_uncommon_cases) {
- SetFlag(kUseGVN);
- for (int i = 0; i < negative_lookups.length(); i++) {
- types_.Add(negative_lookups.at(i), zone);
- }
- } else {
- // We don't have an easy way to handle both a call (to the generic stub) and
- // a deopt in the same hydrogen instruction, so in this case we don't add
- // the negative lookups which can deopt - just let the generic stub handle
- // them.
- SetAllSideEffects();
- need_generic_ = true;
- }
}
@@ -3322,10 +2940,10 @@ HCheckMaps* HCheckMaps::New(Zone* zone,
CompilationInfo* info,
HValue* typecheck) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
- check_map->map_set_.Add(map, zone);
+ check_map->Add(map, zone);
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
- HConstant::cast(value)->InstanceOf(map)) {
+ HConstant::cast(value)->HasMap(map)) {
check_map->omit(info);
}
return check_map;
@@ -3342,46 +2960,6 @@ void HCheckMaps::FinalizeUniqueValueId() {
}
-void HLoadNamedFieldPolymorphic::FinalizeUniqueValueId() {
- if (!types_unique_ids_.is_empty()) return;
- Zone* zone = block()->zone();
- types_unique_ids_.Initialize(types_.length(), zone);
- for (int i = 0; i < types_.length(); i++) {
- types_unique_ids_.Add(UniqueValueId(types_.at(i)), zone);
- }
- name_unique_id_ = UniqueValueId(name_);
-}
-
-
-bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
- ASSERT_EQ(types_.length(), types_unique_ids_.length());
- HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
- if (name_unique_id_ != other->name_unique_id_) return false;
- if (types_unique_ids_.length() != other->types_unique_ids_.length()) {
- return false;
- }
- if (need_generic_ != other->need_generic_) return false;
- for (int i = 0; i < types_unique_ids_.length(); i++) {
- bool found = false;
- for (int j = 0; j < types_unique_ids_.length(); j++) {
- if (types_unique_ids_.at(j) == other->types_unique_ids_.at(i)) {
- found = true;
- break;
- }
- }
- if (!found) return false;
- }
- return true;
-}
-
-
-void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
-}
-
-
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -3454,18 +3032,8 @@ bool HLoadKeyed::UsesMustHandleHole() const {
bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
- if (!IsFastDoubleElementsKind(elements_kind())) {
- return false;
- }
-
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!use->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
- return false;
- }
- }
-
- return true;
+ return IsFastDoubleElementsKind(elements_kind()) &&
+ CheckUsesForFlag(HValue::kAllowUndefinedAsNaN);
}
@@ -3547,8 +3115,8 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
if (NeedsWriteBarrier()) {
stream->Add(" (write-barrier)");
}
- if (!transition().is_null()) {
- stream->Add(" (transition map %p)", *transition());
+ if (has_transition()) {
+ stream->Add(" (transition map %p)", *transition_map());
}
}
@@ -3594,6 +3162,7 @@ void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
ElementsAccessor::ForKind(from_kind)->name(),
*transitioned_map(),
ElementsAccessor::ForKind(to_kind)->name());
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
}
@@ -3684,8 +3253,6 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
Representation input_rep = value()->representation();
if (!input_rep.IsTagged()) {
rep = rep.generalize(input_rep);
- } else if (flexible_int()) {
- rep = Representation::Integer32();
}
return rep;
}
@@ -3694,6 +3261,7 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
+ Zone* zone = block()->zone();
if (!FLAG_use_allocation_folding) return;
// Try to fold allocations together with their dominating allocations.
@@ -3705,31 +3273,44 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return;
}
- HAllocate* dominator_allocate_instr = HAllocate::cast(dominator);
- HValue* dominator_size = dominator_allocate_instr->size();
+ HAllocate* dominator_allocate = HAllocate::cast(dominator);
+ HValue* dominator_size = dominator_allocate->size();
HValue* current_size = size();
- // We can just fold allocations that are guaranteed in new space.
+
// TODO(hpayer): Add support for non-constant allocation in dominator.
- if (!IsNewSpaceAllocation() || !current_size->IsInteger32Constant() ||
- !dominator_allocate_instr->IsNewSpaceAllocation() ||
+ if (!current_size->IsInteger32Constant() ||
!dominator_size->IsInteger32Constant()) {
if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s)\n",
+ PrintF("#%d (%s) cannot fold into #%d (%s), dynamic allocation size\n",
id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
}
return;
}
+ dominator_allocate = GetFoldableDominator(dominator_allocate);
+ if (dominator_allocate == NULL) {
+ return;
+ }
+
+ ASSERT((IsNewSpaceAllocation() &&
+ dominator_allocate->IsNewSpaceAllocation()) ||
+ (IsOldDataSpaceAllocation() &&
+ dominator_allocate->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
+ dominator_allocate->IsOldPointerSpaceAllocation()));
+
// First update the size of the dominator allocate instruction.
- int32_t dominator_size_constant =
+ dominator_size = dominator_allocate->size();
+ int32_t original_object_size =
HConstant::cast(dominator_size)->GetInteger32Constant();
+ int32_t dominator_size_constant = original_object_size;
int32_t current_size_constant =
HConstant::cast(current_size)->GetInteger32Constant();
int32_t new_dominator_size = dominator_size_constant + current_size_constant;
if (MustAllocateDoubleAligned()) {
- if (!dominator_allocate_instr->MustAllocateDoubleAligned()) {
- dominator_allocate_instr->MakeDoubleAligned();
+ if (!dominator_allocate->MustAllocateDoubleAligned()) {
+ dominator_allocate->MakeDoubleAligned();
}
if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
dominator_size_constant += kDoubleSize / 2;
@@ -3740,36 +3321,180 @@ void HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
if (new_dominator_size > Page::kMaxNonCodeHeapObjectSize) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic(),
- new_dominator_size);
+ id(), Mnemonic(), dominator_allocate->id(),
+ dominator_allocate->Mnemonic(), new_dominator_size);
}
return;
}
- HBasicBlock* block = dominator->block();
- Zone* zone = block->zone();
- HInstruction* new_dominator_size_constant =
- HConstant::New(zone, context(), new_dominator_size);
- new_dominator_size_constant->InsertBefore(dominator_allocate_instr);
- dominator_allocate_instr->UpdateSize(new_dominator_size_constant);
+
+ HInstruction* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
+ zone,
+ context(),
+ new_dominator_size,
+ Representation::None(),
+ dominator_allocate);
+ dominator_allocate->UpdateSize(new_dominator_size_constant);
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- dominator_allocate_instr->MakePrefillWithFiller();
+ if (FLAG_verify_heap && dominator_allocate->IsNewSpaceAllocation()) {
+ dominator_allocate->MakePrefillWithFiller();
+ } else {
+ // TODO(hpayer): This is a short-term hack to make allocation mementos
+ // work again in new space.
+ dominator_allocate->ClearNextMapWord(original_object_size);
}
+#else
+ // TODO(hpayer): This is a short-term hack to make allocation mementos
+ // work again in new space.
+ dominator_allocate->ClearNextMapWord(original_object_size);
#endif
+ dominator_allocate->clear_next_map_word_ = clear_next_map_word_;
+
// After that replace the dominated allocate instruction.
HInstruction* dominated_allocate_instr =
HInnerAllocatedObject::New(zone,
context(),
- dominator_allocate_instr,
+ dominator_allocate,
dominator_size_constant,
type());
dominated_allocate_instr->InsertBefore(this);
DeleteAndReplaceWith(dominated_allocate_instr);
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) folded into #%d (%s)\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ id(), Mnemonic(), dominator_allocate->id(),
+ dominator_allocate->Mnemonic());
+ }
+}
+
+
+HAllocate* HAllocate::GetFoldableDominator(HAllocate* dominator) {
+ if (!IsFoldable(dominator)) {
+ // We cannot hoist old space allocations over new space allocations.
+ if (IsNewSpaceAllocation() || dominator->IsNewSpaceAllocation()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), new space hoisting\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return NULL;
+ }
+
+ HAllocate* dominator_dominator = dominator->dominating_allocate_;
+
+ // We can hoist old data space allocations over an old pointer space
+ // allocation and vice versa. For that we have to check the dominator
+ // of the dominator allocate instruction.
+ if (dominator_dominator == NULL) {
+ dominating_allocate_ = dominator;
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n",
+ id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ }
+ return NULL;
+ }
+
+    // We only fold old space allocations that are in the same basic block,
+    // since it is not guaranteed that we fill up the whole allocated old
+    // space memory.
+    // TODO(hpayer): Remove this limitation and add filler maps for each
+    // allocation as soon as we have store elimination.
+ if (block()->block_id() != dominator_dominator->block()->block_id()) {
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) cannot fold into #%d (%s), different basic blocks\n",
+ id(), Mnemonic(), dominator_dominator->id(),
+ dominator_dominator->Mnemonic());
+ }
+ return NULL;
+ }
+
+ ASSERT((IsOldDataSpaceAllocation() &&
+ dominator_dominator->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
+ dominator_dominator->IsOldPointerSpaceAllocation()));
+
+ int32_t current_size = HConstant::cast(size())->GetInteger32Constant();
+ HStoreNamedField* dominator_free_space_size =
+ dominator->filler_free_space_size_;
+ if (dominator_free_space_size != NULL) {
+ // We already hoisted one old space allocation, i.e., we already installed
+ // a filler map. Hence, we just have to update the free space size.
+ dominator->UpdateFreeSpaceFiller(current_size);
+ } else {
+ // This is the first old space allocation that gets hoisted. We have to
+      // install a filler map since the following allocation may cause a GC.
+ dominator->CreateFreeSpaceFiller(current_size);
+ }
+
+ // We can hoist the old space allocation over the actual dominator.
+ return dominator_dominator;
+ }
+ return dominator;
+}
+
+
+void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
+ ASSERT(filler_free_space_size_ != NULL);
+ Zone* zone = block()->zone();
+ // We must explicitly force Smi representation here because on x64 we
+ // would otherwise automatically choose int32, but the actual store
+ // requires a Smi-tagged value.
+ HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
+ zone,
+ context(),
+ filler_free_space_size_->value()->GetInteger32Constant() +
+ free_space_size,
+ Representation::Smi(),
+ filler_free_space_size_);
+ filler_free_space_size_->UpdateValue(new_free_space_size);
+}
+
+
+void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
+ ASSERT(filler_free_space_size_ == NULL);
+ Zone* zone = block()->zone();
+ int32_t dominator_size =
+ HConstant::cast(dominating_allocate_->size())->GetInteger32Constant();
+ HInstruction* free_space_instr =
+ HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
+ dominator_size, type());
+ free_space_instr->InsertBefore(this);
+ HConstant* filler_map = HConstant::New(
+ zone,
+ context(),
+ isolate()->factory()->free_space_map(),
+ UniqueValueId::free_space_map(isolate()->heap()));
+ filler_map->InsertAfter(free_space_instr);
+ HInstruction* store_map = HStoreNamedField::New(zone, context(),
+ free_space_instr, HObjectAccess::ForMap(), filler_map);
+ store_map->SetFlag(HValue::kHasNoObservableSideEffects);
+ store_map->InsertAfter(filler_map);
+
+ // We must explicitly force Smi representation here because on x64 we
+ // would otherwise automatically choose int32, but the actual store
+ // requires a Smi-tagged value.
+ HConstant* filler_size = HConstant::CreateAndInsertAfter(
+ zone, context(), free_space_size, Representation::Smi(), store_map);
+ // Must force Smi representation for x64 (see comment above).
+ HObjectAccess access =
+ HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset,
+ Representation::Smi());
+ HStoreNamedField* store_size = HStoreNamedField::New(zone, context(),
+ free_space_instr, access, filler_size);
+ store_size->SetFlag(HValue::kHasNoObservableSideEffects);
+ store_size->InsertAfter(filler_size);
+ filler_free_space_size_ = store_size;
+}
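// Sketch of the heap layout the code above produces (based on the stores it
// emits; widths not to scale):
//
//   dominating_allocate_        free_space_instr
//   |                           |
//   v                           v
//   +---------------------------+----------------+------------+-----
//   | dominator object          | free_space_map | size (Smi) | ...
//   +---------------------------+----------------+------------+-----
//
// The filler is a valid FreeSpace object, so the old-space page stays
// iterable if the following allocation triggers a GC before the hoisted
// object is initialized.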
+
+
+void HAllocate::ClearNextMapWord(int offset) {
+ if (clear_next_map_word_) {
+ Zone* zone = block()->zone();
+ HObjectAccess access = HObjectAccess::ForJSObjectOffset(offset);
+ HStoreNamedField* clear_next_map =
+ HStoreNamedField::New(zone, context(), this, access,
+ block()->graph()->GetConstantNull());
+ clear_next_map->ClearAllSideEffects();
+ clear_next_map->InsertAfter(this);
}
}
@@ -3960,7 +3685,7 @@ HInstruction* HStringCharFromCode::New(
Zone* zone, HValue* context, HValue* char_code) {
if (FLAG_fold_constants && char_code->IsConstant()) {
HConstant* c_code = HConstant::cast(char_code);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = zone->isolate();
if (c_code->HasNumberValue()) {
if (std::isfinite(c_code->DoubleValue())) {
uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
@@ -4242,10 +3967,10 @@ void HPhi::SimplifyConstantInputs() {
DoubleToInt32(operand->DoubleValue()));
integer_input->InsertAfter(operand);
SetOperandAt(i, integer_input);
- } else if (operand == graph->GetConstantTrue()) {
- SetOperandAt(i, graph->GetConstant1());
- } else {
- // This catches |false|, |undefined|, strings and objects.
+ } else if (operand->HasBooleanValue()) {
+ SetOperandAt(i, operand->BooleanValue() ? graph->GetConstant1()
+ : graph->GetConstant0());
+ } else if (operand->ImmortalImmovable()) {
SetOperandAt(i, graph->GetConstant0());
}
}
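// Examples of the rewrite above: a phi input of true becomes the int32
// constant 1, false becomes 0, and immortal immovables such as undefined,
// null or the hole also become 0; number constants were already replaced
// via DoubleToInt32 in the preceding branch.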
@@ -4285,6 +4010,9 @@ Representation HPhi::RepresentationFromInputs() {
Representation HValue::RepresentationFromUseRequirements() {
Representation rep = Representation::None();
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ // Ignore the use requirement from never run code
+    // Ignore use requirements from code that never runs.
+
// We check for observed_input_representation elsewhere.
Representation use_rep =
it.value()->RequiredInputRepresentation(it.index());
@@ -4345,7 +4073,7 @@ void HCheckHeapObject::Verify() {
}
-void HCheckFunction::Verify() {
+void HCheckValue::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
@@ -4375,6 +4103,15 @@ HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
}
+HObjectAccess HObjectAccess::ForContextSlot(int index) {
+ ASSERT(index >= 0);
+ Portion portion = kInobject;
+ int offset = Context::kHeaderSize + index * kPointerSize;
+ ASSERT_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
+ return HObjectAccess(portion, offset, Representation::Tagged());
+}
+
+
HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
ASSERT(offset >= 0);
Portion portion = kInobject;
diff --git a/chromium/v8/src/hydrogen-instructions.h b/chromium/v8/src/hydrogen-instructions.h
index 78e9a6b192a..7c4c921a851 100644
--- a/chromium/v8/src/hydrogen-instructions.h
+++ b/chromium/v8/src/hydrogen-instructions.h
@@ -49,11 +49,11 @@ class HEnvironment;
class HInferRepresentationPhase;
class HInstruction;
class HLoopInformation;
+class HStoreNamedField;
class HValue;
class LInstruction;
class LChunkBuilder;
-
#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
V(ArithmeticBinaryOperation) \
V(BinaryOperation) \
@@ -63,7 +63,6 @@ class LChunkBuilder;
#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
- V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
V(Allocate) \
@@ -72,7 +71,6 @@ class LChunkBuilder;
V(ArgumentsLength) \
V(ArgumentsObject) \
V(Bitwise) \
- V(BitNot) \
V(BlockEntry) \
V(BoundsCheck) \
V(BoundsCheckBaseIndexInformation) \
@@ -87,17 +85,18 @@ class LChunkBuilder;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
+ V(CapturedObject) \
V(Change) \
- V(CheckFunction) \
V(CheckHeapObject) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
+ V(CompareHoleAndBranch) \
V(CompareGeneric) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
@@ -122,7 +121,6 @@ class LChunkBuilder;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(InductionVariableAnnotation) \
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
@@ -144,14 +142,12 @@ class LChunkBuilder;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(Mod) \
V(Mul) \
- V(NumericConstraint) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -167,6 +163,7 @@ class LChunkBuilder;
V(Shr) \
V(Simulate) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -213,24 +210,27 @@ class LChunkBuilder;
V(ExternalMemory)
-#define DECLARE_ABSTRACT_INSTRUCTION(type) \
- virtual bool Is##type() const { return true; } \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
+#define DECLARE_ABSTRACT_INSTRUCTION(type) \
+ virtual bool Is##type() const V8_FINAL V8_OVERRIDE { return true; } \
+ static H##type* cast(HValue* value) { \
+ ASSERT(value->Is##type()); \
+ return reinterpret_cast<H##type*>(value); \
}
-#define DECLARE_CONCRETE_INSTRUCTION(type) \
- virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
- static H##type* cast(HValue* value) { \
- ASSERT(value->Is##type()); \
- return reinterpret_cast<H##type*>(value); \
- } \
- virtual Opcode opcode() const { return HValue::k##type; }
+#define DECLARE_CONCRETE_INSTRUCTION(type) \
+ virtual LInstruction* CompileToLithium( \
+ LChunkBuilder* builder) V8_FINAL V8_OVERRIDE; \
+ static H##type* cast(HValue* value) { \
+ ASSERT(value->Is##type()); \
+ return reinterpret_cast<H##type*>(value); \
+ } \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return HValue::k##type; \
+ }
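// Assuming v8config.h maps V8_FINAL and V8_OVERRIDE to the C++11 "final" and
// "override" keywords where available, the annotated macro turns accidental
// re-overrides into compile errors instead of silent method hiding, e.g.
// (hypothetical classes):
//
//   class HFoo : public HTemplateInstruction<0> {
//    public:
//     DECLARE_CONCRETE_INSTRUCTION(Foo)
//   };
//   class HBar : public HFoo {
//     virtual Opcode opcode() const;  // error: opcode() is final in HFoo
//   };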
-class Range: public ZoneObject {
+class Range V8_FINAL : public ZoneObject {
public:
Range()
: lower_(kMinInt),
@@ -305,16 +305,12 @@ class Range: public ZoneObject {
};
-class UniqueValueId {
+class UniqueValueId V8_FINAL {
public:
UniqueValueId() : raw_address_(NULL) { }
- explicit UniqueValueId(Object* object) {
- raw_address_ = reinterpret_cast<Address>(object);
- ASSERT(IsInitialized());
- }
-
explicit UniqueValueId(Handle<Object> handle) {
+ ASSERT(!AllowHeapAllocation::IsAllowed());
static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1);
if (handle.is_null()) {
raw_address_ = kEmptyHandleSentinel;
@@ -342,12 +338,32 @@ class UniqueValueId {
return reinterpret_cast<intptr_t>(raw_address_);
}
+#define IMMOVABLE_UNIQUE_VALUE_ID(name) \
+ static UniqueValueId name(Heap* heap) { return UniqueValueId(heap->name()); }
+
+ IMMOVABLE_UNIQUE_VALUE_ID(free_space_map)
+ IMMOVABLE_UNIQUE_VALUE_ID(minus_zero_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(nan_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(undefined_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(null_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(true_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(false_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(the_hole_value)
+ IMMOVABLE_UNIQUE_VALUE_ID(empty_string)
+
+#undef IMMOVABLE_UNIQUE_VALUE_ID
+
private:
Address raw_address_;
+
+ explicit UniqueValueId(Object* object) {
+ raw_address_ = reinterpret_cast<Address>(object);
+ ASSERT(IsInitialized());
+ }
};
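// One mechanical expansion of the IMMOVABLE_UNIQUE_VALUE_ID macro above, for
// reference:
//
//   static UniqueValueId free_space_map(Heap* heap) {
//     return UniqueValueId(heap->free_space_map());
//   }
//
// Each named immortal-immovable root thus gets a static factory, which is
// the remaining route to the now-private raw Object* constructor.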
-class HType {
+class HType V8_FINAL {
public:
static HType None() { return HType(kNone); }
static HType Tagged() { return HType(kTagged); }
@@ -374,10 +390,6 @@ class HType {
return Combine(other).Equals(other);
}
- bool IsTagged() const {
- return ((type_ & kTagged) == kTagged);
- }
-
bool IsTaggedPrimitive() const {
return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
}
@@ -498,7 +510,7 @@ class HUseListNode: public ZoneObject {
// We reuse use list nodes behind the scenes as uses are added and deleted.
// This class is the safe way to iterate uses while deleting them.
-class HUseIterator BASE_EMBEDDED {
+class HUseIterator V8_FINAL BASE_EMBEDDED {
public:
bool Done() { return current_ == NULL; }
void Advance();
@@ -542,159 +554,7 @@ enum GVNFlag {
};
-class NumericRelation {
- public:
- enum Kind { NONE, EQ, GT, GE, LT, LE, NE };
- static const char* MnemonicFromKind(Kind kind) {
- switch (kind) {
- case NONE: return "NONE";
- case EQ: return "EQ";
- case GT: return "GT";
- case GE: return "GE";
- case LT: return "LT";
- case LE: return "LE";
- case NE: return "NE";
- }
- UNREACHABLE();
- return NULL;
- }
- const char* Mnemonic() const { return MnemonicFromKind(kind_); }
-
- static NumericRelation None() { return NumericRelation(NONE); }
- static NumericRelation Eq() { return NumericRelation(EQ); }
- static NumericRelation Gt() { return NumericRelation(GT); }
- static NumericRelation Ge() { return NumericRelation(GE); }
- static NumericRelation Lt() { return NumericRelation(LT); }
- static NumericRelation Le() { return NumericRelation(LE); }
- static NumericRelation Ne() { return NumericRelation(NE); }
-
- bool IsNone() { return kind_ == NONE; }
-
- static NumericRelation FromToken(Token::Value token) {
- switch (token) {
- case Token::EQ: return Eq();
- case Token::EQ_STRICT: return Eq();
- case Token::LT: return Lt();
- case Token::GT: return Gt();
- case Token::LTE: return Le();
- case Token::GTE: return Ge();
- case Token::NE: return Ne();
- case Token::NE_STRICT: return Ne();
- default: return None();
- }
- }
-
- // The semantics of "Reversed" is that if "x rel y" is true then also
- // "y rel.Reversed() x" is true, and that rel.Reversed().Reversed() == rel.
- NumericRelation Reversed() {
- switch (kind_) {
- case NONE: return None();
- case EQ: return Eq();
- case GT: return Lt();
- case GE: return Le();
- case LT: return Gt();
- case LE: return Ge();
- case NE: return Ne();
- }
- UNREACHABLE();
- return None();
- }
-
- // The semantics of "Negated" is that if "x rel y" is true then also
- // "!(x rel.Negated() y)" is true.
- NumericRelation Negated() {
- switch (kind_) {
- case NONE: return None();
- case EQ: return Ne();
- case GT: return Le();
- case GE: return Lt();
- case LT: return Ge();
- case LE: return Gt();
- case NE: return Eq();
- }
- UNREACHABLE();
- return None();
- }
-
- // The semantics of "Implies" is that if "x rel y" is true
- // then also "x other_relation y" is true.
- bool Implies(NumericRelation other_relation) {
- switch (kind_) {
- case NONE: return false;
- case EQ: return (other_relation.kind_ == EQ)
- || (other_relation.kind_ == GE)
- || (other_relation.kind_ == LE);
- case GT: return (other_relation.kind_ == GT)
- || (other_relation.kind_ == GE)
- || (other_relation.kind_ == NE);
- case LT: return (other_relation.kind_ == LT)
- || (other_relation.kind_ == LE)
- || (other_relation.kind_ == NE);
- case GE: return (other_relation.kind_ == GE);
- case LE: return (other_relation.kind_ == LE);
- case NE: return (other_relation.kind_ == NE);
- }
- UNREACHABLE();
- return false;
- }
-
- // The semantics of "IsExtendable" is that if
- // "rel.IsExtendable(direction)" is true then
- // "x rel y" implies "(x + direction) rel y" .
- bool IsExtendable(int direction) {
- switch (kind_) {
- case NONE: return false;
- case EQ: return false;
- case GT: return (direction >= 0);
- case GE: return (direction >= 0);
- case LT: return (direction <= 0);
- case LE: return (direction <= 0);
- case NE: return false;
- }
- UNREACHABLE();
- return false;
- }
-
- // CompoundImplies returns true when
- // "((x + my_offset) >> my_scale) rel y" implies
- // "((x + other_offset) >> other_scale) other_relation y".
- bool CompoundImplies(NumericRelation other_relation,
- int my_offset,
- int my_scale,
- int other_offset = 0,
- int other_scale = 0) {
- return Implies(other_relation) && ComponentsImply(
- my_offset, my_scale, other_offset, other_scale);
- }
-
- private:
- // ComponentsImply returns true when
- // "((x + my_offset) >> my_scale) rel y" implies
- // "((x + other_offset) >> other_scale) rel y".
- bool ComponentsImply(int my_offset,
- int my_scale,
- int other_offset,
- int other_scale) {
- switch (kind_) {
- case NONE: break; // Fall through to UNREACHABLE().
- case EQ:
- case NE: return my_offset == other_offset && my_scale == other_scale;
- case GT:
- case GE: return my_offset <= other_offset && my_scale >= other_scale;
- case LT:
- case LE: return my_offset >= other_offset && my_scale <= other_scale;
- }
- UNREACHABLE();
- return false;
- }
-
- explicit NumericRelation(Kind kind) : kind_(kind) {}
-
- Kind kind_;
-};
-
-
-class DecompositionResult BASE_EMBEDDED {
+class DecompositionResult V8_FINAL BASE_EMBEDDED {
public:
DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
@@ -739,50 +599,10 @@ class DecompositionResult BASE_EMBEDDED {
};
-class RangeEvaluationContext BASE_EMBEDDED {
- public:
- RangeEvaluationContext(HValue* value, HValue* upper);
-
- HValue* lower_bound() { return lower_bound_; }
- HValue* lower_bound_guarantee() { return lower_bound_guarantee_; }
- HValue* candidate() { return candidate_; }
- HValue* upper_bound() { return upper_bound_; }
- HValue* upper_bound_guarantee() { return upper_bound_guarantee_; }
- int offset() { return offset_; }
- int scale() { return scale_; }
-
- bool is_range_satisfied() {
- return lower_bound_guarantee() != NULL && upper_bound_guarantee() != NULL;
- }
-
- void set_lower_bound_guarantee(HValue* guarantee) {
- lower_bound_guarantee_ = ConvertGuarantee(guarantee);
- }
- void set_upper_bound_guarantee(HValue* guarantee) {
- upper_bound_guarantee_ = ConvertGuarantee(guarantee);
- }
-
- void swap_candidate(DecompositionResult* other_candicate) {
- other_candicate->SwapValues(&candidate_, &offset_, &scale_);
- }
-
- private:
- HValue* ConvertGuarantee(HValue* guarantee);
-
- HValue* lower_bound_;
- HValue* lower_bound_guarantee_;
- HValue* candidate_;
- HValue* upper_bound_;
- HValue* upper_bound_guarantee_;
- int offset_;
- int scale_;
-};
-
-
typedef EnumSet<GVNFlag> GVNFlagSet;
-class HValue: public ZoneObject {
+class HValue : public ZoneObject {
public:
static const int kNoNumber = -1;
@@ -816,12 +636,6 @@ class HValue: public ZoneObject {
// HGraph::ComputeSafeUint32Operations is responsible for setting this
// flag.
kUint32,
- // If a phi is involved in the evaluation of a numeric constraint the
- // recursion can cause an endless cycle: we use this flag to exit the loop.
- kNumericConstraintEvaluationInProgress,
- // This flag is set to true after the SetupInformativeDefinitions() pass
- // has processed this instruction.
- kIDefsProcessingDone,
kHasNoObservableSideEffects,
// Indicates the instruction is live during dead code elimination.
kIsLive,
@@ -959,8 +773,8 @@ class HValue: public ZoneObject {
return RedefinedOperandIndex() != kNoRedefinedOperand;
}
HValue* RedefinedOperand() {
- return IsInformativeDefinition() ? OperandAt(RedefinedOperandIndex())
- : NULL;
+ int index = RedefinedOperandIndex();
+ return index == kNoRedefinedOperand ? NULL : OperandAt(index);
}
// A purely informative definition is an idef that will not emit code and
@@ -971,17 +785,8 @@ class HValue: public ZoneObject {
// This method must always return the original HValue SSA definition
// (regardless of any iDef of this value).
HValue* ActualValue() {
- return IsInformativeDefinition() ? RedefinedOperand()->ActualValue()
- : this;
- }
-
- virtual void AddInformativeDefinitions() {}
-
- void UpdateRedefinedUsesWhileSettingUpInformativeDefinitions() {
- UpdateRedefinedUsesInner<TestDominanceUsingProcessedFlag>();
- }
- void UpdateRedefinedUses() {
- UpdateRedefinedUsesInner<Dominates>();
+ int index = RedefinedOperandIndex();
+ return index == kNoRedefinedOperand ? this : OperandAt(index);
}
bool IsInteger32Constant();
@@ -1012,10 +817,12 @@ class HValue: public ZoneObject {
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
// Returns true if the flag specified is set for all uses, false otherwise.
- bool CheckUsesForFlag(Flag f);
+ bool CheckUsesForFlag(Flag f) const;
+  // Same as above, but returns the first use lacking the flag in |value|.
+ bool CheckUsesForFlag(Flag f, HValue** value) const;
// Returns true if the flag specified is set for all uses, and this set
// of uses is non-empty.
- bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f);
+ bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
GVNFlagSet gvn_flags() const { return gvn_flags_; }
void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
@@ -1072,6 +879,7 @@ class HValue: public ZoneObject {
// Escape analysis helpers.
virtual bool HasEscapingOperandAt(int index) { return true; }
+ virtual bool HasOutOfBoundsAccess(int size) { return false; }
// Representation helpers.
virtual Representation observed_input_representation(int index) {
@@ -1089,7 +897,7 @@ class HValue: public ZoneObject {
bool Equals(HValue* other);
virtual intptr_t Hashcode();
- // Compute unique ids upfront that is safe wrt GC and parallel recompilation.
+  // Compute unique ids upfront, safe wrt GC and concurrent compilation.
virtual void FinalizeUniqueValueId() { }
// Printing support.
@@ -1132,12 +940,6 @@ class HValue: public ZoneObject {
virtual void Verify() = 0;
#endif
- bool IsRelationTrue(NumericRelation relation,
- HValue* other,
- int offset = 0,
- int scale = 0);
-
- bool TryGuaranteeRange(HValue* upper_bound);
virtual bool TryDecompose(DecompositionResult* decomposition) {
if (RedefinedOperand() != NULL) {
return RedefinedOperand()->TryDecompose(decomposition);
@@ -1158,18 +960,12 @@ class HValue: public ZoneObject {
return type().ToStringOrToNumberCanBeObserved(representation());
}
- protected:
- void TryGuaranteeRangeRecursive(RangeEvaluationContext* context);
-
- enum RangeGuaranteeDirection {
- DIRECTION_NONE = 0,
- DIRECTION_UPPER = 1,
- DIRECTION_LOWER = 2,
- DIRECTION_BOTH = DIRECTION_UPPER | DIRECTION_LOWER
- };
- virtual void SetResponsibilityForRange(RangeGuaranteeDirection direction) {}
- virtual void TryGuaranteeRangeChanging(RangeEvaluationContext* context) {}
+ MinusZeroMode GetMinusZeroMode() {
+ return CheckFlag(kBailoutOnMinusZero)
+ ? FAIL_ON_MINUS_ZERO : TREAT_MINUS_ZERO_AS_ZERO;
+ }
+ protected:
// This function must be overridden for instructions with flag kUseGVN, to
// compare the non-Operand parts of the instruction.
virtual bool DataEquals(HValue* other) {
@@ -1203,47 +999,6 @@ class HValue: public ZoneObject {
representation_ = r;
}
- // Signature of a function testing if a HValue properly dominates another.
- typedef bool (*DominanceTest)(HValue*, HValue*);
-
- // Simple implementation of DominanceTest implemented walking the chain
- // of Hinstructions (used in UpdateRedefinedUsesInner).
- static bool Dominates(HValue* dominator, HValue* dominated);
-
- // A fast implementation of DominanceTest that works only for the
- // "current" instruction in the SetupInformativeDefinitions() phase.
- // During that phase we use a flag to mark processed instructions, and by
- // checking the flag we can quickly test if an instruction comes before or
- // after the "current" one.
- static bool TestDominanceUsingProcessedFlag(HValue* dominator,
- HValue* dominated);
-
- // If we are redefining an operand, update all its dominated uses (the
- // function that checks if a use is dominated is the template argument).
- template<DominanceTest TestDominance>
- void UpdateRedefinedUsesInner() {
- HValue* input = RedefinedOperand();
- if (input != NULL) {
- for (HUseIterator uses = input->uses(); !uses.Done(); uses.Advance()) {
- HValue* use = uses.value();
- if (TestDominance(this, use)) {
- use->SetOperandAt(uses.index(), this);
- }
- }
- }
- }
-
- // Informative definitions can override this method to state any numeric
- // relation they provide on the redefined value.
- // Returns true if it is guaranteed that:
- // ((this + offset) >> scale) relation other
- virtual bool IsRelationTrueInternal(NumericRelation relation,
- HValue* other,
- int offset = 0,
- int scale = 0) {
- return false;
- }
-
static GVNFlagSet AllDependsOnFlagSet() {
GVNFlagSet result;
// Create changes mask.
@@ -1350,12 +1105,12 @@ class HValue: public ZoneObject {
}
-class HInstruction: public HValue {
+class HInstruction : public HValue {
public:
HInstruction* next() const { return next_; }
HInstruction* previous() const { return previous_; }
- virtual void PrintTo(StringStream* stream);
+ virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
virtual void PrintDataTo(StringStream* stream);
bool IsLinked() const { return block() != NULL; }
@@ -1377,7 +1132,7 @@ class HInstruction: public HValue {
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
virtual bool IsCall() { return false; }
@@ -1393,7 +1148,7 @@ class HInstruction: public HValue {
SetGVNFlag(kDependsOnOsrEntries);
}
- virtual void DeleteFromGraph() { Unlink(); }
+ virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
private:
void InitializeAsFirst(HBasicBlock* block) {
@@ -1414,26 +1169,30 @@ class HInstruction: public HValue {
template<int V>
class HTemplateInstruction : public HInstruction {
public:
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return V; }
+ virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+ return inputs_[i];
+ }
protected:
HTemplateInstruction(HType type = HType::Tagged()) : HInstruction(type) {}
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+ virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
EmbeddedContainer<HValue*, V> inputs_;
};
-class HControlInstruction: public HInstruction {
+class HControlInstruction : public HInstruction {
public:
virtual HBasicBlock* SuccessorAt(int i) = 0;
virtual int SuccessorCount() = 0;
virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HBasicBlock* FirstSuccessor() {
return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
@@ -1446,7 +1205,7 @@ class HControlInstruction: public HInstruction {
};
-class HSuccessorIterator BASE_EMBEDDED {
+class HSuccessorIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HSuccessorIterator(HControlInstruction* instr)
: instr_(instr), current_(0) { }
@@ -1462,18 +1221,22 @@ class HSuccessorIterator BASE_EMBEDDED {
template<int S, int V>
-class HTemplateControlInstruction: public HControlInstruction {
+class HTemplateControlInstruction : public HControlInstruction {
public:
- int SuccessorCount() { return S; }
- HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
- void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
+ int SuccessorCount() V8_OVERRIDE { return S; }
+ HBasicBlock* SuccessorAt(int i) V8_OVERRIDE { return successors_[i]; }
+ void SetSuccessorAt(int i, HBasicBlock* block) V8_OVERRIDE {
+ successors_[i] = block;
+ }
- int OperandCount() { return V; }
- HValue* OperandAt(int i) const { return inputs_[i]; }
+ int OperandCount() V8_OVERRIDE { return V; }
+ HValue* OperandAt(int i) const V8_OVERRIDE { return inputs_[i]; }
protected:
- void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+ void InternalSetOperandAt(int i, HValue* value) V8_OVERRIDE {
+ inputs_[i] = value;
+ }
private:
EmbeddedContainer<HBasicBlock*, S> successors_;
@@ -1481,9 +1244,9 @@ class HTemplateControlInstruction: public HControlInstruction {
};
-class HBlockEntry: public HTemplateInstruction<0> {
+class HBlockEntry V8_FINAL : public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1491,7 +1254,7 @@ class HBlockEntry: public HTemplateInstruction<0> {
};
-class HDummyUse: public HTemplateInstruction<1> {
+class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
public:
explicit HDummyUse(HValue* value)
: HTemplateInstruction<1>(HType::Smi()) {
@@ -1503,86 +1266,44 @@ class HDummyUse: public HTemplateInstruction<1> {
HValue* value() { return OperandAt(0); }
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(DummyUse);
};
-class HNumericConstraint : public HTemplateInstruction<2> {
- public:
- static HNumericConstraint* AddToGraph(HValue* constrained_value,
- NumericRelation relation,
- HValue* related_value,
- HInstruction* insertion_point = NULL);
-
- HValue* constrained_value() { return OperandAt(0); }
- HValue* related_value() { return OperandAt(1); }
- NumericRelation relation() { return relation_; }
-
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return true; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual bool IsRelationTrueInternal(NumericRelation other_relation,
- HValue* other_related_value,
- int offset = 0,
- int scale = 0) {
- if (related_value() == other_related_value) {
- return relation().CompoundImplies(other_relation, offset, scale);
- } else {
- return false;
- }
- }
-
- DECLARE_CONCRETE_INSTRUCTION(NumericConstraint)
-
- private:
- HNumericConstraint(HValue* constrained_value,
- NumericRelation relation,
- HValue* related_value)
- : relation_(relation) {
- SetOperandAt(0, constrained_value);
- SetOperandAt(1, related_value);
- }
-
- NumericRelation relation_;
-};
-
-
-class HDeoptimize: public HTemplateInstruction<0> {
+class HDeoptimize V8_FINAL : public HTemplateInstruction<0> {
public:
- DECLARE_INSTRUCTION_FACTORY_P1(HDeoptimize, Deoptimizer::BailoutType);
+ DECLARE_INSTRUCTION_FACTORY_P2(HDeoptimize, const char*,
+ Deoptimizer::BailoutType);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
+ const char* reason() const { return reason_; }
Deoptimizer::BailoutType type() { return type_; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
private:
- explicit HDeoptimize(Deoptimizer::BailoutType type) : type_(type) {}
+ explicit HDeoptimize(const char* reason, Deoptimizer::BailoutType type)
+ : reason_(reason), type_(type) {}
+ const char* reason_;
Deoptimizer::BailoutType type_;
};
// Inserts an int3/stop break instruction for debugging purposes.
-class HDebugBreak: public HTemplateInstruction<0> {
+class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1590,23 +1311,23 @@ class HDebugBreak: public HTemplateInstruction<0> {
};
-class HGoto: public HTemplateControlInstruction<1, 0> {
+class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
public:
explicit HGoto(HBasicBlock* target) {
SetSuccessorAt(0, target);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto)
};
-class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
+class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
public:
HUnaryControlInstruction(HValue* value,
HBasicBlock* true_target,
@@ -1616,13 +1337,13 @@ class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
SetSuccessorAt(1, false_target);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
};
-class HBranch: public HUnaryControlInstruction {
+class HBranch V8_FINAL : public HUnaryControlInstruction {
public:
HBranch(HValue* value,
ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
@@ -1633,10 +1354,10 @@ class HBranch: public HUnaryControlInstruction {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual Representation observed_input_representation(int index);
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE;
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
@@ -1649,7 +1370,7 @@ class HBranch: public HUnaryControlInstruction {
};
-class HCompareMap: public HUnaryControlInstruction {
+class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
HCompareMap(HValue* value,
Handle<Map> map,
@@ -1660,35 +1381,38 @@ class HCompareMap: public HUnaryControlInstruction {
ASSERT(!map.is_null());
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> map() const { return map_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(CompareMap)
+ protected:
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
Handle<Map> map_;
};
-class HContext: public HTemplateInstruction<0> {
+class HContext V8_FINAL : public HTemplateInstruction<0> {
public:
static HContext* New(Zone* zone) {
return new(zone) HContext();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(Context)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HContext() {
@@ -1696,11 +1420,11 @@ class HContext: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HReturn: public HTemplateControlInstruction<0, 3> {
+class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -1715,11 +1439,11 @@ class HReturn: public HTemplateControlInstruction<0, 3> {
return new(zone) HReturn(value, context, 0);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
HValue* context() { return OperandAt(1); }
@@ -1736,17 +1460,7 @@ class HReturn: public HTemplateControlInstruction<0, 3> {
};
-class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
- public:
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
-};
-
-
-class HUnaryOperation: public HTemplateInstruction<1> {
+class HUnaryOperation : public HTemplateInstruction<1> {
public:
HUnaryOperation(HValue* value, HType type = HType::Tagged())
: HTemplateInstruction<1>(type) {
@@ -1758,11 +1472,11 @@ class HUnaryOperation: public HTemplateInstruction<1> {
}
HValue* value() const { return OperandAt(0); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class HThrow: public HTemplateInstruction<2> {
+class HThrow V8_FINAL : public HTemplateInstruction<2> {
public:
static HThrow* New(Zone* zone,
HValue* context,
@@ -1770,7 +1484,7 @@ class HThrow: public HTemplateInstruction<2> {
return new(zone) HThrow(context, value);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -1788,11 +1502,11 @@ class HThrow: public HTemplateInstruction<2> {
};
-class HUseConst: public HUnaryOperation {
+class HUseConst V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1803,19 +1517,20 @@ class HUseConst: public HUnaryOperation {
};
-class HForceRepresentation: public HTemplateInstruction<1> {
+class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HForceRepresentation, HValue*, Representation);
HValue* value() { return OperandAt(0); }
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation(); // Same as the output representation.
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
@@ -1827,21 +1542,22 @@ class HForceRepresentation: public HTemplateInstruction<1> {
};
-class HChange: public HUnaryOperation {
+class HChange V8_FINAL : public HUnaryOperation {
public:
HChange(HValue* value,
Representation to,
bool is_truncating_to_smi,
- bool is_truncating_to_int32,
- bool allow_undefined_as_nan)
+ bool is_truncating_to_int32)
: HUnaryOperation(value) {
ASSERT(!value->representation().IsNone());
ASSERT(!to.IsNone());
ASSERT(!value->representation().Equals(to));
set_representation(to);
SetFlag(kUseGVN);
- if (allow_undefined_as_nan) SetFlag(kAllowUndefinedAsNaN);
- if (is_truncating_to_smi) SetFlag(kTruncatingToSmi);
+ if (is_truncating_to_smi) {
+ SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
+ }
if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
@@ -1851,50 +1567,52 @@ class HChange: public HUnaryOperation {
}
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual HType CalculateInferredType();
- virtual HValue* Canonicalize();
+ bool can_convert_undefined_to_nan() {
+ return CheckUsesForFlag(kAllowUndefinedAsNaN);
+ }
+
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
+ virtual HType CalculateInferredType() V8_OVERRIDE;
+ virtual HValue* Canonicalize() V8_OVERRIDE;
Representation from() const { return value()->representation(); }
Representation to() const { return representation(); }
- bool allow_undefined_as_nan() const {
- return CheckFlag(kAllowUndefinedAsNaN);
- }
bool deoptimize_on_minus_zero() const {
return CheckFlag(kBailoutOnMinusZero);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return from();
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Change)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !from().IsTagged() || value()->type().IsSmi();
}
};
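// The HChange constructor change above drops the allow_undefined_as_nan
// parameter in favour of the use-derived can_convert_undefined_to_nan()
// query, and makes Smi truncation imply int32 truncation, since every Smi
// fits in an int32. A sketch of the resulting invariant:
//
//   if (change->CheckFlag(kTruncatingToSmi)) {
//     ASSERT(change->CheckFlag(kTruncatingToInt32));
//   }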
-class HClampToUint8: public HUnaryOperation {
+class HClampToUint8 V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HClampToUint8(HValue* value)
@@ -1904,7 +1622,7 @@ class HClampToUint8: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
@@ -1914,7 +1632,7 @@ enum RemovableSimulate {
};
-class HSimulate: public HInstruction {
+class HSimulate V8_FINAL : public HInstruction {
public:
HSimulate(BailoutId ast_id,
int pop_count,
@@ -1926,9 +1644,9 @@ class HSimulate: public HInstruction {
assigned_indexes_(2, zone),
zone_(zone),
removable_(removable) {}
- virtual ~HSimulate() {}
+ ~HSimulate() {}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool HasAstId() const { return !ast_id_.IsNone(); }
BailoutId ast_id() const { return ast_id_; }
@@ -1958,27 +1676,32 @@ class HSimulate: public HInstruction {
}
return -1;
}
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
+ virtual int OperandCount() V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ return values_[index];
+ }
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
void MergeWith(ZoneList<HSimulate*>* list);
bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
+ // Replay effects of this instruction on the given environment.
+ void ReplayEnvironment(HEnvironment* env);
+
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
Handle<JSFunction> closure() const { return closure_; }
#endif
protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
+ virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
values_[index] = value;
}
@@ -2011,7 +1734,7 @@ class HSimulate: public HInstruction {
};
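// ReplayEnvironment() (also added on HCapturedObject below) re-applies the
// pops and pushed values recorded by this simulate to the given
// HEnvironment, presumably so an environment can be reconstructed without
// re-walking the graph.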
-class HEnvironmentMarker: public HTemplateInstruction<1> {
+class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
public:
enum Kind { BIND, LOOKUP };
@@ -2025,11 +1748,11 @@ class HEnvironmentMarker: public HTemplateInstruction<1> {
next_simulate_ = simulate;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
#ifdef DEBUG
void set_closure(Handle<JSFunction> closure) {
@@ -2053,7 +1776,7 @@ class HEnvironmentMarker: public HTemplateInstruction<1> {
};
-class HStackCheck: public HTemplateInstruction<1> {
+class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
public:
enum Type {
kFunctionEntry,
@@ -2064,7 +1787,7 @@ class HStackCheck: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -2103,7 +1826,7 @@ enum InliningKind {
class HArgumentsObject;
-class HEnterInlined: public HTemplateInstruction<0> {
+class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
public:
static HEnterInlined* New(Zone* zone,
HValue* context,
@@ -2122,7 +1845,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> closure() const { return closure_; }
int arguments_count() const { return arguments_count_; }
@@ -2132,7 +1855,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
InliningKind inlining_kind() const { return inlining_kind_; }
bool undefined_receiver() const { return undefined_receiver_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -2173,11 +1896,11 @@ class HEnterInlined: public HTemplateInstruction<0> {
};
-class HLeaveInlined: public HTemplateInstruction<0> {
+class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
public:
HLeaveInlined() { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -2185,11 +1908,11 @@ class HLeaveInlined: public HTemplateInstruction<0> {
};
-class HPushArgument: public HUnaryOperation {
+class HPushArgument V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HPushArgument, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -2204,39 +1927,39 @@ class HPushArgument: public HUnaryOperation {
};
-class HThisFunction: public HTemplateInstruction<0> {
+class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
public:
HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HOuterContext: public HUnaryOperation {
+class HOuterContext V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HOuterContext, HValue*);
DECLARE_CONCRETE_INSTRUCTION(OuterContext);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HOuterContext(HValue* inner) : HUnaryOperation(inner) {
@@ -2244,11 +1967,11 @@ class HOuterContext: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HDeclareGlobals: public HUnaryOperation {
+class HDeclareGlobals V8_FINAL : public HUnaryOperation {
public:
HDeclareGlobals(HValue* context,
Handle<FixedArray> pairs,
@@ -2273,7 +1996,7 @@ class HDeclareGlobals: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -2283,7 +2006,7 @@ class HDeclareGlobals: public HUnaryOperation {
};
-class HGlobalObject: public HUnaryOperation {
+class HGlobalObject V8_FINAL : public HUnaryOperation {
public:
explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
set_representation(Representation::Tagged());
@@ -2296,30 +2019,30 @@ class HGlobalObject: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HGlobalReceiver: public HUnaryOperation {
+class HGlobalReceiver V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HGlobalReceiver, HValue*);
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HGlobalReceiver(HValue* global_object)
@@ -2328,12 +2051,12 @@ class HGlobalReceiver: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
template <int V>
-class HCall: public HTemplateInstruction<V> {
+class HCall : public HTemplateInstruction<V> {
public:
// The argument count includes the receiver.
explicit HCall<V>(int argument_count) : argument_count_(argument_count) {
@@ -2341,35 +2064,38 @@ class HCall: public HTemplateInstruction<V> {
this->SetAllSideEffects();
}
- virtual HType CalculateInferredType() { return HType::Tagged(); }
+ virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+ return HType::Tagged();
+ }
virtual int argument_count() const { return argument_count_; }
- virtual bool IsCall() { return true; }
+ virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; }
private:
int argument_count_;
};
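// Note that HCall itself stays derivable while individual virtuals such as
// CalculateInferredType() and IsCall() are sealed with V8_FINAL V8_OVERRIDE:
// subclasses may add operands and state, but these answers are fixed for
// every kind of call.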
-class HUnaryCall: public HCall<1> {
+class HUnaryCall : public HCall<1> {
public:
HUnaryCall(HValue* value, int argument_count)
: HCall<1>(argument_count) {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
};
-class HBinaryCall: public HCall<2> {
+class HBinaryCall : public HCall<2> {
public:
HBinaryCall(HValue* first, HValue* second, int argument_count)
: HCall<2>(argument_count) {
@@ -2377,9 +2103,10 @@ class HBinaryCall: public HCall<2> {
SetOperandAt(1, second);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
return Representation::Tagged();
}
@@ -2388,7 +2115,7 @@ class HBinaryCall: public HCall<2> {
};
-class HInvokeFunction: public HBinaryCall {
+class HInvokeFunction V8_FINAL : public HBinaryCall {
public:
HInvokeFunction(HValue* context, HValue* function, int argument_count)
: HBinaryCall(context, function, argument_count) {
@@ -2420,10 +2147,6 @@ class HInvokeFunction: public HBinaryCall {
known_function, argument_count);
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
HValue* context() { return first(); }
HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }
@@ -2437,7 +2160,7 @@ class HInvokeFunction: public HBinaryCall {
};
-class HCallConstantFunction: public HCall<0> {
+class HCallConstantFunction V8_FINAL : public HCall<0> {
public:
HCallConstantFunction(Handle<JSFunction> function, int argument_count)
: HCall<0>(argument_count),
@@ -2449,12 +2172,12 @@ class HCallConstantFunction: public HCall<0> {
bool IsApplyFunction() const {
return function_->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply);
+ function_->GetIsolate()->builtins()->builtin(Builtins::kFunctionApply);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -2466,16 +2189,12 @@ class HCallConstantFunction: public HCall<0> {
};
-class HCallKeyed: public HBinaryCall {
+class HCallKeyed V8_FINAL : public HBinaryCall {
public:
HCallKeyed(HValue* context, HValue* key, int argument_count)
: HBinaryCall(context, key, argument_count) {
}
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
HValue* context() { return first(); }
HValue* key() { return second(); }
@@ -2483,29 +2202,25 @@ class HCallKeyed: public HBinaryCall {
};
-class HCallNamed: public HUnaryCall {
+class HCallNamed V8_FINAL : public HUnaryCall {
public:
HCallNamed(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed)
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
private:
Handle<String> name_;
};
-class HCallFunction: public HBinaryCall {
+class HCallFunction V8_FINAL : public HBinaryCall {
public:
HCallFunction(HValue* context, HValue* function, int argument_count)
: HBinaryCall(context, function, argument_count) {
@@ -2521,15 +2236,11 @@ class HCallFunction: public HBinaryCall {
HValue* context() { return first(); }
HValue* function() { return second(); }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
DECLARE_CONCRETE_INSTRUCTION(CallFunction)
};
-class HCallGlobal: public HUnaryCall {
+class HCallGlobal V8_FINAL : public HUnaryCall {
public:
HCallGlobal(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
@@ -2542,15 +2253,11 @@ class HCallGlobal: public HUnaryCall {
return new(zone) HCallGlobal(context, name, argument_count);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return value(); }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
private:
@@ -2558,19 +2265,19 @@ class HCallGlobal: public HUnaryCall {
};
-class HCallKnownGlobal: public HCall<0> {
+class HCallKnownGlobal V8_FINAL : public HCall<0> {
public:
HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
: HCall<0>(argument_count),
target_(target),
formal_parameter_count_(target->shared()->formal_parameter_count()) { }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> target() const { return target_; }
int formal_parameter_count() const { return formal_parameter_count_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -2582,15 +2289,10 @@ class HCallKnownGlobal: public HCall<0> {
};
-class HCallNew: public HBinaryCall {
+class HCallNew V8_FINAL : public HBinaryCall {
public:
HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {
- }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ : HBinaryCall(context, constructor, argument_count) {}
HValue* context() { return first(); }
HValue* constructor() { return second(); }
@@ -2599,15 +2301,18 @@ class HCallNew: public HBinaryCall {
};
-class HCallNewArray: public HCallNew {
+class HCallNewArray V8_FINAL : public HBinaryCall {
public:
HCallNewArray(HValue* context, HValue* constructor, int argument_count,
Handle<Cell> type_cell, ElementsKind elements_kind)
- : HCallNew(context, constructor, argument_count),
+ : HBinaryCall(context, constructor, argument_count),
elements_kind_(elements_kind),
type_cell_(type_cell) {}
- virtual void PrintDataTo(StringStream* stream);
+ HValue* context() { return first(); }
+ HValue* constructor() { return second(); }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Cell> property_cell() const {
return type_cell_;
@@ -2623,7 +2328,7 @@ class HCallNewArray: public HCallNew {
};
-class HCallRuntime: public HCall<1> {
+class HCallRuntime V8_FINAL : public HCall<1> {
public:
static HCallRuntime* New(Zone* zone,
HValue* context,
@@ -2633,13 +2338,13 @@ class HCallRuntime: public HCall<1> {
return new(zone) HCallRuntime(context, name, c_function, argument_count);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* context() { return OperandAt(0); }
const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -2659,18 +2364,18 @@ class HCallRuntime: public HCall<1> {
};
-class HMapEnumLength: public HUnaryOperation {
+class HMapEnumLength V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HMapEnumLength(HValue* value)
@@ -2680,11 +2385,11 @@ class HMapEnumLength: public HUnaryOperation {
SetGVNFlag(kDependsOnMaps);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HElementsKind: public HUnaryOperation {
+class HElementsKind V8_FINAL : public HUnaryOperation {
public:
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
@@ -2692,52 +2397,21 @@ class HElementsKind: public HUnaryOperation {
SetGVNFlag(kDependsOnElementsKind);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HBitNot: public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HBitNot, HValue*);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
-
- virtual HValue* Canonicalize();
-
- DECLARE_CONCRETE_INSTRUCTION(BitNot)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- explicit HBitNot(HValue* value)
- : HUnaryOperation(value, HType::TaggedNumber()) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- SetFlag(kTruncatingToInt32);
- SetFlag(kAllowUndefinedAsNaN);
- }
-
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HUnaryMathOperation: public HTemplateInstruction<2> {
+class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -2747,11 +2421,12 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -2775,25 +2450,10 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) {
- if (flexible_int() && !new_rep.IsSmi()) {
- new_rep = Representation::Integer32();
- }
- HValue::UpdateRepresentation(new_rep, h_infer, reason);
- }
-
- virtual void RepresentationChanged(Representation new_rep) {
- if (flexible_int() && new_rep.IsInteger32()) {
- ClearFlag(kFlexibleRepresentation);
- }
- }
-
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual HValue* Canonicalize();
- virtual Representation RepresentationFromInputs();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@@ -2801,16 +2461,12 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
return op_ == b->op();
}
private:
- bool flexible_int() {
- return op_ == kMathFloor || op_ == kMathRound;
- }
-
HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
: HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) {
SetOperandAt(0, context);
@@ -2818,8 +2474,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
switch (op) {
case kMathFloor:
case kMathRound:
- set_representation(Representation::Smi());
- SetFlag(kFlexibleRepresentation);
+ set_representation(Representation::Integer32());
break;
case kMathAbs:
// Not setting representation here: it is None intentionally.
@@ -2848,28 +2503,28 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
BuiltinFunctionId op_;
};
-class HLoadExternalArrayPointer: public HUnaryOperation {
+class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::None();
}
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HLoadExternalArrayPointer(HValue* value)
@@ -2882,11 +2537,11 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HCheckMaps: public HTemplateInstruction<2> {
+class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
public:
static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
Handle<Map> map, CompilationInfo* info,
@@ -2896,7 +2551,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
HValue *typecheck = NULL) {
HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck);
for (int i = 0; i < maps->length(); i++) {
- check_map->map_set_.Add(maps->at(i), zone);
+ check_map->Add(maps->at(i), zone);
}
check_map->map_set_.Sort();
return check_map;
@@ -2904,23 +2559,28 @@ class HCheckMaps: public HTemplateInstruction<2> {
bool CanOmitMapChecks() { return omit_; }
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator);
- virtual void PrintDataTo(StringStream* stream);
+ HValue* dominator) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
SmallMapList* map_set() { return &map_set_; }
+ ZoneList<UniqueValueId>* map_unique_ids() { return &map_unique_ids_; }
+
+ bool has_migration_target() {
+ return has_migration_target_;
+ }
- virtual void FinalizeUniqueValueId();
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
ASSERT_EQ(map_set_.length(), map_unique_ids_.length());
HCheckMaps* b = HCheckMaps::cast(other);
// Relies on the fact that map_set has been sorted before.
@@ -2935,11 +2595,21 @@ class HCheckMaps: public HTemplateInstruction<2> {
return true;
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
+ void Add(Handle<Map> map, Zone* zone) {
+ map_set_.Add(map, zone);
+ if (!has_migration_target_ && map->is_migration_target()) {
+ has_migration_target_ = true;
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+ }
+
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
: HTemplateInstruction<2>(value->type()),
- omit_(false), map_unique_ids_(0, zone) {
+ omit_(false), has_migration_target_(false), map_unique_ids_(0, zone) {
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
// TODO(titzer): do GVN flags already express this dependency?
@@ -2955,63 +2625,75 @@ class HCheckMaps: public HTemplateInstruction<2> {
omit_ = true;
for (int i = 0; i < map_set_.length(); i++) {
Handle<Map> map = map_set_.at(i);
+ if (!map->CanTransition()) continue;
map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
info);
}
}
bool omit_;
+ bool has_migration_target_;
SmallMapList map_set_;
ZoneList<UniqueValueId> map_unique_ids_;
};
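// Two things worth noting in the new HCheckMaps: maps are now added through
// the private Add() helper, which detects maps that are migration targets
// and conservatively marks the check with kChangesNewSpacePromotion (a
// deprecated-map migration can allocate); and RedefinedOperandIndex()
// returning 0 declares that the check redefines (refines) its checked value.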
-class HCheckFunction: public HUnaryOperation {
+class HCheckValue V8_FINAL : public HUnaryOperation {
public:
- DECLARE_INSTRUCTION_FACTORY_P2(HCheckFunction, HValue*, Handle<JSFunction>);
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Handle<JSFunction> target) {
+ bool in_new_space = zone->isolate()->heap()->InNewSpace(*target);
+ HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
+ return check;
+ }
+ static HCheckValue* New(Zone* zone, HValue* context,
+ HValue* value, Handle<Map> map, UniqueValueId id) {
+ HCheckValue* check = new(zone) HCheckValue(value, map, false);
+ check->object_unique_id_ = id;
+ return check;
+ }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
- virtual void FinalizeUniqueValueId() {
- target_unique_id_ = UniqueValueId(target_);
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ object_unique_id_ = UniqueValueId(object_);
}
- Handle<JSFunction> target() const { return target_; }
- bool target_in_new_space() const { return target_in_new_space_; }
+ Handle<HeapObject> object() const { return object_; }
+ bool object_in_new_space() const { return object_in_new_space_; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue)
protected:
- virtual bool DataEquals(HValue* other) {
- HCheckFunction* b = HCheckFunction::cast(other);
- return target_unique_id_ == b->target_unique_id_;
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HCheckValue* b = HCheckValue::cast(other);
+ return object_unique_id_ == b->object_unique_id_;
}
private:
- HCheckFunction(HValue* value, Handle<JSFunction> function)
+ HCheckValue(HValue* value, Handle<HeapObject> object, bool in_new_space)
: HUnaryOperation(value, value->type()),
- target_(function), target_unique_id_() {
+ object_(object), object_in_new_space_(in_new_space) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function);
}
- Handle<JSFunction> target_;
- UniqueValueId target_unique_id_;
- bool target_in_new_space_;
+ Handle<HeapObject> object_;
+ UniqueValueId object_unique_id_;
+ bool object_in_new_space_;
};
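// HCheckValue generalizes the old HCheckFunction from JSFunction targets to
// arbitrary heap objects (e.g. maps), compared by UniqueValueId. A usage
// sketch of the first factory overload, with placeholder arguments:
//
//   HCheckValue* check =
//       HCheckValue::New(zone, context, receiver, known_function);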
-class HCheckInstanceType: public HUnaryOperation {
+class HCheckInstanceType V8_FINAL : public HUnaryOperation {
public:
static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
@@ -3027,13 +2709,13 @@ class HCheckInstanceType: public HUnaryOperation {
return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
@@ -3045,11 +2727,13 @@ class HCheckInstanceType: public HUnaryOperation {
// TODO(ager): It could be nice to allow the omission of instance
// type checks if we have already performed an instance type check
// with a larger range.
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HCheckInstanceType* b = HCheckInstanceType::cast(other);
return check_ == b->check_;
}
+ virtual int RedefinedOperandIndex() { return 0; }
+
private:
enum Check {
IS_SPEC_OBJECT,
@@ -3071,15 +2755,15 @@ class HCheckInstanceType: public HUnaryOperation {
};
-class HCheckSmi: public HUnaryOperation {
+class HCheckSmi V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual HValue* Canonicalize() {
+ virtual HValue* Canonicalize() V8_OVERRIDE {
HType value_type = value()->type();
if (value_type.IsSmi()) {
return NULL;
@@ -3090,7 +2774,7 @@ class HCheckSmi: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
@@ -3100,14 +2784,14 @@ class HCheckSmi: public HUnaryOperation {
};
-class HIsNumberAndBranch: public HUnaryControlInstruction {
+class HIsNumberAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsNumberAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) {
SetFlag(kFlexibleRepresentation);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -3115,26 +2799,27 @@ class HIsNumberAndBranch: public HUnaryControlInstruction {
};
-class HCheckHeapObject: public HUnaryOperation {
+class HCheckHeapObject V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
- virtual HValue* Canonicalize() {
+ virtual HValue* Canonicalize() V8_OVERRIDE {
return value()->type().IsHeapObject() ? NULL : this;
}
DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HCheckHeapObject(HValue* value)
@@ -3145,87 +2830,6 @@ class HCheckHeapObject: public HUnaryOperation {
};
-class HCheckPrototypeMaps: public HTemplateInstruction<0> {
- public:
- static HCheckPrototypeMaps* New(Zone* zone,
- HValue* context,
- Handle<JSObject> prototype,
- Handle<JSObject> holder,
- CompilationInfo* info) {
- return new(zone) HCheckPrototypeMaps(prototype, holder, zone, info);
- }
-
- ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; }
-
- ZoneList<Handle<Map> >* maps() { return &maps_; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual intptr_t Hashcode() {
- return first_prototype_unique_id_.Hashcode() * 17 +
- last_prototype_unique_id_.Hashcode();
- }
-
- virtual void FinalizeUniqueValueId() {
- first_prototype_unique_id_ = UniqueValueId(prototypes_.first());
- last_prototype_unique_id_ = UniqueValueId(prototypes_.last());
- }
-
- bool CanOmitPrototypeChecks() { return can_omit_prototype_maps_; }
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
- return first_prototype_unique_id_ == b->first_prototype_unique_id_ &&
- last_prototype_unique_id_ == b->last_prototype_unique_id_;
- }
-
- private:
- HCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder,
- Zone* zone,
- CompilationInfo* info)
- : prototypes_(2, zone),
- maps_(2, zone),
- first_prototype_unique_id_(),
- last_prototype_unique_id_(),
- can_omit_prototype_maps_(true) {
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- // Keep a list of all objects on the prototype chain up to the holder
- // and the expected maps.
- while (true) {
- prototypes_.Add(prototype, zone);
- Handle<Map> map(prototype->map());
- maps_.Add(map, zone);
- can_omit_prototype_maps_ &= map->CanOmitPrototypeChecks();
- if (prototype.is_identical_to(holder)) break;
- prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype()));
- }
- if (can_omit_prototype_maps_) {
- // Mark in-flight compilation as dependent on those maps.
- for (int i = 0; i < maps()->length(); i++) {
- Handle<Map> map = maps()->at(i);
- map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
- info);
- }
- }
- }
-
- ZoneList<Handle<JSObject> > prototypes_;
- ZoneList<Handle<Map> > maps_;
- UniqueValueId first_prototype_unique_id_;
- UniqueValueId last_prototype_unique_id_;
- bool can_omit_prototype_maps_;
-};
-
-
class InductionVariableData;
@@ -3247,7 +2851,7 @@ class HConstant;
class HBitwise;
-class InductionVariableData : public ZoneObject {
+class InductionVariableData V8_FINAL : public ZoneObject {
public:
class InductionVariableCheck : public ZoneObject {
public:
@@ -3447,7 +3051,7 @@ class InductionVariableData : public ZoneObject {
};
-class HPhi: public HValue {
+class HPhi V8_FINAL : public HValue {
public:
HPhi(int merged_index, Zone* zone)
: inputs_(2, zone),
@@ -3463,19 +3067,22 @@ class HPhi: public HValue {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual Representation RepresentationFromInputs();
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
- virtual Range* InferRange(Zone* zone);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual Representation KnownOptimalRepresentation() {
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
return representation();
}
- virtual HType CalculateInferredType();
- virtual int OperandCount() { return inputs_.length(); }
- virtual HValue* OperandAt(int index) const { return inputs_[index]; }
+ virtual HType CalculateInferredType() V8_OVERRIDE;
+ virtual int OperandCount() V8_OVERRIDE { return inputs_.length(); }
+ virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+ return inputs_[index];
+ }
HValue* GetRedundantReplacement();
void AddInput(HValue* value);
bool HasRealUses();
@@ -3500,12 +3107,10 @@ class HPhi: public HValue {
induction_variable_data_ = InductionVariableData::ExaminePhi(this);
}
- virtual void AddInformativeDefinitions();
-
- virtual void PrintTo(StringStream* stream);
+ virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
#ifdef DEBUG
- virtual void Verify();
+ virtual void Verify() V8_OVERRIDE;
#endif
void InitRealUses(int id);
@@ -3542,7 +3147,7 @@ class HPhi: public HValue {
ASSERT(value->IsPhi());
return reinterpret_cast<HPhi*>(value);
}
- virtual Opcode opcode() const { return HValue::kPhi; }
+ virtual Opcode opcode() const V8_OVERRIDE { return HValue::kPhi; }
void SimplifyConstantInputs();
@@ -3550,16 +3155,11 @@ class HPhi: public HValue {
static const int kInvalidMergedIndex = -1;
protected:
- virtual void DeleteFromGraph();
- virtual void InternalSetOperandAt(int index, HValue* value) {
+ virtual void DeleteFromGraph() V8_OVERRIDE;
+ virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
inputs_[index] = value;
}
- virtual bool IsRelationTrueInternal(NumericRelation relation,
- HValue* other,
- int offset = 0,
- int scale = 0);
-
private:
ZoneList<HValue*> inputs_;
int merged_index_;
@@ -3570,65 +3170,50 @@ class HPhi: public HValue {
InductionVariableData* induction_variable_data_;
// TODO(titzer): we can't eliminate the receiver, as it is needed for
// generating backtraces.
- virtual bool IsDeletable() const { return !IsReceiver(); }
+ virtual bool IsDeletable() const V8_OVERRIDE { return !IsReceiver(); }
};
-class HInductionVariableAnnotation : public HUnaryOperation {
+// Common base class for HArgumentsObject and HCapturedObject.
+class HDematerializedObject : public HInstruction {
public:
- static HInductionVariableAnnotation* AddToGraph(HPhi* phi,
- NumericRelation relation,
- int operand_index);
-
- NumericRelation relation() { return relation_; }
- HValue* induction_base() { return phi_->OperandAt(operand_index_); }
+ HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return true; }
- virtual Representation RequiredInputRepresentation(int index) {
- return representation();
+ virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); }
+ virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+ return values_[index];
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool HasEscapingOperandAt(int index) V8_FINAL V8_OVERRIDE {
+ return false;
+ }
+ virtual Representation RequiredInputRepresentation(
+ int index) V8_FINAL V8_OVERRIDE {
+ return Representation::None();
+ }
- virtual bool IsRelationTrueInternal(NumericRelation other_relation,
- HValue* other_related_value,
- int offset = 0,
- int scale = 0) {
- if (induction_base() == other_related_value) {
- return relation().CompoundImplies(other_relation, offset, scale);
- } else {
- return false;
- }
+ protected:
+ virtual void InternalSetOperandAt(int index,
+ HValue* value) V8_FINAL V8_OVERRIDE {
+ values_[index] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(InductionVariableAnnotation)
+ // List of values tracked by this instruction.
+ ZoneList<HValue*> values_;
private:
- HInductionVariableAnnotation(HPhi* phi,
- NumericRelation relation,
- int operand_index)
- : HUnaryOperation(phi),
- phi_(phi), relation_(relation), operand_index_(operand_index) {
- }
-
- // We need to store the phi both here and in the instruction operand because
- // the operand can change if a new idef of the phi is added between the phi
- // and this instruction (inserting an idef updates every use).
- HPhi* phi_;
- NumericRelation relation_;
- int operand_index_;
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
};
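// HDematerializedObject factors out the operand plumbing shared by
// HArgumentsObject and HCapturedObject below: both describe objects that are
// not materialized at run time, so every operand is non-escaping and the
// instruction is deletable once it has no uses.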
-class HArgumentsObject: public HTemplateInstruction<0> {
+class HArgumentsObject V8_FINAL : public HDematerializedObject {
public:
- static HArgumentsObject* New(Zone* zone,
- HValue* context,
- int count) {
+ static HArgumentsObject* New(Zone* zone, HValue* context, int count) {
return new(zone) HArgumentsObject(count, zone);
}
+ // The values contain a list of all elements in the arguments object
+ // including the receiver object, which is skipped when materializing.
const ZoneList<HValue*>* arguments_values() const { return &values_; }
int arguments_count() const { return values_.length(); }
@@ -3637,44 +3222,85 @@ class HArgumentsObject: public HTemplateInstruction<0> {
SetOperandAt(values_.length() - 1, argument);
}
- virtual int OperandCount() { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
+ private:
+ HArgumentsObject(int count, Zone* zone)
+ : HDematerializedObject(count, zone) {
+ set_representation(Representation::Tagged());
+ SetFlag(kIsArguments);
}
+};
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- values_[index] = value;
+class HCapturedObject V8_FINAL : public HDematerializedObject {
+ public:
+ HCapturedObject(int length, int id, Zone* zone)
+ : HDematerializedObject(length, zone), capture_id_(id) {
+ set_representation(Representation::Tagged());
+ values_.AddBlock(NULL, length, zone); // Resize list.
}
- private:
- HArgumentsObject(int count, Zone* zone) : values_(count, zone) {
- set_representation(Representation::Tagged());
- SetFlag(kIsArguments);
+ // The values contain a list of all in-object properties inside the
+ // captured object and are indexed by field index. Properties in the
+ // properties or elements backing store are not tracked here.
+ const ZoneList<HValue*>* values() const { return &values_; }
+ int length() const { return values_.length(); }
+ int capture_id() const { return capture_id_; }
+
+ // Shortcut for the map value of this captured object.
+ HValue* map_value() const { return values()->first(); }
+
+ void ReuseSideEffectsFromStore(HInstruction* store) {
+ ASSERT(store->HasObservableSideEffects());
+ ASSERT(store->IsStoreNamedField());
+ gvn_flags_.Add(store->gvn_flags());
}
- virtual bool IsDeletable() const { return true; }
+ // Replay effects of this instruction on the given environment.
+ void ReplayEnvironment(HEnvironment* env);
- ZoneList<HValue*> values_;
+ DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
+
+ private:
+ int capture_id_;
};
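// HCapturedObject models an object whose allocation has been eliminated
// (escape analysis); capture_id() presumably identifies the captured
// allocation. Its tracked field values can be replayed into an environment
// at deopt points via ReplayEnvironment(), and ReuseSideEffectsFromStore()
// copies the GVN side-effect flags of a store it stands in for.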
-class HConstant: public HTemplateInstruction<0> {
+class HConstant V8_FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
+ DECLARE_INSTRUCTION_FACTORY_P2(HConstant, Handle<Map>, UniqueValueId);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
- Handle<Object> handle() {
+ static HConstant* CreateAndInsertAfter(Zone* zone,
+ HValue* context,
+ int32_t value,
+ Representation representation,
+ HInstruction* instruction) {
+ HConstant* new_constant =
+ HConstant::New(zone, context, value, representation);
+ new_constant->InsertAfter(instruction);
+ return new_constant;
+ }
+
+ static HConstant* CreateAndInsertBefore(Zone* zone,
+ HValue* context,
+ int32_t value,
+ Representation representation,
+ HInstruction* instruction) {
+ HConstant* new_constant =
+ HConstant::New(zone, context, value, representation);
+ new_constant->InsertBefore(instruction);
+ return new_constant;
+ }
+
+ Handle<Object> handle(Isolate* isolate) {
if (handle_.is_null()) {
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = isolate->factory();
// Default arguments to is_not_in_new_space depend on this heap number
// being tenured so that it's guaranteed not to be located in new space.
handle_ = factory->NewNumber(double_value_, TENURED);
@@ -3684,9 +3310,10 @@ class HConstant: public HTemplateInstruction<0> {
return handle_;
}
- bool InstanceOf(Handle<Map> map) {
- return handle_->IsJSObject() &&
- Handle<JSObject>::cast(handle_)->map() == *map;
+ bool HasMap(Handle<Map> map) {
+ Handle<Object> constant_object = handle(map->GetIsolate());
+ return constant_object->IsHeapObject() &&
+ Handle<HeapObject>::cast(constant_object)->map() == *map;
}
bool IsSpecialDouble() const {
@@ -3710,38 +3337,40 @@ class HConstant: public HTemplateInstruction<0> {
}
return false;
}
+ if (has_external_reference_value_) {
+ return false;
+ }
ASSERT(!handle_.is_null());
Heap* heap = isolate()->heap();
- ASSERT(unique_id_ != UniqueValueId(heap->minus_zero_value()));
- ASSERT(unique_id_ != UniqueValueId(heap->nan_value()));
- return unique_id_ == UniqueValueId(heap->undefined_value()) ||
- unique_id_ == UniqueValueId(heap->null_value()) ||
- unique_id_ == UniqueValueId(heap->true_value()) ||
- unique_id_ == UniqueValueId(heap->false_value()) ||
- unique_id_ == UniqueValueId(heap->the_hole_value()) ||
- unique_id_ == UniqueValueId(heap->empty_string());
+ ASSERT(unique_id_ != UniqueValueId::minus_zero_value(heap));
+ ASSERT(unique_id_ != UniqueValueId::nan_value(heap));
+ return unique_id_ == UniqueValueId::undefined_value(heap) ||
+ unique_id_ == UniqueValueId::null_value(heap) ||
+ unique_id_ == UniqueValueId::true_value(heap) ||
+ unique_id_ == UniqueValueId::false_value(heap) ||
+ unique_id_ == UniqueValueId::the_hole_value(heap) ||
+ unique_id_ == UniqueValueId::empty_string(heap);
}
bool IsCell() const {
return is_cell_;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
- virtual Representation KnownOptimalRepresentation() {
- if (HasSmiValue() && kSmiValueSize == 31) return Representation::Smi();
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+ if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
if (HasInteger32Value()) return Representation::Integer32();
if (HasNumberValue()) return Representation::Double();
if (HasExternalReferenceValue()) return Representation::External();
return Representation::Tagged();
}
- virtual bool EmitAtUses();
- virtual void PrintDataTo(StringStream* stream);
- bool IsInteger() { return handle()->IsSmi(); }
+ virtual bool EmitAtUses() V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
@@ -3794,9 +3423,10 @@ class HConstant: public HTemplateInstruction<0> {
return external_reference_value_;
}
+ bool HasBooleanValue() const { return type_.IsBoolean(); }
bool BooleanValue() const { return boolean_value_; }
- virtual intptr_t Hashcode() {
+ virtual intptr_t Hashcode() V8_OVERRIDE {
if (has_int32_value_) {
return static_cast<intptr_t>(int32_value_);
} else if (has_double_value_) {
@@ -3809,7 +3439,7 @@ class HConstant: public HTemplateInstruction<0> {
}
}
- virtual void FinalizeUniqueValueId() {
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
if (!has_double_value_ && !has_external_reference_value_) {
ASSERT(!handle_.is_null());
unique_id_ = UniqueValueId(handle_);
@@ -3822,15 +3452,15 @@ class HConstant: public HTemplateInstruction<0> {
}
#ifdef DEBUG
- virtual void Verify() { }
+ virtual void Verify() V8_OVERRIDE { }
#endif
DECLARE_CONCRETE_INSTRUCTION(Constant)
protected:
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HConstant* other_constant = HConstant::cast(other);
if (has_int32_value_) {
return other_constant->has_int32_value_ &&
@@ -3869,11 +3499,13 @@ class HConstant: public HTemplateInstruction<0> {
bool is_not_in_new_space,
bool is_cell,
bool boolean_value);
+ HConstant(Handle<Map> handle,
+ UniqueValueId unique_id);
explicit HConstant(ExternalReference reference);
void Initialize(Representation r);
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
// If this is a numerical constant, handle_ either points to the
// HeapObject the constant originated from or is null. If the
@@ -3901,7 +3533,7 @@ class HConstant: public HTemplateInstruction<0> {
};
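// A usage sketch of the new constant helpers (the value and anchor are
// placeholders):
//
//   HConstant* index = HConstant::CreateAndInsertBefore(
//       zone, context, 0, Representation::Smi(), bounds_check);
//
// which builds the constant with HConstant::New and splices it into the
// instruction list next to the anchor in a single step.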
-class HBinaryOperation: public HTemplateInstruction<3> {
+class HBinaryOperation : public HTemplateInstruction<3> {
public:
HBinaryOperation(HValue* context, HValue* left, HValue* right,
HType type = HType::Tagged())
@@ -3951,29 +3583,30 @@ class HBinaryOperation: public HTemplateInstruction<3> {
observed_output_representation_ = observed;
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
if (index == 0) return Representation::Tagged();
return observed_input_representation_[index - 1];
}
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
? Representation::Integer32() : new_rep;
HValue::UpdateRepresentation(rep, h_infer, reason);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
- virtual Representation RepresentationFromInputs();
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE;
Representation RepresentationFromOutput();
- virtual void AssumeRepresentation(Representation r);
+ virtual void AssumeRepresentation(Representation r) V8_OVERRIDE;
virtual bool IsCommutative() const { return false; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) return Representation::Tagged();
return representation();
}
@@ -3988,20 +3621,20 @@ class HBinaryOperation: public HTemplateInstruction<3> {
};
-class HWrapReceiver: public HTemplateInstruction<2> {
+class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* receiver() { return OperandAt(0); }
HValue* function() { return OperandAt(1); }
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
@@ -4014,7 +3647,7 @@ class HWrapReceiver: public HTemplateInstruction<2> {
};
-class HApplyArguments: public HTemplateInstruction<4> {
+class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
public:
HApplyArguments(HValue* function,
HValue* receiver,
@@ -4028,7 +3661,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// The length is untagged; all other inputs are tagged.
return (index == 2)
? Representation::Integer32()
@@ -4044,20 +3677,20 @@ class HApplyArguments: public HTemplateInstruction<4> {
};
-class HArgumentsElements: public HTemplateInstruction<0> {
+class HArgumentsElements V8_FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
bool from_inlined() const { return from_inlined_; }
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
@@ -4067,24 +3700,24 @@ class HArgumentsElements: public HTemplateInstruction<0> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
bool from_inlined_;
};
-class HArgumentsLength: public HUnaryOperation {
+class HArgumentsLength V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
@@ -4092,11 +3725,11 @@ class HArgumentsLength: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HAccessArgumentsAt: public HTemplateInstruction<3> {
+class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
public:
HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
set_representation(Representation::Tagged());
@@ -4106,9 +3739,9 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
SetOperandAt(2, index);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// The arguments elements array is considered tagged.
return index == 0
? Representation::Tagged()
@@ -4121,14 +3754,14 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
class HBoundsCheckBaseIndexInformation;
-class HBoundsCheck: public HTemplateInstruction<2> {
+class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
@@ -4138,12 +3771,6 @@ class HBoundsCheck: public HTemplateInstruction<2> {
HValue* base() { return base_; }
int offset() { return offset_; }
int scale() { return scale_; }
- bool index_can_increase() {
- return (responsibility_direction_ & DIRECTION_LOWER) == 0;
- }
- bool index_can_decrease() {
- return (responsibility_direction_ & DIRECTION_UPPER) == 0;
- }
void ApplyIndexChange();
bool DetectCompoundIndex() {
@@ -4163,44 +3790,34 @@ class HBoundsCheck: public HTemplateInstruction<2> {
}
}
- virtual Representation RequiredInputRepresentation(int arg_index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual bool IsRelationTrueInternal(NumericRelation relation,
- HValue* related_value,
- int offset = 0,
- int scale = 0);
-
- virtual void PrintDataTo(StringStream* stream);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
HValue* index() { return OperandAt(0); }
HValue* length() { return OperandAt(1); }
bool allow_equality() { return allow_equality_; }
void set_allow_equality(bool v) { allow_equality_ = v; }
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return skip_check(); }
- virtual void AddInformativeDefinitions();
+ virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE {
+ return skip_check();
+ }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
protected:
friend class HBoundsCheckBaseIndexInformation;
- virtual void SetResponsibilityForRange(RangeGuaranteeDirection direction) {
- responsibility_direction_ = static_cast<RangeGuaranteeDirection>(
- responsibility_direction_ | direction);
- }
-
- virtual bool DataEquals(HValue* other) { return true; }
- virtual void TryGuaranteeRangeChanging(RangeEvaluationContext* context);
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
bool skip_check_;
HValue* base_;
int offset_;
int scale_;
- RangeGuaranteeDirection responsibility_direction_;
bool allow_equality_;
private:
@@ -4211,7 +3828,6 @@ class HBoundsCheck: public HTemplateInstruction<2> {
HBoundsCheck(HValue* index, HValue* length)
: skip_check_(false),
base_(NULL), offset_(0), scale_(0),
- responsibility_direction_(DIRECTION_NONE),
allow_equality_(false) {
SetOperandAt(0, index);
SetOperandAt(1, length);
@@ -4219,13 +3835,14 @@ class HBoundsCheck: public HTemplateInstruction<2> {
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return skip_check() && !FLAG_debug_code;
}
};
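The base()/offset()/scale() triple above records a decomposition of the checked index so that a single check on the base value can subsume the checks on compound indices; the TryDecompose comments further down in this diff name the shape as ((base + offset) >> scale). A minimal illustrative sketch of that shape follows, with a hypothetical struct and helper standing in for the real HValue/DecompositionResult machinery:

#include <cstdint>

// Hypothetical mirror of the decomposition HBoundsCheck stores: a compound
// index such as (i + 4) >> 1 is described relative to the plain value i, so
// one bounds check on i can cover the compound accesses derived from it.
struct IndexDecomposition {
  int64_t base;    // stands in for the HValue* base
  int32_t offset;  // constant added or subtracted before the shift
  int32_t scale;   // constant right-shift amount
};

// Recomposes the index under the ((base + offset) >> scale) reading.
inline int64_t RecomposeIndex(const IndexDecomposition& d) {
  return (d.base + d.offset) >> d.scale;
}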
-class HBoundsCheckBaseIndexInformation: public HTemplateInstruction<2> {
+class HBoundsCheckBaseIndexInformation V8_FINAL
+ : public HTemplateInstruction<2> {
public:
explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
DecompositionResult decomposition;
@@ -4242,30 +3859,18 @@ class HBoundsCheckBaseIndexInformation: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
- virtual Representation RequiredInputRepresentation(int arg_index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual bool IsRelationTrueInternal(NumericRelation relation,
- HValue* related_value,
- int offset = 0,
- int scale = 0);
- virtual void PrintDataTo(StringStream* stream);
-
- virtual int RedefinedOperandIndex() { return 0; }
- virtual bool IsPurelyInformativeDefinition() { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- protected:
- virtual void SetResponsibilityForRange(RangeGuaranteeDirection direction) {
- bounds_check()->SetResponsibilityForRange(direction);
- }
- virtual void TryGuaranteeRangeChanging(RangeEvaluationContext* context) {
- bounds_check()->TryGuaranteeRangeChanging(context);
- }
+ virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
+ virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE { return true; }
};
-class HBitwiseBinaryOperation: public HBinaryOperation {
+class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
HType type = HType::Tagged())
@@ -4276,7 +3881,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
SetAllSideEffects();
}
- virtual void RepresentationChanged(Representation to) {
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
if (!to.IsTagged()) {
ASSERT(to.IsSmiOrInteger32());
ClearAllSideEffects();
@@ -4289,13 +3894,13 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
// We only generate either int32 or generic tagged bitwise operations.
if (new_rep.IsDouble()) new_rep = Representation::Integer32();
HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
Representation r = HBinaryOperation::observed_input_representation(index);
if (r.IsDouble()) return Representation::Integer32();
return r;
@@ -4309,11 +3914,11 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HMathFloorOfDiv: public HBinaryOperation {
+class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
public:
static HMathFloorOfDiv* New(Zone* zone,
HValue* context,
@@ -4322,16 +3927,13 @@ class HMathFloorOfDiv: public HBinaryOperation {
return new(zone) HMathFloorOfDiv(context, left, right);
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Integer32();
- }
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
@@ -4345,11 +3947,11 @@ class HMathFloorOfDiv: public HBinaryOperation {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HArithmeticBinaryOperation: public HBinaryOperation {
+class HArithmeticBinaryOperation : public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right, HType::TaggedNumber()) {
@@ -4358,7 +3960,7 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
SetFlag(kAllowUndefinedAsNaN);
}
- virtual void RepresentationChanged(Representation to) {
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
if (to.IsTagged()) {
SetAllSideEffects();
ClearFlag(kUseGVN);
@@ -4371,11 +3973,11 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HCompareGeneric: public HBinaryOperation {
+class HCompareGeneric V8_FINAL : public HBinaryOperation {
public:
HCompareGeneric(HValue* context,
HValue* left,
@@ -4388,14 +3990,14 @@ class HCompareGeneric: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Tagged()
: representation();
}
Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
@@ -4404,7 +4006,7 @@ class HCompareGeneric: public HBinaryOperation {
};
-class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
+class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
public:
HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
@@ -4424,17 +4026,16 @@ class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
observed_input_representation_[1] = right;
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return representation();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return observed_input_representation_[index];
}
- virtual void PrintDataTo(StringStream* stream);
-
- virtual void AddInformativeDefinitions();
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
@@ -4444,7 +4045,35 @@ class HCompareNumericAndBranch: public HTemplateControlInstruction<2, 2> {
};
-class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
+class HCompareHoleAndBranch V8_FINAL
+ : public HTemplateControlInstruction<2, 1> {
+ public:
+ // TODO(danno): make this private when the IfBuilder properly constructs
+ // control flow instructions.
+ explicit HCompareHoleAndBranch(HValue* object) {
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
+ SetOperandAt(0, object);
+ }
+
+ DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
+
+ HValue* object() { return OperandAt(0); }
+
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return representation();
+ }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
+};
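The new instruction above (note its kAllowUndefinedAsNaN flag) exists because the hole in a double array is encoded as a NaN bit pattern, and NaN never compares equal to anything under floating-point ==, so detecting a hole needs a dedicated branch rather than an ordinary compare. A small standalone check of that floating-point fact, illustrative and not part of the patch:

#include <cassert>
#include <cmath>

int main() {
  double nan = std::nan("");
  // Any ordinary floating-point comparison against NaN is false, so a
  // hole stored as a NaN payload cannot be found with ==; it has to be
  // recognized by inspecting the bit pattern, which is what a dedicated
  // instruction like CompareHoleAndBranch is for.
  assert(!(nan == nan));
  return 0;
}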
+
+
+class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
// TODO(danno): make this private when the IfBuilder properly constructs
// control flow instructions.
@@ -4459,13 +4088,13 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4473,24 +4102,24 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
};
-class HIsObjectAndBranch: public HUnaryControlInstruction {
+class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsObjectAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
};
-class HIsStringAndBranch: public HUnaryControlInstruction {
+class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsStringAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4498,28 +4127,28 @@ class HIsStringAndBranch: public HUnaryControlInstruction {
};
-class HIsSmiAndBranch: public HUnaryControlInstruction {
+class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsSmiAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
-class HIsUndetectableAndBranch: public HUnaryControlInstruction {
+class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HIsUndetectableAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4527,7 +4156,7 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction {
};
-class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
+class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
public:
HStringCompareAndBranch(HValue* context,
HValue* left,
@@ -4539,6 +4168,7 @@ class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
}
HValue* context() { return OperandAt(0); }
@@ -4546,9 +4176,9 @@ class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
HValue* right() { return OperandAt(2); }
Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4563,9 +4193,9 @@ class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
};
-class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
+class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
public:
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -4573,7 +4203,7 @@ class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
};
-class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
+class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
: HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
@@ -4585,9 +4215,9 @@ class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
InstanceType from() { return from_; }
InstanceType to() { return to_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4599,12 +4229,12 @@ class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
};
-class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
+class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
explicit HHasCachedArrayIndexAndBranch(HValue* value)
: HUnaryControlInstruction(value, NULL, NULL) { }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4612,28 +4242,28 @@ class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
};
-class HGetCachedArrayIndex: public HUnaryOperation {
+class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
public:
explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HClassOfTestAndBranch: public HUnaryControlInstruction {
+class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
: HUnaryControlInstruction(value, NULL, NULL),
@@ -4641,11 +4271,11 @@ class HClassOfTestAndBranch: public HUnaryControlInstruction {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> class_name() const { return class_name_; }
@@ -4654,18 +4284,18 @@ class HClassOfTestAndBranch: public HUnaryControlInstruction {
};
-class HTypeofIsAndBranch: public HUnaryControlInstruction {
+class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
: HUnaryControlInstruction(value, NULL, NULL),
type_literal_(type_literal) { }
Handle<String> type_literal() { return type_literal_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4674,7 +4304,7 @@ class HTypeofIsAndBranch: public HUnaryControlInstruction {
};
-class HInstanceOf: public HBinaryOperation {
+class HInstanceOf V8_FINAL : public HBinaryOperation {
public:
HInstanceOf(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right, HType::Boolean()) {
@@ -4682,17 +4312,17 @@ class HInstanceOf: public HBinaryOperation {
SetAllSideEffects();
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
};
-class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
+class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
public:
HInstanceOfKnownGlobal(HValue* context,
HValue* left,
@@ -4708,7 +4338,7 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(1); }
Handle<JSFunction> function() { return function_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4722,7 +4352,7 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
// TODO(mstarzinger): This instruction should be modeled as a load of the map
// field followed by a load of the instance size field once HLoadNamedField is
// flexible enough to accommodate byte-field loads.
-class HInstanceSize: public HTemplateInstruction<1> {
+class HInstanceSize V8_FINAL : public HTemplateInstruction<1> {
public:
explicit HInstanceSize(HValue* object) {
SetOperandAt(0, object);
@@ -4731,7 +4361,7 @@ class HInstanceSize: public HTemplateInstruction<1> {
HValue* object() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -4739,7 +4369,7 @@ class HInstanceSize: public HTemplateInstruction<1> {
};
-class HPower: public HTemplateInstruction<2> {
+class HPower V8_FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4749,19 +4379,19 @@ class HPower: public HTemplateInstruction<2> {
HValue* left() { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Double()
: Representation::None();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return RequiredInputRepresentation(index);
}
DECLARE_CONCRETE_INSTRUCTION(Power)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HPower(HValue* left, HValue* right) {
@@ -4772,13 +4402,13 @@ class HPower: public HTemplateInstruction<2> {
SetGVNFlag(kChangesNewSpacePromotion);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !right()->representation().IsTagged();
}
};
-class HRandom: public HTemplateInstruction<1> {
+class HRandom V8_FINAL : public HTemplateInstruction<1> {
public:
explicit HRandom(HValue* global_object) {
SetOperandAt(0, global_object);
@@ -4787,18 +4417,18 @@ class HRandom: public HTemplateInstruction<1> {
HValue* global_object() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(Random)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HAdd: public HArithmeticBinaryOperation {
+class HAdd V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4807,15 +4437,16 @@ class HAdd: public HArithmeticBinaryOperation {
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
- virtual bool IsCommutative() const {
+ virtual bool IsCommutative() const V8_OVERRIDE {
return !representation().IsTagged();
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (left()->IsInteger32Constant()) {
decomposition->Apply(right(), left()->GetInteger32Constant());
return true;
@@ -4827,12 +4458,17 @@ class HAdd: public HArithmeticBinaryOperation {
}
}
+ virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+ if (to.IsTagged()) ClearFlag(kAllowUndefinedAsNaN);
+ HArithmeticBinaryOperation::RepresentationChanged(to);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HAdd(HValue* context, HValue* left, HValue* right)
@@ -4842,18 +4478,19 @@ class HAdd: public HArithmeticBinaryOperation {
};
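A worked example of the IsCommutative() guard above, using ordinary JavaScript semantics; the helper below is an illustrative sketch, not code from the patch:

#include <cassert>
#include <string>

// A tagged add may turn out to be a string concatenation, and
// concatenation does not commute: in JavaScript, "a" + 1 yields "a1"
// while 1 + "a" yields "1a". Only a non-tagged (known-numeric) add may
// have its operands swapped, e.g. by a register allocator.
bool MaySwapAddOperands(bool representation_is_tagged) {
  return !representation_is_tagged;
}

int main() {
  assert(std::string("a") + "1" != std::string("1") + "a");  // "a1" vs "1a"
  assert(MaySwapAddOperands(false) && !MaySwapAddOperands(true));
  return 0;
}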
-class HSub: public HArithmeticBinaryOperation {
+class HSub V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (right()->IsInteger32Constant()) {
decomposition->Apply(left(), -right()->GetInteger32Constant());
return true;
@@ -4865,9 +4502,9 @@ class HSub: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sub)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HSub(HValue* context, HValue* left, HValue* right)
@@ -4877,7 +4514,7 @@ class HSub: public HArithmeticBinaryOperation {
};
-class HMul: public HArithmeticBinaryOperation {
+class HMul V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4895,28 +4532,28 @@ class HMul: public HArithmeticBinaryOperation {
return mul;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
// Only commutative if it is certain that two objects are not being
// multiplied.
- virtual bool IsCommutative() const {
+ virtual bool IsCommutative() const V8_OVERRIDE {
return !representation().IsTagged();
}
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
- if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+ const char* reason) V8_OVERRIDE {
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HMul(HValue* context, HValue* left, HValue* right)
@@ -4926,7 +4563,7 @@ class HMul: public HArithmeticBinaryOperation {
};
-class HMod: public HArithmeticBinaryOperation {
+class HMod V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4946,13 +4583,14 @@ class HMod: public HArithmeticBinaryOperation {
return false;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4960,9 +4598,9 @@ class HMod: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HMod(HValue* context,
@@ -4979,7 +4617,7 @@ class HMod: public HArithmeticBinaryOperation {
};
-class HDiv: public HArithmeticBinaryOperation {
+class HDiv V8_FINAL : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -4995,13 +4633,14 @@ class HDiv: public HArithmeticBinaryOperation {
return false;
}
- virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+ virtual HValue* EnsureAndPropagateNotMinusZero(
+ BitVector* visited) V8_OVERRIDE;
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5009,9 +4648,9 @@ class HDiv: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HDiv(HValue* context, HValue* left, HValue* right)
@@ -5022,7 +4661,7 @@ class HDiv: public HArithmeticBinaryOperation {
};
-class HMathMinMax: public HArithmeticBinaryOperation {
+class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
public:
enum Operation { kMathMin, kMathMax };
@@ -5032,18 +4671,14 @@ class HMathMinMax: public HArithmeticBinaryOperation {
HValue* right,
Operation op);
- virtual Representation RequiredInputRepresentation(int index) {
- return index == 0 ? Representation::Tagged()
- : representation();
- }
-
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return RequiredInputRepresentation(index);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+ virtual void InferRepresentation(
+ HInferRepresentationPhase* h_infer) V8_OVERRIDE;
- virtual Representation RepresentationFromInputs() {
+ virtual Representation RepresentationFromInputs() V8_OVERRIDE {
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
Representation result = Representation::Smi();
@@ -5053,19 +4688,19 @@ class HMathMinMax: public HArithmeticBinaryOperation {
return result;
}
- virtual bool IsCommutative() const { return true; }
+ virtual bool IsCommutative() const V8_OVERRIDE { return true; }
Operation operation() { return operation_; }
DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return other->IsMathMinMax() &&
HMathMinMax::cast(other)->operation_ == operation_;
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
@@ -5076,7 +4711,7 @@ class HMathMinMax: public HArithmeticBinaryOperation {
};
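RepresentationFromInputs() above starts from Smi and generalizes over both inputs, i.e. it takes a join on V8's representation ordering (Smi, then Integer32, then Double, then Tagged). The miniature below is a hypothetical model of that join for illustration; the ordering is an assumption about generalize() and not spelled out in this diff:

#include <algorithm>
#include <cassert>

// Hypothetical miniature of the representation lattice: each value can
// be widened to the next, never narrowed.
enum class Rep { kSmi, kInteger32, kDouble, kTagged };

// Join (least upper bound): the wider of the two representations.
Rep Generalize(Rep a, Rep b) { return std::max(a, b); }

int main() {
  // min(smi, double) must be computed in double representation:
  assert(Generalize(Rep::kSmi, Rep::kDouble) == Rep::kDouble);
  // Two smi inputs can stay smi:
  assert(Generalize(Rep::kSmi, Rep::kSmi) == Rep::kSmi);
  return 0;
}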
-class HBitwise: public HBitwiseBinaryOperation {
+class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -5086,20 +4721,20 @@ class HBitwise: public HBitwiseBinaryOperation {
Token::Value op() const { return op_; }
- virtual bool IsCommutative() const { return true; }
+ virtual bool IsCommutative() const V8_OVERRIDE { return true; }
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Bitwise)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return op() == HBitwise::cast(other)->op();
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
private:
HBitwise(HValue* context,
@@ -5119,6 +4754,7 @@ class HBitwise: public HBitwiseBinaryOperation {
right->representation().IsSmi() &&
HConstant::cast(right)->Integer32Value() >= 0))) {
SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
// BIT_OR with a smi-range negative value always sets the entire sign
// extension, so the result stays within smi range for any input.
} else if (op == Token::BIT_OR &&
@@ -5129,6 +4765,7 @@ class HBitwise: public HBitwiseBinaryOperation {
right->representation().IsSmi() &&
HConstant::cast(right)->Integer32Value() < 0))) {
SetFlag(kTruncatingToSmi);
+ SetFlag(kTruncatingToInt32);
}
}
@@ -5136,18 +4773,18 @@ class HBitwise: public HBitwiseBinaryOperation {
};
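The two new SetFlag(kTruncatingToInt32) lines above extend the existing smi-truncation reasoning in the constructor. A small worked check of the two cases named in the comments, with example values chosen here purely for illustration:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 0x12345678;
  // BIT_AND with a non-negative smi-range constant only clears bits, so
  // the high input bits can never reach the result and truncating the
  // inputs is safe:
  assert((x & 0xff) == 0x78);
  // BIT_OR with a negative smi-range constant pins the full sign
  // extension, so the result is negative (and in smi range) for every x:
  assert((x | -4) < 0);
  return 0;
}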
-class HShl: public HBitwiseBinaryOperation {
+class HShl V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi() &&
!(right()->IsInteger32Constant() &&
right()->GetInteger32Constant() >= 0)) {
@@ -5159,7 +4796,7 @@ class HShl: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Shl)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HShl(HValue* context, HValue* left, HValue* right)
@@ -5167,14 +4804,14 @@ class HShl: public HBitwiseBinaryOperation {
};
-class HShr: public HBitwiseBinaryOperation {
+class HShr V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (right()->IsInteger32Constant()) {
if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
// This is intended to look for HAdd and HSub, to handle compounds
@@ -5186,11 +4823,11 @@ class HShr: public HBitwiseBinaryOperation {
return false;
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5198,7 +4835,7 @@ class HShr: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Shr)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HShr(HValue* context, HValue* left, HValue* right)
@@ -5206,14 +4843,14 @@ class HShr: public HBitwiseBinaryOperation {
};
-class HSar: public HBitwiseBinaryOperation {
+class HSar V8_FINAL : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* left,
HValue* right);
- virtual bool TryDecompose(DecompositionResult* decomposition) {
+ virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
if (right()->IsInteger32Constant()) {
if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
// This is intended to look for HAdd and HSub, to handle compounds
@@ -5225,11 +4862,11 @@ class HSar: public HBitwiseBinaryOperation {
return false;
}
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5237,7 +4874,7 @@ class HSar: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sar)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HSar(HValue* context, HValue* left, HValue* right)
@@ -5245,7 +4882,7 @@ class HSar: public HBitwiseBinaryOperation {
};
-class HRor: public HBitwiseBinaryOperation {
+class HRor V8_FINAL : public HBitwiseBinaryOperation {
public:
HRor(HValue* context, HValue* left, HValue* right)
: HBitwiseBinaryOperation(context, left, right) {
@@ -5254,7 +4891,7 @@ class HRor: public HBitwiseBinaryOperation {
virtual void UpdateRepresentation(Representation new_rep,
HInferRepresentationPhase* h_infer,
- const char* reason) {
+ const char* reason) V8_OVERRIDE {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5262,17 +4899,17 @@ class HRor: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Ror)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
-class HOsrEntry: public HTemplateInstruction<0> {
+class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId);
BailoutId ast_id() const { return ast_id_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -5288,7 +4925,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
};
-class HParameter: public HTemplateInstruction<0> {
+class HParameter V8_FINAL : public HTemplateInstruction<0> {
public:
enum ParameterKind {
STACK_PARAMETER,
@@ -5303,9 +4940,9 @@ class HParameter: public HTemplateInstruction<0> {
unsigned index() const { return index_; }
ParameterKind kind() const { return kind_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -5332,7 +4969,7 @@ class HParameter: public HTemplateInstruction<0> {
};
-class HCallStub: public HUnaryCall {
+class HCallStub V8_FINAL : public HUnaryCall {
public:
HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
: HUnaryCall(context, argument_count),
@@ -5351,11 +4988,7 @@ class HCallStub: public HUnaryCall {
return transcendental_type_;
}
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(CallStub)
@@ -5365,23 +4998,22 @@ class HCallStub: public HUnaryCall {
};
-class HUnknownOSRValue: public HTemplateInstruction<0> {
+class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
public:
- DECLARE_INSTRUCTION_FACTORY_P0(HUnknownOSRValue)
+ DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::None();
- }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- void set_incoming_value(HPhi* value) {
- incoming_value_ = value;
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
}
- HPhi* incoming_value() {
- return incoming_value_;
- }
+ void set_incoming_value(HPhi* value) { incoming_value_ = value; }
+ HPhi* incoming_value() { return incoming_value_; }
+ HEnvironment* environment() { return environment_; }
+ int index() { return index_; }
- virtual Representation KnownOptimalRepresentation() {
+ virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
if (incoming_value_ == NULL) return Representation::None();
return incoming_value_->KnownOptimalRepresentation();
}
@@ -5389,16 +5021,20 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
private:
- HUnknownOSRValue()
- : incoming_value_(NULL) {
+ HUnknownOSRValue(HEnvironment* environment, int index)
+ : environment_(environment),
+ index_(index),
+ incoming_value_(NULL) {
set_representation(Representation::Tagged());
}
+ HEnvironment* environment_;
+ int index_;
HPhi* incoming_value_;
};
-class HLoadGlobalCell: public HTemplateInstruction<0> {
+class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
public:
HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
: cell_(cell), details_(details), unique_id_() {
@@ -5410,30 +5046,30 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
Handle<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual intptr_t Hashcode() {
+ virtual intptr_t Hashcode() V8_OVERRIDE {
return unique_id_.Hashcode();
}
- virtual void FinalizeUniqueValueId() {
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
unique_id_ = UniqueValueId(cell_);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
return unique_id_ == b->unique_id_;
}
private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+ virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
Handle<Cell> cell_;
PropertyDetails details_;
@@ -5441,7 +5077,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
};
-class HLoadGlobalGeneric: public HTemplateInstruction<2> {
+class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
HLoadGlobalGeneric(HValue* context,
HValue* global_object,
@@ -5460,9 +5096,9 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
Handle<Object> name() const { return name_; }
bool for_typeof() const { return for_typeof_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5474,7 +5110,7 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
-class HAllocate: public HTemplateInstruction<2> {
+class HAllocate V8_FINAL : public HTemplateInstruction<2> {
public:
static HAllocate* New(Zone* zone,
HValue* context,
@@ -5492,7 +5128,7 @@ class HAllocate: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* size() { return OperandAt(1); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0) {
return Representation::Tagged();
} else {
@@ -5536,14 +5172,10 @@ class HAllocate: public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
- void UpdateSize(HValue* size) {
- SetOperandAt(1, size);
- }
-
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator);
+ HValue* dominator) V8_OVERRIDE;
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Allocate)
@@ -5561,7 +5193,10 @@ class HAllocate: public HTemplateInstruction<2> {
HType type,
PretenureFlag pretenure_flag,
InstanceType instance_type)
- : HTemplateInstruction<2>(type) {
+ : HTemplateInstruction<2>(type),
+ dominating_allocate_(NULL),
+ filler_free_space_size_(NULL),
+ clear_next_map_word_(false) {
SetOperandAt(0, context);
SetOperandAt(1, size);
set_representation(Representation::Tagged());
@@ -5573,17 +5208,74 @@ class HAllocate: public HTemplateInstruction<2> {
? ALLOCATE_IN_OLD_POINTER_SPACE : ALLOCATE_IN_OLD_DATA_SPACE)
: ALLOCATE_IN_NEW_SPACE;
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
- flags_ = static_cast<HAllocate::Flags>(flags_ |
- ALLOCATE_DOUBLE_ALIGNED);
+ flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
+ }
+ // We have to fill the allocated object with one-word fillers if we do
+ // not use allocation folding, since some allocations may depend on each
+ // other, i.e., have a pointer to each other. A GC in between these
+ // allocations may leave such objects behind in an incompletely
+ // initialized state.
+ if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
+ flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
}
+ clear_next_map_word_ = pretenure_flag == NOT_TENURED &&
+ AllocationSite::CanTrack(instance_type);
}
+ void UpdateSize(HValue* size) {
+ SetOperandAt(1, size);
+ }
+
+ HAllocate* GetFoldableDominator(HAllocate* dominator);
+
+ void UpdateFreeSpaceFiller(int32_t filler_size);
+
+ void CreateFreeSpaceFiller(int32_t filler_size);
+
+ bool IsFoldable(HAllocate* allocate) {
+ return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
+ (IsOldDataSpaceAllocation() && allocate->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
+ allocate->IsOldPointerSpaceAllocation());
+ }
+
+ void ClearNextMapWord(int offset);
+
Flags flags_;
Handle<Map> known_initial_map_;
+ HAllocate* dominating_allocate_;
+ HStoreNamedField* filler_free_space_size_;
+ bool clear_next_map_word_;
+};
+
+
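A sketch of the hazard behind the PREFILL_WITH_FILLER comment in the constructor above; the filler word value and the helper are hypothetical and only model the idea:

#include <cstdint>
#include <vector>

// A one-word filler value the (hypothetical) GC recognizes and skips.
constexpr uintptr_t kFillerWord = 0xdeadbeef;

// Without allocation folding, object A may be allocated, then object B,
// and only afterwards does A receive its pointer to B. If a GC runs at
// the second allocation, it scans A while A's fields are not yet
// initialized -- unless every word was pre-filled with a filler the GC
// can skip.
std::vector<uintptr_t> AllocateWords(size_t n, bool prefill_with_filler) {
  std::vector<uintptr_t> object(n);  // stands in for n raw heap words
                                     // (a real allocation holds garbage)
  if (prefill_with_filler) object.assign(n, kFillerWord);
  return object;
}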
+class HStoreCodeEntry V8_FINAL : public HTemplateInstruction<2> {
+ public:
+ static HStoreCodeEntry* New(Zone* zone,
+ HValue* context,
+ HValue* function,
+ HValue* code) {
+ return new(zone) HStoreCodeEntry(function, code);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ HValue* function() { return OperandAt(0); }
+ HValue* code_object() { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry)
+
+ private:
+ HStoreCodeEntry(HValue* function, HValue* code) {
+ SetOperandAt(0, function);
+ SetOperandAt(1, code);
+ }
};
-class HInnerAllocatedObject: public HTemplateInstruction<1> {
+class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<1> {
public:
static HInnerAllocatedObject* New(Zone* zone,
HValue* context,
@@ -5596,11 +5288,11 @@ class HInnerAllocatedObject: public HTemplateInstruction<1> {
HValue* base_object() { return OperandAt(0); }
int offset() { return offset_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
@@ -5647,7 +5339,7 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
}
-class HStoreGlobalCell: public HUnaryOperation {
+class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
Handle<PropertyCell>, PropertyDetails);
@@ -5660,10 +5352,10 @@ class HStoreGlobalCell: public HUnaryOperation {
return StoringValueNeedsWriteBarrier(value());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
@@ -5682,7 +5374,7 @@ class HStoreGlobalCell: public HUnaryOperation {
};
-class HStoreGlobalGeneric: public HTemplateInstruction<3> {
+class HStoreGlobalGeneric : public HTemplateInstruction<3> {
public:
inline static HStoreGlobalGeneric* New(Zone* zone,
HValue* context,
@@ -5700,9 +5392,9 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
HValue* value() { return OperandAt(2); }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5728,7 +5420,7 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
};
-class HLoadContextSlot: public HUnaryOperation {
+class HLoadContextSlot V8_FINAL : public HUnaryOperation {
public:
enum Mode {
// Perform a normal load of the context slot without checking its value.
@@ -5773,29 +5465,29 @@ class HLoadContextSlot: public HUnaryOperation {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
return (slot_index() == b->slot_index());
}
private:
- virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+ virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
int slot_index_;
Mode mode_;
};
-class HStoreContextSlot: public HTemplateInstruction<2> {
+class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
public:
enum Mode {
// Perform a normal store to the context slot without checking its previous
@@ -5830,11 +5522,11 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
return mode_ != kNoCheck;
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
@@ -5853,7 +5545,7 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
// Represents an access to a portion of an object, such as the map pointer,
// array elements pointer, etc, but not accesses to array elements themselves.
-class HObjectAccess {
+class HObjectAccess V8_FINAL {
public:
inline bool IsInobject() const {
return portion() != kBackingStore && portion() != kExternalMemory;
@@ -5892,6 +5584,14 @@ class HObjectAccess {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
+ static HObjectAccess ForLiteralsPointer() {
+ return HObjectAccess(kInobject, JSFunction::kLiteralsOffset);
+ }
+
+ static HObjectAccess ForNextFunctionLinkPointer() {
+ return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
+ }
+
static HObjectAccess ForArrayLength(ElementsKind elements_kind) {
return HObjectAccess(
kArrayLengths,
@@ -5936,6 +5636,35 @@ class HObjectAccess {
return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset);
}
+ static HObjectAccess ForSharedFunctionInfoPointer() {
+ return HObjectAccess(kInobject, JSFunction::kSharedFunctionInfoOffset);
+ }
+
+ static HObjectAccess ForCodeEntryPointer() {
+ return HObjectAccess(kInobject, JSFunction::kCodeEntryOffset);
+ }
+
+ static HObjectAccess ForCodeOffset() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
+ }
+
+ static HObjectAccess ForFirstCodeSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstCodeSlot);
+ }
+
+ static HObjectAccess ForFirstContextSlot() {
+ return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
+ }
+
+ static HObjectAccess ForOptimizedCodeMap() {
+ return HObjectAccess(kInobject,
+ SharedFunctionInfo::kOptimizedCodeMapOffset);
+ }
+
+ static HObjectAccess ForFunctionContextPointer() {
+ return HObjectAccess(kInobject, JSFunction::kContextOffset);
+ }
+
static HObjectAccess ForMap() {
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
@@ -5966,6 +5695,8 @@ class HObjectAccess {
// Create an access to an in-object property in a JSArray.
static HObjectAccess ForJSArrayOffset(int offset);
+ static HObjectAccess ForContextSlot(int index);
+
// Create an access to the backing store of an object.
static HObjectAccess ForBackingStoreOffset(int offset,
Representation representation = Representation::Tagged());
@@ -6028,51 +5759,43 @@ class HObjectAccess {
};
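A hedged usage sketch of the new accessors above. It assumes the New(Zone*, HValue*, HObjectAccess) factory that DECLARE_INSTRUCTION_FACTORY_P2 expands to for HLoadNamedField just below, with the Zone and closure value supplied by the caller; illustrative only, not code from the patch:

// Load a closure's SharedFunctionInfo via the new accessor (sketch; the
// factory signature is assumed from the DECLARE_INSTRUCTION_FACTORY_P2
// declaration in HLoadNamedField below).
static HLoadNamedField* LoadSharedFunctionInfo(Zone* zone, HValue* closure) {
  return HLoadNamedField::New(
      zone, closure, HObjectAccess::ForSharedFunctionInfoPointer());
}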
-class HLoadNamedField: public HTemplateInstruction<2> {
+class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HLoadNamedField, HValue*, HObjectAccess);
- DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*, HObjectAccess,
- HValue*);
HValue* object() { return OperandAt(0); }
- HValue* typecheck() {
- ASSERT(HasTypeCheck());
- return OperandAt(1);
- }
-
- bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
+ bool HasTypeCheck() { return object()->IsCheckMaps(); }
HObjectAccess access() const { return access_; }
Representation field_representation() const {
return access_.representation();
}
- virtual bool HasEscapingOperandAt(int index) { return false; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
+ virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ return !access().IsInobject() || access().offset() >= size;
+ }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
}
return Representation::Tagged();
}
- virtual Range* InferRange(Zone* zone);
- virtual void PrintDataTo(StringStream* stream);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HLoadNamedField* b = HLoadNamedField::cast(other);
return access_.Equals(b->access_);
}
private:
- HLoadNamedField(HValue* object,
- HObjectAccess access,
- HValue* typecheck = NULL)
- : access_(access) {
+ HLoadNamedField(HValue* object, HObjectAccess access) : access_(access) {
ASSERT(object != NULL);
SetOperandAt(0, object);
- SetOperandAt(1, typecheck != NULL ? typecheck : object);
Representation representation = access.representation();
if (representation.IsSmi()) {
@@ -6092,52 +5815,13 @@ class HLoadNamedField: public HTemplateInstruction<2> {
access.SetGVNFlags(this, false);
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
HObjectAccess access_;
};
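// A minimal standalone sketch of the HasTypeCheck() change above: instead of
// carrying a separate typecheck operand, the load now just asks whether its
// object input *is* a map check. Value/CheckMaps/LoadNamedField below are
// simplified stand-ins, not the real V8 classes.
#include <cassert>

struct Value {
  virtual bool IsCheckMaps() const { return false; }
  virtual ~Value() = default;
};
struct CheckMaps : Value {
  bool IsCheckMaps() const override { return true; }
};

struct LoadNamedField {
  Value* object;
  bool HasTypeCheck() const { return object->IsCheckMaps(); }
};

int main() {
  Value raw;
  CheckMaps checked;
  assert(!LoadNamedField{&raw}.HasTypeCheck());     // plain input: no check
  assert(LoadNamedField{&checked}.HasTypeCheck());  // input is a map check
}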
-class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
- public:
- HLoadNamedFieldPolymorphic(HValue* context,
- HValue* object,
- SmallMapList* types,
- Handle<String> name,
- Zone* zone);
-
- HValue* context() { return OperandAt(0); }
- HValue* object() { return OperandAt(1); }
- SmallMapList* types() { return &types_; }
- Handle<String> name() { return name_; }
- bool need_generic() { return need_generic_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic)
-
- static const int kMaxLoadPolymorphism = 4;
-
- virtual void FinalizeUniqueValueId();
-
- protected:
- virtual bool DataEquals(HValue* value);
-
- private:
- SmallMapList types_;
- Handle<String> name_;
- ZoneList<UniqueValueId> types_unique_ids_;
- UniqueValueId name_unique_id_;
- bool need_generic_;
-};
-
-
-
-class HLoadNamedGeneric: public HTemplateInstruction<2> {
+class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
public:
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
: name_(name) {
@@ -6151,11 +5835,11 @@ class HLoadNamedGeneric: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(1); }
Handle<Object> name() const { return name_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
@@ -6164,7 +5848,7 @@ class HLoadNamedGeneric: public HTemplateInstruction<2> {
};
-class HLoadFunctionPrototype: public HUnaryOperation {
+class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
public:
explicit HLoadFunctionPrototype(HValue* function)
: HUnaryOperation(function) {
@@ -6175,14 +5859,14 @@ class HLoadFunctionPrototype: public HUnaryOperation {
HValue* function() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
};
class ArrayInstructionInterface {
@@ -6190,12 +5874,13 @@ class ArrayInstructionInterface {
virtual HValue* GetKey() = 0;
virtual void SetKey(HValue* key) = 0;
virtual void SetIndexOffset(uint32_t index_offset) = 0;
+ virtual int MaxIndexOffsetBits() = 0;
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
virtual ~ArrayInstructionInterface() { };
static Representation KeyedAccessIndexRequirement(Representation r) {
- return r.IsInteger32() || kSmiValueSize != 31
+ return r.IsInteger32() || SmiValuesAre32Bits()
? Representation::Integer32() : Representation::Smi();
}
};
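// A small sketch of the key-representation rule above (illustrative
// stand-ins, not V8 API): if the key is already an int32, or Smis carry a
// full 32 value bits (64-bit targets), an Integer32 key is safe; on
// 31-bit-Smi targets a non-int32 key stays Smi to avoid untagging.
#include <cassert>

enum class Rep { kSmi, kInteger32 };

constexpr int kSmiValueSize = 32;  // assumed 64-bit platform
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }

Rep KeyedAccessIndexRequirement(Rep r) {
  return (r == Rep::kInteger32 || SmiValuesAre32Bits())
             ? Rep::kInteger32
             : Rep::kSmi;
}

int main() {
  assert(KeyedAccessIndexRequirement(Rep::kSmi) == Rep::kInteger32);
  assert(KeyedAccessIndexRequirement(Rep::kInteger32) == Rep::kInteger32);
}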
@@ -6207,7 +5892,7 @@ enum LoadKeyedHoleMode {
};
-class HLoadKeyed
+class HLoadKeyed V8_FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
DECLARE_INSTRUCTION_FACTORY_P4(HLoadKeyed, HValue*, HValue*, HValue*,
@@ -6229,6 +5914,9 @@ class HLoadKeyed
void SetIndexOffset(uint32_t index_offset) {
bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
}
+ virtual int MaxIndexOffsetBits() {
+ return kBitsForIndexOffset;
+ }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
@@ -6242,7 +5930,7 @@ class HLoadKeyed
return HoleModeField::decode(bit_field_);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// kind_fast: tagged[int32] (none)
// kind_double: tagged[int32] (none)
// kind_external: external[int32] (none)
@@ -6257,22 +5945,22 @@ class HLoadKeyed
return Representation::None();
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
return RequiredInputRepresentation(index);
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool UsesMustHandleHole() const;
bool AllUsesCanTreatHoleAsNaN() const;
bool RequiresHoleCheck() const;
- virtual Range* InferRange(Zone* zone);
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);
@@ -6332,7 +6020,7 @@ class HLoadKeyed
SetFlag(kUseGVN);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !RequiresHoleCheck();
}
@@ -6368,7 +6056,7 @@ class HLoadKeyed
};
-class HLoadKeyedGeneric: public HTemplateInstruction<3> {
+class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
set_representation(Representation::Tagged());
@@ -6382,28 +6070,33 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* context() { return OperandAt(2); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// tagged[tagged]
return Representation::Tagged();
}
- virtual HValue* Canonicalize();
+ virtual HValue* Canonicalize() V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
};
-class HStoreNamedField: public HTemplateInstruction<2> {
+class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
HObjectAccess, HValue*);
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
- virtual bool HasEscapingOperandAt(int index) { return index == 1; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE {
+ return index == 1;
+ }
+ virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+ return !access().IsInobject() || access().offset() >= size;
+ }
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
if (index == 0 && access().IsExternalMemory()) {
// object must be external in case of external memory access
return Representation::External();
@@ -6416,35 +6109,47 @@ class HStoreNamedField: public HTemplateInstruction<2> {
return Representation::Tagged();
}
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
+ HValue* dominator) V8_OVERRIDE {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
void SkipWriteBarrier() { write_barrier_mode_ = SKIP_WRITE_BARRIER; }
bool IsSkipWriteBarrier() const {
return write_barrier_mode_ == SKIP_WRITE_BARRIER;
}
- HValue* object() { return OperandAt(0); }
- HValue* value() { return OperandAt(1); }
+ HValue* object() const { return OperandAt(0); }
+ HValue* value() const { return OperandAt(1); }
+ HValue* transition() const { return OperandAt(2); }
HObjectAccess access() const { return access_; }
- Handle<Map> transition() const { return transition_; }
- UniqueValueId transition_unique_id() const { return transition_unique_id_; }
- void SetTransition(Handle<Map> map, CompilationInfo* info) {
- ASSERT(transition_.is_null()); // Only set once.
+ HValue* new_space_dominator() const { return new_space_dominator_; }
+ bool has_transition() const { return has_transition_; }
+
+ Handle<Map> transition_map() const {
+ if (has_transition()) {
+ return Handle<Map>::cast(
+ HConstant::cast(transition())->handle(Isolate::Current()));
+ } else {
+ return Handle<Map>();
+ }
+ }
+
+ void SetTransition(HConstant* map_constant, CompilationInfo* info) {
+ ASSERT(!has_transition()); // Only set once.
+ Handle<Map> map = Handle<Map>::cast(map_constant->handle(info->isolate()));
if (map->CanBeDeprecated()) {
map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
}
- transition_ = map;
+ SetOperandAt(2, map_constant);
+ has_transition_ = true;
}
- HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
ASSERT(!(FLAG_track_double_fields && field_representation().IsDouble()) ||
- transition_.is_null());
+ !has_transition());
if (IsSkipWriteBarrier()) return false;
if (field_representation().IsDouble()) return false;
if (field_representation().IsSmi()) return false;
@@ -6459,37 +6164,36 @@ class HStoreNamedField: public HTemplateInstruction<2> {
return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
- virtual void FinalizeUniqueValueId() {
- transition_unique_id_ = UniqueValueId(transition_);
- }
-
Representation field_representation() const {
return access_.representation();
}
+ void UpdateValue(HValue* value) {
+ SetOperandAt(1, value);
+ }
+
private:
HStoreNamedField(HValue* obj,
HObjectAccess access,
HValue* val)
: access_(access),
- transition_(),
- transition_unique_id_(),
new_space_dominator_(NULL),
- write_barrier_mode_(UPDATE_WRITE_BARRIER) {
+ write_barrier_mode_(UPDATE_WRITE_BARRIER),
+ has_transition_(false) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
+ SetOperandAt(2, obj);
access.SetGVNFlags(this, true);
}
HObjectAccess access_;
- Handle<Map> transition_;
- UniqueValueId transition_unique_id_;
HValue* new_space_dominator_;
- WriteBarrierMode write_barrier_mode_;
+ WriteBarrierMode write_barrier_mode_ : 1;
+ bool has_transition_ : 1;
};
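// A sketch of the operand trick used above: the transition map becomes a
// real operand (slot 2), and until a transition is set, slot 2 aliases the
// object operand so the instruction always has three operands. Node and its
// members are simplified stand-ins, not the V8 instruction classes.
#include <cassert>
#include <vector>

struct Node {
  std::vector<Node*> operands;  // [object, value, transition-or-object]
  bool has_transition = false;

  Node() = default;
  Node(Node* object, Node* value) : operands{object, value, object} {}

  void SetTransition(Node* map_constant) {
    assert(!has_transition);  // only set once
    operands[2] = map_constant;
    has_transition = true;
  }
  Node* transition() const { return operands[2]; }
};

int main() {
  Node obj, val, map;
  Node store(&obj, &val);
  assert(store.transition() == &obj);  // slot 2 starts as a harmless alias
  store.SetTransition(&map);
  assert(store.transition() == &map && store.has_transition);
}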
-class HStoreNamedGeneric: public HTemplateInstruction<3> {
+class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
public:
HStoreNamedGeneric(HValue* context,
HValue* object,
@@ -6510,9 +6214,9 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
Handle<String> name() { return name_; }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6524,14 +6228,13 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
-class HStoreKeyed
+class HStoreKeyed V8_FINAL
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind);
- virtual bool HasEscapingOperandAt(int index) { return index != 0; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// kind_fast: tagged[int32] = tagged
// kind_double: tagged[int32] = double
// kind_smi : tagged[int32] = smi
@@ -6561,7 +6264,7 @@ class HStoreKeyed
return IsExternalArrayElementsKind(elements_kind());
}
- virtual Representation observed_input_representation(int index) {
+ virtual Representation observed_input_representation(int index) V8_OVERRIDE {
if (index < 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
return Representation::None();
@@ -6588,6 +6291,9 @@ class HStoreKeyed
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+ virtual int MaxIndexOffsetBits() {
+ return 31 - ElementsKindToShiftSize(elements_kind_);
+ }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
@@ -6602,7 +6308,7 @@ class HStoreKeyed
}
virtual void HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) {
+ HValue* dominator) V8_OVERRIDE {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
@@ -6620,7 +6326,7 @@ class HStoreKeyed
bool NeedsCanonicalization();
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
@@ -6666,7 +6372,7 @@ class HStoreKeyed
};
-class HStoreKeyedGeneric: public HTemplateInstruction<4> {
+class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
public:
HStoreKeyedGeneric(HValue* context,
HValue* object,
@@ -6687,12 +6393,12 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
HValue* context() { return OperandAt(3); }
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// tagged[tagged] = tagged
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
@@ -6701,7 +6407,7 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
};
-class HTransitionElementsKind: public HTemplateInstruction<2> {
+class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
public:
inline static HTransitionElementsKind* New(Zone* zone,
HValue* context,
@@ -6712,7 +6418,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
original_map, transitioned_map);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6723,9 +6429,9 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
ElementsKind from_kind() { return from_kind_; }
ElementsKind to_kind() { return to_kind_; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual void FinalizeUniqueValueId() {
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
original_map_unique_id_ = UniqueValueId(original_map_);
transitioned_map_unique_id_ = UniqueValueId(transitioned_map_);
}
@@ -6733,7 +6439,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
return original_map_unique_id_ == instr->original_map_unique_id_ &&
transitioned_map_unique_id_ == instr->transitioned_map_unique_id_;
@@ -6754,11 +6460,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
SetOperandAt(1, context);
SetFlag(kUseGVN);
SetGVNFlag(kChangesElementsKind);
- if (original_map->has_fast_double_elements()) {
- SetGVNFlag(kChangesElementsPointer);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
- if (transitioned_map->has_fast_double_elements()) {
+ if (!IsSimpleMapChangeTransition(from_kind_, to_kind_)) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
@@ -6774,7 +6476,7 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
};
-class HStringAdd: public HBinaryOperation {
+class HStringAdd V8_FINAL : public HBinaryOperation {
public:
static HInstruction* New(Zone* zone,
HValue* context,
@@ -6784,14 +6486,14 @@ class HStringAdd: public HBinaryOperation {
StringAddFlags flags() const { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(StringAdd)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
private:
HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
@@ -6804,13 +6506,13 @@ class HStringAdd: public HBinaryOperation {
// No side-effects except possible allocation.
// NOTE: this instruction _does not_ call ToString() on its inputs.
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
const StringAddFlags flags_;
};
-class HStringCharCodeAt: public HTemplateInstruction<3> {
+class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
public:
static HStringCharCodeAt* New(Zone* zone,
HValue* context,
@@ -6833,9 +6535,9 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
- virtual Range* InferRange(Zone* zone) {
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
return new(zone) Range(0, String::kMaxUtf16CodeUnit);
}
@@ -6851,17 +6553,17 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
}
// No side effects: runtime function assumes string + number inputs.
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HStringCharFromCode: public HTemplateInstruction<2> {
+class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
public:
static HInstruction* New(Zone* zone,
HValue* context,
HValue* char_code);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return index == 0
? Representation::Tagged()
: Representation::Integer32();
@@ -6870,7 +6572,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
HValue* context() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
- virtual bool DataEquals(HValue* other) { return true; }
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
@@ -6884,14 +6586,14 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
SetGVNFlag(kChangesNewSpacePromotion);
}
- virtual bool IsDeletable() const {
+ virtual bool IsDeletable() const V8_OVERRIDE {
return !value()->ToNumberCanBeObserved();
}
};
template <int V>
-class HMaterializedLiteral: public HTemplateInstruction<V> {
+class HMaterializedLiteral : public HTemplateInstruction<V> {
public:
HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode)
: literal_index_(index), depth_(depth), allocation_site_mode_(mode) {
@@ -6911,7 +6613,7 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
}
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
int literal_index_;
int depth_;
@@ -6919,7 +6621,7 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
};
-class HRegExpLiteral: public HMaterializedLiteral<1> {
+class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
public:
HRegExpLiteral(HValue* context,
Handle<FixedArray> literals,
@@ -6940,7 +6642,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6953,7 +6655,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
};
-class HFunctionLiteral: public HTemplateInstruction<1> {
+class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
public:
HFunctionLiteral(HValue* context,
Handle<SharedFunctionInfo> shared,
@@ -6971,7 +6673,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
HValue* context() { return OperandAt(0); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -6984,7 +6686,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
LanguageMode language_mode() const { return language_mode_; }
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
Handle<SharedFunctionInfo> shared_info_;
bool pretenure_ : 1;
@@ -6994,7 +6696,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
};
-class HTypeof: public HTemplateInstruction<2> {
+class HTypeof V8_FINAL : public HTemplateInstruction<2> {
public:
explicit HTypeof(HValue* context, HValue* value) {
SetOperandAt(0, context);
@@ -7005,24 +6707,24 @@ class HTypeof: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(Typeof)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HTrapAllocationMemento : public HTemplateInstruction<1> {
+class HTrapAllocationMemento V8_FINAL : public HTemplateInstruction<1> {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -7037,11 +6739,11 @@ class HTrapAllocationMemento : public HTemplateInstruction<1> {
};
-class HToFastProperties: public HUnaryOperation {
+class HToFastProperties V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -7049,8 +6751,11 @@ class HToFastProperties: public HUnaryOperation {
private:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
- // This instruction is not marked as having side effects, but
- // changes the map of the input operand. Use it only when creating
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+
+ // This instruction is not marked as kChangesMaps, but does
+ // change the map of the input operand. Use it only when creating
// object literals via a runtime call.
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
@@ -7058,31 +6763,30 @@ class HToFastProperties: public HUnaryOperation {
ASSERT(function->function_id == Runtime::kCreateObjectLiteral ||
function->function_id == Runtime::kCreateObjectLiteralShallow);
#endif
- set_representation(Representation::Tagged());
}
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HValueOf: public HUnaryOperation {
+class HValueOf V8_FINAL : public HUnaryOperation {
public:
explicit HValueOf(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf)
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
-class HDateField: public HUnaryOperation {
+class HDateField V8_FINAL : public HUnaryOperation {
public:
HDateField(HValue* date, Smi* index)
: HUnaryOperation(date), index_(index) {
@@ -7091,7 +6795,7 @@ class HDateField: public HUnaryOperation {
Smi* index() const { return index_; }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -7102,7 +6806,7 @@ class HDateField: public HUnaryOperation {
};
-class HSeqStringSetChar: public HTemplateInstruction<3> {
+class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> {
public:
HSeqStringSetChar(String::Encoding encoding,
HValue* string,
@@ -7119,7 +6823,7 @@ class HSeqStringSetChar: public HTemplateInstruction<3> {
HValue* index() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return (index == 0) ? Representation::Tagged()
: Representation::Integer32();
}
@@ -7131,17 +6835,17 @@ class HSeqStringSetChar: public HTemplateInstruction<3> {
};
-class HCheckMapValue: public HTemplateInstruction<2> {
+class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
@@ -7151,7 +6855,7 @@ class HCheckMapValue: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
protected:
- virtual bool DataEquals(HValue* other) {
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
return true;
}
@@ -7168,7 +6872,7 @@ class HCheckMapValue: public HTemplateInstruction<2> {
};
-class HForInPrepareMap : public HTemplateInstruction<2> {
+class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
public:
static HForInPrepareMap* New(Zone* zone,
HValue* context,
@@ -7176,16 +6880,16 @@ class HForInPrepareMap : public HTemplateInstruction<2> {
return new(zone) HForInPrepareMap(context, object);
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* context() { return OperandAt(0); }
HValue* enumerable() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
@@ -7202,11 +6906,11 @@ class HForInPrepareMap : public HTemplateInstruction<2> {
};
-class HForInCacheArray : public HTemplateInstruction<2> {
+class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -7222,9 +6926,9 @@ class HForInCacheArray : public HTemplateInstruction<2> {
index_cache_ = index_cache;
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
@@ -7244,7 +6948,7 @@ class HForInCacheArray : public HTemplateInstruction<2> {
};
-class HLoadFieldByIndex : public HTemplateInstruction<2> {
+class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
public:
HLoadFieldByIndex(HValue* object,
HValue* index) {
@@ -7253,23 +6957,23 @@ class HLoadFieldByIndex : public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
}
- virtual Representation RequiredInputRepresentation(int index) {
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
HValue* object() { return OperandAt(0); }
HValue* index() { return OperandAt(1); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual HType CalculateInferredType() {
+ virtual HType CalculateInferredType() V8_OVERRIDE {
return HType::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
private:
- virtual bool IsDeletable() const { return true; }
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
};
diff --git a/chromium/v8/src/hydrogen-mark-deoptimize.cc b/chromium/v8/src/hydrogen-mark-deoptimize.cc
index 111fcd2ce9b..c0236e91cbb 100644
--- a/chromium/v8/src/hydrogen-mark-deoptimize.cc
+++ b/chromium/v8/src/hydrogen-mark-deoptimize.cc
@@ -34,14 +34,9 @@ void HMarkDeoptimizeOnUndefinedPhase::Run() {
const ZoneList<HPhi*>* phi_list = graph()->phi_list();
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
- if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* use_value = it.value();
- if (!use_value->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
- ProcessPhi(phi);
- break;
- }
- }
+ if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN) &&
+ !phi->CheckUsesForFlag(HValue::kAllowUndefinedAsNaN)) {
+ ProcessPhi(phi);
}
}
}
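// A sketch of the CheckUsesForFlag idiom adopted above: a phi keeps
// kAllowUndefinedAsNaN only if *every* use tolerates it, so the manual
// use-loop collapses into one all-of query. Value/Flag are simplified
// stand-ins for HValue and its flags.
#include <cassert>
#include <vector>

enum Flag { kAllowUndefinedAsNaN = 1 };

struct Value {
  unsigned flags = 0;
  std::vector<Value*> uses;
  bool CheckFlag(Flag f) const { return (flags & f) != 0; }
  // True iff every use has the flag set (vacuously true with no uses).
  bool CheckUsesForFlag(Flag f) const {
    for (const Value* use : uses)
      if (!use->CheckFlag(f)) return false;
    return true;
  }
};

int main() {
  Value phi, ok_use, bad_use;
  phi.flags = ok_use.flags = kAllowUndefinedAsNaN;
  phi.uses = {&ok_use, &bad_use};
  // Matches the rewritten condition: a flagged phi with a non-flagged use
  // must be processed (i.e. lose the optimistic flag).
  assert(phi.CheckFlag(kAllowUndefinedAsNaN) &&
         !phi.CheckUsesForFlag(kAllowUndefinedAsNaN));
}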
@@ -68,4 +63,22 @@ void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) {
}
}
+
+void HComputeChangeUndefinedToNaN::Run() {
+ const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+ for (int i = 0; i < blocks->length(); ++i) {
+ const HBasicBlock* block(blocks->at(i));
+ for (HInstruction* current = block->first(); current != NULL; ) {
+ HInstruction* next = current->next();
+ if (current->IsChange()) {
+ if (HChange::cast(current)->can_convert_undefined_to_nan()) {
+ current->SetFlag(HValue::kAllowUndefinedAsNaN);
+ }
+ }
+ current = next;
+ }
+ }
+}
+
+
} } // namespace v8::internal
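// A note on the loop shape in HComputeChangeUndefinedToNaN::Run above:
// `next` is captured before the body runs, so the walk stays valid even if
// the body edits the instruction list. A minimal sketch with a hand-rolled
// singly linked list (Instr is an illustrative stand-in, not a V8 type):
#include <cassert>

struct Instr {
  Instr* next = nullptr;
  bool flagged = false;
};

void MarkAll(Instr* first) {
  for (Instr* current = first; current != nullptr; ) {
    Instr* next = current->next;  // grab the successor before touching current
    current->flagged = true;      // body may, in general, unlink current
    current = next;
  }
}

int main() {
  Instr a, b;
  a.next = &b;
  MarkAll(&a);
  assert(a.flagged && b.flagged);
}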
diff --git a/chromium/v8/src/hydrogen-mark-deoptimize.h b/chromium/v8/src/hydrogen-mark-deoptimize.h
index 0aa2c2c7540..30f35b3dec5 100644
--- a/chromium/v8/src/hydrogen-mark-deoptimize.h
+++ b/chromium/v8/src/hydrogen-mark-deoptimize.h
@@ -58,6 +58,18 @@ class HMarkDeoptimizeOnUndefinedPhase : public HPhase {
};
+class HComputeChangeUndefinedToNaN : public HPhase {
+ public:
+ explicit HComputeChangeUndefinedToNaN(HGraph* graph)
+ : HPhase("H_Compute change undefined to nan", graph) {}
+
+ void Run();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HComputeChangeUndefinedToNaN);
+};
+
+
} } // namespace v8::internal
#endif // V8_HYDROGEN_MARK_DEOPTIMIZE_H_
diff --git a/chromium/v8/src/hydrogen-osr.cc b/chromium/v8/src/hydrogen-osr.cc
index 73fa40a72cf..6b1df1e7a5b 100644
--- a/chromium/v8/src/hydrogen-osr.cc
+++ b/chromium/v8/src/hydrogen-osr.cc
@@ -80,7 +80,8 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
osr_values_ = new(zone) ZoneList<HUnknownOSRValue*>(length, zone);
for (int i = 0; i < first_expression_index; ++i) {
- HUnknownOSRValue* osr_value = builder_->Add<HUnknownOSRValue>();
+ HUnknownOSRValue* osr_value
+ = builder_->Add<HUnknownOSRValue>(environment, i);
environment->Bind(i, osr_value);
osr_values_->Add(osr_value, zone);
}
@@ -88,12 +89,21 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry(
if (first_expression_index != length) {
environment->Drop(length - first_expression_index);
for (int i = first_expression_index; i < length; ++i) {
- HUnknownOSRValue* osr_value = builder_->Add<HUnknownOSRValue>();
+ HUnknownOSRValue* osr_value
+ = builder_->Add<HUnknownOSRValue>(environment, i);
environment->Push(osr_value);
osr_values_->Add(osr_value, zone);
}
}
+ unoptimized_frame_slots_ =
+ environment->local_count() + environment->push_count();
+
+ // Keep a copy of the old environment, since the OSR values need it
+ // to figure out where exactly they are located in the unoptimized frame.
+ environment = environment->Copy();
+ builder_->current_block()->UpdateEnvironment(environment);
+
builder_->Add<HSimulate>(osr_entry_id);
builder_->Add<HOsrEntry>(osr_entry_id);
HContext* context = builder_->Add<HContext>();
@@ -117,8 +127,9 @@ void HOsrBuilder::FinishOsrValues() {
const ZoneList<HPhi*>* phis = osr_loop_entry_->phis();
for (int j = 0; j < phis->length(); j++) {
HPhi* phi = phis->at(j);
- ASSERT(phi->HasMergedIndex());
- osr_values_->at(phi->merged_index())->set_incoming_value(phi);
+ if (phi->HasMergedIndex()) {
+ osr_values_->at(phi->merged_index())->set_incoming_value(phi);
+ }
}
}
diff --git a/chromium/v8/src/hydrogen-osr.h b/chromium/v8/src/hydrogen-osr.h
index 0c6b65d0d4e..5014a75bdaf 100644
--- a/chromium/v8/src/hydrogen-osr.h
+++ b/chromium/v8/src/hydrogen-osr.h
@@ -40,7 +40,8 @@ namespace internal {
class HOsrBuilder : public ZoneObject {
public:
explicit HOsrBuilder(HOptimizedGraphBuilder* builder)
- : builder_(builder),
+ : unoptimized_frame_slots_(0),
+ builder_(builder),
osr_entry_(NULL),
osr_loop_entry_(NULL),
osr_values_(NULL) { }
@@ -55,10 +56,16 @@ class HOsrBuilder : public ZoneObject {
// Process the OSR values and phis after initial graph optimization.
void FinishOsrValues();
+ // Return the number of slots in the unoptimized frame at the entry to OSR.
+ int UnoptimizedFrameSlots() const {
+ return unoptimized_frame_slots_;
+ }
+
private:
HBasicBlock* BuildLoopEntry();
bool HasOsrEntryAt(IterationStatement* statement);
+ int unoptimized_frame_slots_;
HOptimizedGraphBuilder* builder_;
HBasicBlock* osr_entry_;
HBasicBlock* osr_loop_entry_;
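// A sketch of the UnoptimizedFrameSlots() bookkeeping above: the builder
// snapshots locals-plus-pushes at OSR entry (then copies the environment),
// so OSR values can later compute their exact slot in the unoptimized
// frame. Env is a simplified stand-in for HEnvironment.
#include <cassert>

struct Env {
  int local_count;
  int push_count;
};

int UnoptimizedFrameSlots(const Env& env) {
  return env.local_count + env.push_count;
}

int main() {
  Env at_osr_entry{4, 2};  // e.g. 4 locals, 2 expression-stack values
  assert(UnoptimizedFrameSlots(at_osr_entry) == 6);
}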
diff --git a/chromium/v8/src/hydrogen-representation-changes.cc b/chromium/v8/src/hydrogen-representation-changes.cc
index 63b7b4d6ec8..960113782f8 100644
--- a/chromium/v8/src/hydrogen-representation-changes.cc
+++ b/chromium/v8/src/hydrogen-representation-changes.cc
@@ -47,8 +47,6 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
HInstruction* new_value = NULL;
bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
- bool allow_undefined_as_nan =
- use_value->CheckFlag(HValue::kAllowUndefinedAsNaN);
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
@@ -61,10 +59,8 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
}
if (new_value == NULL) {
- new_value = new(graph()->zone()) HChange(value, to,
- is_truncating_to_smi,
- is_truncating_to_int,
- allow_undefined_as_nan);
+ new_value = new(graph()->zone()) HChange(
+ value, to, is_truncating_to_smi, is_truncating_to_int);
}
new_value->InsertBefore(next);
@@ -103,7 +99,8 @@ void HRepresentationChangesPhase::Run() {
// int32-phis allow truncation and iteratively remove the ones that
// are used in an operation that does not allow a truncating
// conversion.
- ZoneList<HPhi*> worklist(8, zone());
+ ZoneList<HPhi*> int_worklist(8, zone());
+ ZoneList<HPhi*> smi_worklist(8, zone());
const ZoneList<HPhi*>* phi_list(graph()->phi_list());
for (int i = 0; i < phi_list->length(); i++) {
@@ -112,51 +109,64 @@ void HRepresentationChangesPhase::Run() {
phi->SetFlag(HValue::kTruncatingToInt32);
} else if (phi->representation().IsSmi()) {
phi->SetFlag(HValue::kTruncatingToSmi);
+ phi->SetFlag(HValue::kTruncatingToInt32);
}
}
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- // If a Phi is used as a non-truncating int32 or as a double,
- // clear its "truncating" flag.
- HValue* use = it.value();
- Representation input_representation =
- use->RequiredInputRepresentation(it.index());
- if ((phi->representation().IsInteger32() &&
- !(input_representation.IsInteger32() &&
- use->CheckFlag(HValue::kTruncatingToInt32))) ||
- (phi->representation().IsSmi() &&
- !(input_representation.IsSmi() ||
- use->CheckFlag(HValue::kTruncatingToSmi)))) {
+ HValue* value = NULL;
+ if (phi->representation().IsSmiOrInteger32() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
+ int_worklist.Add(phi, zone());
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+ phi->id(), value->id(), value->Mnemonic());
+ }
+ }
+
+ if (phi->representation().IsSmi() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
+ smi_worklist.Add(phi, zone());
+ phi->ClearFlag(HValue::kTruncatingToSmi);
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
+ phi->id(), value->id(), value->Mnemonic());
+ }
+ }
+ }
+
+ while (!int_worklist.is_empty()) {
+ HPhi* current = int_worklist.RemoveLast();
+ for (int i = 0; i < current->OperandCount(); ++i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() &&
+ input->representation().IsSmiOrInteger32() &&
+ input->CheckFlag(HValue::kTruncatingToInt32)) {
if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- phi->id(), it.value()->id(), it.value()->Mnemonic());
+ PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
}
- phi->ClearFlag(HValue::kTruncatingToInt32);
- phi->ClearFlag(HValue::kTruncatingToSmi);
- worklist.Add(phi, zone());
- break;
+ input->ClearFlag(HValue::kTruncatingToInt32);
+ int_worklist.Add(HPhi::cast(input), zone());
}
}
}
- while (!worklist.is_empty()) {
- HPhi* current = worklist.RemoveLast();
+ while (!smi_worklist.is_empty()) {
+ HPhi* current = smi_worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
- ((input->representation().IsInteger32() &&
- input->CheckFlag(HValue::kTruncatingToInt32)) ||
- (input->representation().IsSmi() &&
- input->CheckFlag(HValue::kTruncatingToSmi)))) {
+ input->representation().IsSmi() &&
+ input->CheckFlag(HValue::kTruncatingToSmi)) {
if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
+ PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
- input->ClearFlag(HValue::kTruncatingToInt32);
input->ClearFlag(HValue::kTruncatingToSmi);
- worklist.Add(HPhi::cast(input), zone());
+ smi_worklist.Add(HPhi::cast(input), zone());
}
}
}
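// A standalone sketch of the split-worklist fixpoint above: optimistically
// flag every smi/int32 phi as truncating, seed a worklist with phis whose
// uses forbid it, then peel the flag off transitively through phi operands.
// Phi and its fields are simplified stand-ins for the V8 types; the real
// pass runs this twice, once for Int32 and once for Smi truncation.
#include <cassert>
#include <vector>

struct Phi {
  bool truncating = true;      // optimistic assumption
  bool uses_allow = true;      // do all uses tolerate truncation?
  std::vector<Phi*> operands;  // phi inputs that are themselves phis
};

void ClearTruncation(std::vector<Phi*>& phis) {
  std::vector<Phi*> worklist;
  for (Phi* phi : phis) {
    if (!phi->uses_allow) {    // some use forbids truncating
      phi->truncating = false;
      worklist.push_back(phi);
    }
  }
  while (!worklist.empty()) {
    Phi* current = worklist.back();
    worklist.pop_back();
    for (Phi* input : current->operands) {
      if (input->truncating) { // propagate: inputs may not truncate either
        input->truncating = false;
        worklist.push_back(input);
      }
    }
  }
}

int main() {
  Phi a, b, c;
  b.operands = {&a};
  b.uses_allow = false;        // b's uses forbid truncation
  std::vector<Phi*> phis = {&a, &b, &c};
  ClearTruncation(phis);
  assert(!a.truncating && !b.truncating && c.truncating);
}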
diff --git a/chromium/v8/src/hydrogen-uint32-analysis.cc b/chromium/v8/src/hydrogen-uint32-analysis.cc
index 67219f55dff..8de887d6f80 100644
--- a/chromium/v8/src/hydrogen-uint32-analysis.cc
+++ b/chromium/v8/src/hydrogen-uint32-analysis.cc
@@ -33,14 +33,19 @@ namespace internal {
bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
// Operations that operate on bits are safe.
- if (use->IsBitwise() ||
- use->IsShl() ||
- use->IsSar() ||
- use->IsShr() ||
- use->IsBitNot()) {
+ if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) {
return true;
- } else if (use->IsChange() || use->IsSimulate()) {
- // Conversions and deoptimization have special support for unt32.
+ } else if (use->IsSimulate()) {
+ // Deoptimization has special support for uint32.
+ return true;
+ } else if (use->IsChange()) {
+ // Conversions have special support for uint32.
+ // This ASSERT guards that the conversion in question is actually
+ // implemented. Do not extend the whitelist without adding
+ // support to LChunkBuilder::DoChange().
+ ASSERT(HChange::cast(use)->to().IsDouble() ||
+ HChange::cast(use)->to().IsSmi() ||
+ HChange::cast(use)->to().IsTagged());
return true;
} else if (use->IsStoreKeyed()) {
HStoreKeyed* store = HStoreKeyed::cast(use);
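// A sketch of the whitelist shape in IsSafeUint32Use above: an operation may
// consume a uint32 value only if it is bit-oriented, a deopt point, or a
// conversion with known uint32 support. The enums are illustrative
// stand-ins; the real code ASSERTs the conversion target instead of
// rejecting it, while this sketch simply rejects unhandled targets.
#include <cassert>

enum class Op { kBitwise, kShl, kSar, kShr, kSimulate, kChange, kAdd };
enum class To { kDouble, kSmi, kTagged, kInt32 };

bool IsSafeUint32Use(Op op, To change_target = To::kTagged) {
  switch (op) {
    case Op::kBitwise: case Op::kShl: case Op::kSar: case Op::kShr:
      return true;                       // operates on bits only
    case Op::kSimulate:
      return true;                       // deoptimization supports uint32
    case Op::kChange:                    // only whitelisted conversions
      return change_target == To::kDouble ||
             change_target == To::kSmi ||
             change_target == To::kTagged;
    default:
      return false;                      // e.g. plain arithmetic is unsafe
  }
}

int main() {
  assert(IsSafeUint32Use(Op::kShr));
  assert(!IsSafeUint32Use(Op::kAdd));
  assert(!IsSafeUint32Use(Op::kChange, To::kInt32));
}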
diff --git a/chromium/v8/src/hydrogen.cc b/chromium/v8/src/hydrogen.cc
index aab64d64d45..0b4fb26c199 100644
--- a/chromium/v8/src/hydrogen.cc
+++ b/chromium/v8/src/hydrogen.cc
@@ -649,7 +649,7 @@ HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
isolate()->factory()->name##_value(), \
- UniqueValueId(isolate()->heap()->name##_value()), \
+ UniqueValueId::name##_value(isolate()->heap()), \
Representation::Tagged(), \
htype, \
false, \
@@ -671,6 +671,21 @@ DEFINE_GET_CONSTANT(Null, null, HType::Tagged(), false)
#undef DEFINE_GET_CONSTANT
+#define DEFINE_IS_CONSTANT(Name, name) \
+bool HGraph::IsConstant##Name(HConstant* constant) { \
+ return constant_##name##_.is_set() && constant == constant_##name##_.get(); \
+}
+DEFINE_IS_CONSTANT(Undefined, undefined)
+DEFINE_IS_CONSTANT(0, 0)
+DEFINE_IS_CONSTANT(1, 1)
+DEFINE_IS_CONSTANT(Minus1, minus1)
+DEFINE_IS_CONSTANT(True, true)
+DEFINE_IS_CONSTANT(False, false)
+DEFINE_IS_CONSTANT(Hole, the_hole)
+DEFINE_IS_CONSTANT(Null, null)
+
+#undef DEFINE_IS_CONSTANT
+
HConstant* HGraph::GetInvalidContext() {
return GetConstant(&constant_invalid_context_, 0xFFFFC0C7);
@@ -678,14 +693,14 @@ HConstant* HGraph::GetInvalidContext() {
bool HGraph::IsStandardConstant(HConstant* constant) {
- if (constant == GetConstantUndefined()) return true;
- if (constant == GetConstant0()) return true;
- if (constant == GetConstant1()) return true;
- if (constant == GetConstantMinus1()) return true;
- if (constant == GetConstantTrue()) return true;
- if (constant == GetConstantFalse()) return true;
- if (constant == GetConstantHole()) return true;
- if (constant == GetConstantNull()) return true;
+ if (IsConstantUndefined(constant)) return true;
+ if (IsConstant0(constant)) return true;
+ if (IsConstant1(constant)) return true;
+ if (IsConstantMinus1(constant)) return true;
+ if (IsConstantTrue(constant)) return true;
+ if (IsConstantFalse(constant)) return true;
+ if (IsConstantHole(constant)) return true;
+ if (IsConstantNull(constant)) return true;
return false;
}
@@ -828,20 +843,19 @@ void HGraphBuilder::IfBuilder::Else() {
ASSERT(!captured_);
ASSERT(!finished_);
last_true_block_ = builder_->current_block();
- ASSERT(first_true_block_ == NULL || !last_true_block_->IsFinished());
builder_->set_current_block(first_false_block_);
did_else_ = true;
}
-void HGraphBuilder::IfBuilder::Deopt() {
+void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
ASSERT(did_then_);
if (did_else_) {
deopt_else_ = true;
} else {
deopt_then_ = true;
}
- builder_->Add<HDeoptimize>(Deoptimizer::EAGER);
+ builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
}
@@ -864,9 +878,11 @@ void HGraphBuilder::IfBuilder::End() {
if (!did_else_) {
last_true_block_ = builder_->current_block();
}
- if (first_true_block_ == NULL) {
+ if (last_true_block_ == NULL || last_true_block_->IsFinished()) {
+ ASSERT(did_else_);
// Return on true. Nothing to do, just continue the false block.
- } else if (first_false_block_ == NULL) {
+ } else if (first_false_block_ == NULL ||
+ (did_else_ && builder_->current_block()->IsFinished())) {
// Deopt on false. Nothing to do except switching to the true block.
builder_->set_current_block(last_true_block_);
} else {
@@ -906,6 +922,24 @@ HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
header_block_ = builder->CreateLoopHeaderBlock();
body_block_ = NULL;
exit_block_ = NULL;
+ exit_trampoline_block_ = NULL;
+ increment_amount_ = builder_->graph()->GetConstant1();
+}
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ LoopBuilder::Direction direction,
+ HValue* increment_amount)
+ : builder_(builder),
+ context_(context),
+ direction_(direction),
+ finished_(false) {
+ header_block_ = builder->CreateLoopHeaderBlock();
+ body_block_ = NULL;
+ exit_block_ = NULL;
+ exit_trampoline_block_ = NULL;
+ increment_amount_ = increment_amount;
}
@@ -921,12 +955,14 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
HEnvironment* body_env = env->Copy();
HEnvironment* exit_env = env->Copy();
- body_block_ = builder_->CreateBasicBlock(body_env);
- exit_block_ = builder_->CreateBasicBlock(exit_env);
// Remove the phi from the expression stack
body_env->Pop();
+ exit_env->Pop();
+ body_block_ = builder_->CreateBasicBlock(body_env);
+ exit_block_ = builder_->CreateBasicBlock(exit_env);
builder_->set_current_block(header_block_);
+ env->Pop();
HCompareNumericAndBranch* compare =
new(zone()) HCompareNumericAndBranch(phi_, terminating, token);
compare->SetSuccessorAt(0, body_block_);
@@ -950,15 +986,26 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
}
+void HGraphBuilder::LoopBuilder::Break() {
+ if (exit_trampoline_block_ == NULL) {
+ // It's the first time we saw a break.
+ HEnvironment* env = exit_block_->last_environment()->Copy();
+ exit_trampoline_block_ = builder_->CreateBasicBlock(env);
+ exit_block_->GotoNoSimulate(exit_trampoline_block_);
+ }
+
+ builder_->current_block()->GotoNoSimulate(exit_trampoline_block_);
+}
+
+
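// A sketch of the break-trampoline idiom in LoopBuilder::Break() above: the
// first break lazily creates one shared exit block; every break (and the
// normal loop exit) then funnels through it, so all exits converge on a
// single block. Block/LoopBuilder here are simplified stand-ins (C++17).
#include <cassert>
#include <vector>

struct Block {
  std::vector<Block*> successors;
  void Goto(Block* target) { successors.push_back(target); }
};

struct LoopBuilder {
  Block* exit_block;
  Block* current;
  Block* exit_trampoline = nullptr;
  Block trampoline_storage;               // lazily wired on first Break()

  void Break() {
    if (exit_trampoline == nullptr) {     // first break: create trampoline
      exit_trampoline = &trampoline_storage;
      exit_block->Goto(exit_trampoline);  // normal exit funnels through too
    }
    current->Goto(exit_trampoline);
  }
};

int main() {
  Block exit, body;
  LoopBuilder loop{&exit, &body};
  loop.Break();
  loop.Break();
  assert(exit.successors.size() == 1);    // trampoline wired exactly once
  assert(body.successors.size() == 2);    // each break adds an edge
}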
void HGraphBuilder::LoopBuilder::EndBody() {
ASSERT(!finished_);
if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
- HValue* one = builder_->graph()->GetConstant1();
if (direction_ == kPostIncrement) {
- increment_ = HAdd::New(zone(), context_, phi_, one);
+ increment_ = HAdd::New(zone(), context_, phi_, increment_amount_);
} else {
- increment_ = HSub::New(zone(), context_, phi_, one);
+ increment_ = HSub::New(zone(), context_, phi_, increment_amount_);
}
increment_->ClearFlag(HValue::kCanOverflow);
builder_->AddInstruction(increment_);
@@ -966,12 +1013,15 @@ void HGraphBuilder::LoopBuilder::EndBody() {
// Push the new increment value on the expression stack to merge into the phi.
builder_->environment()->Push(increment_);
- builder_->current_block()->GotoNoSimulate(header_block_);
- header_block_->loop_information()->RegisterBackEdge(body_block_);
+ HBasicBlock* last_block = builder_->current_block();
+ last_block->GotoNoSimulate(header_block_);
+ header_block_->loop_information()->RegisterBackEdge(last_block);
- builder_->set_current_block(exit_block_);
- // Pop the phi from the expression stack
- builder_->environment()->Pop();
+ if (exit_trampoline_block_ != NULL) {
+ builder_->set_current_block(exit_trampoline_block_);
+ } else {
+ builder_->set_current_block(exit_block_);
+ }
finished_ = true;
}
@@ -1042,9 +1092,9 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
void HGraphBuilder::FinishExitWithHardDeoptimization(
- HBasicBlock* continuation) {
+ const char* reason, HBasicBlock* continuation) {
PadEnvironmentForContinuation(current_block(), continuation);
- Add<HDeoptimize>(Deoptimizer::EAGER);
+ Add<HDeoptimize>(reason, Deoptimizer::EAGER);
if (graph()->IsInsideNoSideEffectsScope()) {
current_block()->GotoNoSimulate(continuation);
} else {
@@ -1058,12 +1108,14 @@ void HGraphBuilder::PadEnvironmentForContinuation(
HBasicBlock* continuation) {
if (continuation->last_environment() != NULL) {
// When merging from a deopt block to a continuation, resolve differences in
- // environment by pushing undefined and popping extra values so that the
- // environments match during the join.
+ // environment by pushing constant 0 and popping extra values so that the
+ // environments match during the join. Push 0 since it has the most specific
+ // representation, and will not influence representation inference of the
+ // phi.
int continuation_env_length = continuation->last_environment()->length();
while (continuation_env_length != from->last_environment()->length()) {
if (continuation_env_length > from->last_environment()->length()) {
- from->last_environment()->Push(graph()->GetConstantUndefined());
+ from->last_environment()->Push(graph()->GetConstant0());
} else {
from->last_environment()->Pop();
}
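// A direct sketch of the padding rule above: before joining a deopt block
// into a continuation, the shorter environment is padded with a constant
// (now 0 rather than undefined, keeping phi representation inference
// narrow) and the longer one is popped down to match. The environment is
// modeled as a plain stack of ints here.
#include <cassert>
#include <vector>

void PadEnvironment(std::vector<int>& from, size_t continuation_length) {
  const int kConstantZero = 0;  // most specific representation
  while (from.size() != continuation_length) {
    if (continuation_length > from.size()) {
      from.push_back(kConstantZero);
    } else {
      from.pop_back();
    }
  }
}

int main() {
  std::vector<int> env = {7, 7};
  PadEnvironment(env, 4);
  assert(env.size() == 4 && env[3] == 0);  // padded up with zeros
  PadEnvironment(env, 1);
  assert(env.size() == 1);                 // popped back down
}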
@@ -1114,7 +1166,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
IfBuilder key_checker(this);
key_checker.If<HCompareNumericAndBranch>(key, max_capacity, Token::LT);
key_checker.Then();
- key_checker.ElseDeopt();
+ key_checker.ElseDeopt("Key out of capacity range");
key_checker.End();
HValue* new_capacity = BuildNewElementsCapacity(key);
@@ -1188,7 +1240,7 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
}
if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
- HInstruction* elements = AddLoadElements(object, NULL);
+ HInstruction* elements = AddLoadElements(object);
HInstruction* empty_fixed_array = Add<HConstant>(
isolate()->factory()->empty_fixed_array());
@@ -1216,10 +1268,9 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
- HValue* object,
+ HValue* checked_object,
HValue* key,
HValue* val,
- HCheckMaps* mapcheck,
bool is_js_array,
ElementsKind elements_kind,
bool is_store,
@@ -1234,13 +1285,12 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
// generated store code.
if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
(elements_kind == FAST_ELEMENTS && is_store)) {
- if (mapcheck != NULL) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
}
+
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
- HValue* elements = AddLoadElements(object, mapcheck);
+ HValue* elements = AddLoadElements(checked_object);
if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
@@ -1249,8 +1299,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
HInstruction* length = NULL;
if (is_js_array) {
- length = Add<HLoadNamedField>(object,
- HObjectAccess::ForArrayLength(elements_kind), mapcheck);
+ length = Add<HLoadNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(elements_kind));
} else {
length = AddLoadFixedArrayLength(elements);
}
@@ -1270,7 +1320,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
negative_checker.Then();
HInstruction* result = AddExternalArrayElementAccess(
external_elements, key, val, bounds_check, elements_kind, is_store);
- negative_checker.ElseDeopt();
+ negative_checker.ElseDeopt("Negative key encountered");
length_checker.End();
return result;
} else {
@@ -1280,7 +1330,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
Add<HLoadExternalArrayPointer>(elements);
return AddExternalArrayElementAccess(
external_elements, checked_key, val,
- mapcheck, elements_kind, is_store);
+ checked_object, elements_kind, is_store);
}
}
ASSERT(fast_smi_only_elements ||
@@ -1297,8 +1347,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
- elements = BuildCheckForCapacityGrow(object, elements, elements_kind,
- length, key, is_js_array);
+ elements = BuildCheckForCapacityGrow(checked_object, elements,
+ elements_kind, length, key,
+ is_js_array);
checked_key = key;
} else {
checked_key = Add<HBoundsCheck>(key, length);
@@ -1306,9 +1357,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (is_store && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
-
- elements = BuildCopyElementsOnWrite(object, elements, elements_kind,
- length);
+ elements = BuildCopyElementsOnWrite(checked_object, elements,
+ elements_kind, length);
} else {
HCheckMaps* check_cow_map = Add<HCheckMaps>(
elements, isolate()->factory()->fixed_array_map(),
@@ -1317,7 +1367,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
}
}
- return AddFastElementAccess(elements, checked_key, val, mapcheck,
+ return AddFastElementAccess(elements, checked_key, val, checked_object,
elements_kind, is_store, load_mode, store_mode);
}
@@ -1490,11 +1540,8 @@ HInstruction* HGraphBuilder::AddFastElementAccess(
}
-HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
- HValue* typecheck) {
- return Add<HLoadNamedField>(object,
- HObjectAccess::ForElementsPointer(),
- typecheck);
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object) {
+ return Add<HLoadNamedField>(object, HObjectAccess::ForElementsPointer());
}
@@ -1505,14 +1552,15 @@ HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
- HValue* half_old_capacity = Add<HShr>(old_capacity, graph_->GetConstant1());
+ HValue* half_old_capacity = AddUncasted<HShr>(old_capacity,
+ graph_->GetConstant1());
- HValue* new_capacity = Add<HAdd>(half_old_capacity, old_capacity);
+ HValue* new_capacity = AddUncasted<HAdd>(half_old_capacity, old_capacity);
new_capacity->ClearFlag(HValue::kCanOverflow);
HValue* min_growth = Add<HConstant>(16);
- new_capacity = Add<HAdd>(new_capacity, min_growth);
+ new_capacity = AddUncasted<HAdd>(new_capacity, min_growth);
new_capacity->ClearFlag(HValue::kCanOverflow);
return new_capacity;
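// A worked example of the growth formula above:
// new_capacity = old + old/2 + 16, i.e. 1.5x growth plus a minimum step.
// For old = 8: 8 + 4 + 16 = 28; for old = 100: 100 + 50 + 16 = 166.
#include <cassert>

int NewElementsCapacity(int old_capacity) {
  int half_old_capacity = old_capacity >> 1;         // HShr by constant 1
  int new_capacity = half_old_capacity + old_capacity;
  new_capacity += 16;                                // min_growth
  return new_capacity;
}

int main() {
  assert(NewElementsCapacity(8) == 28);
  assert(NewElementsCapacity(100) == 166);
}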
@@ -1632,13 +1680,26 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
from_elements_kind,
ALLOW_RETURN_HOLE);
- ElementsKind holey_kind = IsFastSmiElementsKind(to_elements_kind)
+ ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
+ IsFastSmiElementsKind(to_elements_kind))
? FAST_HOLEY_ELEMENTS : to_elements_kind;
- HInstruction* holey_store = Add<HStoreKeyed>(to_elements, key,
- element, holey_kind);
- // Allow NaN hole values to converted to their tagged counterparts.
- if (IsFastHoleyElementsKind(to_elements_kind)) {
- holey_store->SetFlag(HValue::kAllowUndefinedAsNaN);
+
+ if (IsHoleyElementsKind(from_elements_kind) &&
+ from_elements_kind != to_elements_kind) {
+ IfBuilder if_hole(this);
+ if_hole.If<HCompareHoleAndBranch>(element);
+ if_hole.Then();
+ HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
+ ? Add<HConstant>(FixedDoubleArray::hole_nan_as_double())
+ : graph()->GetConstantHole();
+ Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
+ if_hole.Else();
+ HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ if_hole.End();
+ } else {
+ HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
builder.EndBody();
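// A sketch of the hole-aware copy above: when the source kind is holey and
// differs from the target kind, each hole is re-encoded for the target (the
// NaN hole pattern for double arrays, the hole sentinel otherwise) rather
// than copied verbatim. The encodings below are illustrative stand-ins,
// with holes modeled as empty optionals (C++17).
#include <cassert>
#include <cmath>
#include <optional>
#include <vector>

const double kHoleNanAsDouble = std::nan("");  // double-array hole encoding
const double kTaggedHoleSentinel = -1.0;       // stand-in for the_hole value

void CopyElements(const std::vector<std::optional<double>>& from,
                  std::vector<double>& to, bool to_is_double) {
  for (const auto& element : from) {
    if (!element.has_value()) {                // source slot is a hole
      to.push_back(to_is_double ? kHoleNanAsDouble : kTaggedHoleSentinel);
    } else {
      to.push_back(*element);                  // ordinary element
    }
  }
}

int main() {
  std::vector<std::optional<double>> from = {1.0, std::nullopt, 3.0};
  std::vector<double> to;
  CopyElements(from, to, /*to_is_double=*/true);
  assert(to.size() == 3 && std::isnan(to[1]) && to[2] == 3.0);
}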
@@ -1663,22 +1724,12 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
if (mode == TRACK_ALLOCATION_SITE) {
size += AllocationMemento::kSize;
}
- int elems_offset = size;
- InstanceType instance_type = IsFastDoubleElementsKind(kind) ?
- FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
- if (length > 0) {
- size += IsFastDoubleElementsKind(kind)
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
HValue* size_in_bytes = Add<HConstant>(size);
HInstruction* object = Add<HAllocate>(size_in_bytes,
HType::JSObject(),
NOT_TENURED,
- instance_type);
+ JS_OBJECT_TYPE);
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -1695,10 +1746,17 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
}
if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- HValue* boilerplate_elements = AddLoadElements(boilerplate, NULL);
- HValue* object_elements = Add<HInnerAllocatedObject>(object, elems_offset);
+ HValue* boilerplate_elements = AddLoadElements(boilerplate);
+ HValue* object_elements;
+ if (IsFastDoubleElementsKind(kind)) {
+ HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length));
+ object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE);
+ } else {
+ HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length));
+ object_elements = Add<HAllocate>(elems_size, HType::JSArray(),
+ NOT_TENURED, FIXED_ARRAY_TYPE);
+ }
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
object_elements);
@@ -1725,60 +1783,41 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
}
-HInstruction* HGraphBuilder::BuildUnaryMathOp(
- HValue* input, Handle<Type> type, Token::Value operation) {
- // We only handle the numeric cases here
- type = handle(
- Type::Intersect(type, handle(Type::Number(), isolate())), isolate());
-
- switch (operation) {
- default:
- UNREACHABLE();
- case Token::SUB: {
- HInstruction* instr =
- NewUncasted<HMul>(input, graph()->GetConstantMinus1());
- Representation rep = Representation::FromType(type);
- if (type->Is(Type::None())) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
- }
- if (instr->IsBinaryOperation()) {
- HBinaryOperation* binop = HBinaryOperation::cast(instr);
- binop->set_observed_input_representation(1, rep);
- binop->set_observed_input_representation(2, rep);
- }
- return instr;
- }
- case Token::BIT_NOT:
- if (type->Is(Type::None())) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
- }
- return New<HBitNot>(input);
- }
-}
-
-
void HGraphBuilder::BuildCompareNil(
HValue* value,
Handle<Type> type,
int position,
HIfContinuation* continuation) {
IfBuilder if_nil(this, position);
- bool needs_or = false;
+ bool some_case_handled = false;
+ bool some_case_missing = false;
+
if (type->Maybe(Type::Null())) {
- if (needs_or) if_nil.Or();
+ if (some_case_handled) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
- needs_or = true;
+ some_case_handled = true;
+ } else {
+ some_case_missing = true;
}
+
if (type->Maybe(Type::Undefined())) {
- if (needs_or) if_nil.Or();
+ if (some_case_handled) if_nil.Or();
if_nil.If<HCompareObjectEqAndBranch>(value,
graph()->GetConstantUndefined());
- needs_or = true;
+ some_case_handled = true;
+ } else {
+ some_case_missing = true;
}
+
if (type->Maybe(Type::Undetectable())) {
- if (needs_or) if_nil.Or();
+ if (some_case_handled) if_nil.Or();
if_nil.If<HIsUndetectableAndBranch>(value);
+ some_case_handled = true;
} else {
+ some_case_missing = true;
+ }
+
+ if (some_case_missing) {
if_nil.Then();
if_nil.Else();
if (type->NumClasses() == 1) {
@@ -1789,7 +1828,7 @@ void HGraphBuilder::BuildCompareNil(
// emitted below is the actual monomorphic map.
BuildCheckMap(value, type->Classes().Current());
} else {
- if_nil.Deopt();
+ if_nil.Deopt("Too many undetectable types");
}
}
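
The some_case_handled/some_case_missing bookkeeping above ORs together only the nil-ish cases the static type allows, and the else-path falls back to a map check or a deopt when any case was ruled out. A rough standalone sketch of the predicate being built (simplified stand-in structs; the real code emits control-flow branches, not a boolean):

    struct TypeInfo { bool maybe_null, maybe_undefined, maybe_undetectable; };
    struct Value { bool is_null, is_undefined, is_undetectable; };

    // Returns true along the if_nil.Then() path. some_case_missing signals
    // that the else-path still needs a map check (monomorphic case) or the
    // "Too many undetectable types" deopt, as in the hunks above.
    static bool CompareNil(const Value& v, const TypeInfo& t,
                           bool* some_case_missing) {
      bool is_nil = false;
      *some_case_missing = false;
      if (t.maybe_null) is_nil = is_nil || v.is_null;
      else *some_case_missing = true;
      if (t.maybe_undefined) is_nil = is_nil || v.is_undefined;
      else *some_case_missing = true;
      if (t.maybe_undetectable) is_nil = is_nil || v.is_undetectable;
      else *some_case_missing = true;
      return is_nil;
    }
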
@@ -1800,7 +1839,8 @@ void HGraphBuilder::BuildCompareNil(
HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
int previous_object_size,
HValue* alloc_site) {
- ASSERT(alloc_site != NULL);
+ // TODO(mvstanton): ASSERT altered to CHECK to diagnose chromium bug 284577
+ CHECK(alloc_site != NULL);
HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size);
Handle<Map> alloc_memento_map(
@@ -1862,7 +1902,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
// map, because we can just load the map in that case.
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access, NULL));
+ builder()->BuildLoadNamedField(constructor_function_, access));
}
HInstruction* native_context = builder()->BuildGetNativeContext();
@@ -1883,7 +1923,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
// Find the map near the constructor function
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
return builder()->AddInstruction(
- builder()->BuildLoadNamedField(constructor_function_, access, NULL));
+ builder()->BuildLoadNamedField(constructor_function_, access));
}
@@ -2016,7 +2056,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_= &initial_function_state_;
- InitializeAstVisitor();
+ InitializeAstVisitor(info->isolate());
}
@@ -2088,7 +2128,8 @@ HGraph::HGraph(CompilationInfo* info)
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
maximum_environment_size_(0),
- no_side_effects_scope_count_(0) {
+ no_side_effects_scope_count_(0),
+ disallow_adding_new_values_(false) {
if (info->IsStub()) {
HydrogenCodeStub* stub = info->code_stub();
CodeStubInterfaceDescriptor* descriptor =
@@ -2588,7 +2629,7 @@ void ValueContext::ReturnValue(HValue* value) {
// The value is tracked in the bailout environment, and communicated
// through the environment as the result of the expression.
if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
- owner()->Bailout("bad value context for arguments value");
+ owner()->Bailout(kBadValueContextForArgumentsValue);
}
owner()->Push(value);
}
@@ -2640,7 +2681,7 @@ void EffectContext::ReturnContinuation(HIfContinuation* continuation,
void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout("bad value context for arguments object value");
+ return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
}
owner()->AddInstruction(instr);
owner()->Push(instr);
@@ -2653,7 +2694,7 @@ void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->HasObservableSideEffects());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- return owner()->Bailout("bad value context for arguments object value");
+ return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
}
HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
@@ -2743,17 +2784,7 @@ void TestContext::BuildBranch(HValue* value) {
// branch.
HOptimizedGraphBuilder* builder = owner();
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
- builder->Bailout("arguments object value in a test context");
- }
- if (value->IsConstant()) {
- HConstant* constant_value = HConstant::cast(value);
- if (constant_value->BooleanValue()) {
- builder->current_block()->Goto(if_true(), builder->function_state());
- } else {
- builder->current_block()->Goto(if_false(), builder->function_state());
- }
- builder->set_current_block(NULL);
- return;
+ builder->Bailout(kArgumentsObjectValueInATestContext);
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
@@ -2789,7 +2820,7 @@ void TestContext::BuildBranch(HValue* value) {
} while (false)
-void HOptimizedGraphBuilder::Bailout(const char* reason) {
+void HOptimizedGraphBuilder::Bailout(BailoutReason reason) {
current_info()->set_bailout_reason(reason);
SetStackOverflow();
}
@@ -2848,16 +2879,16 @@ void HOptimizedGraphBuilder::VisitExpressions(
bool HOptimizedGraphBuilder::BuildGraph() {
if (current_info()->function()->is_generator()) {
- Bailout("function is a generator");
+ Bailout(kFunctionIsAGenerator);
return false;
}
Scope* scope = current_info()->scope();
if (scope->HasIllegalRedeclaration()) {
- Bailout("function with illegal redeclaration");
+ Bailout(kFunctionWithIllegalRedeclaration);
return false;
}
if (scope->calls_eval()) {
- Bailout("function calls eval");
+ Bailout(kFunctionCallsEval);
return false;
}
SetUpScope(scope);
@@ -2923,8 +2954,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
}
-bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
- *bailout_reason = SmartArrayPointer<char>();
+bool HGraph::Optimize(BailoutReason* bailout_reason) {
OrderBlocks();
AssignDominators();
@@ -2945,19 +2975,20 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
Run<HPropagateDeoptimizingMarkPhase>();
if (!CheckConstPhiUses()) {
- *bailout_reason = SmartArrayPointer<char>(StrDup(
- "Unsupported phi use of const variable"));
+ *bailout_reason = kUnsupportedPhiUseOfConstVariable;
return false;
}
Run<HRedundantPhiEliminationPhase>();
if (!CheckArgumentsPhiUses()) {
- *bailout_reason = SmartArrayPointer<char>(StrDup(
- "Unsupported phi use of arguments"));
+ *bailout_reason = kUnsupportedPhiUseOfArguments;
return false;
}
// Remove dead code and phis
if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
+
+ if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
+
CollectPhis();
if (has_osr()) osr()->FinishOsrValues();
@@ -2981,22 +3012,20 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
- if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
-
if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
if (FLAG_use_range) Run<HRangeAnalysisPhase>();
+ Run<HComputeChangeUndefinedToNaN>();
Run<HComputeMinusZeroChecksPhase>();
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
- if (FLAG_idefs) SetupInformativeDefinitions();
- if (FLAG_array_bounds_checks_elimination && !FLAG_idefs) {
+ if (FLAG_array_bounds_checks_elimination) {
Run<HBoundsCheckEliminationPhase>();
}
- if (FLAG_array_bounds_checks_hoisting && !FLAG_idefs) {
+ if (FLAG_array_bounds_checks_hoisting) {
Run<HBoundsCheckHoistingPhase>();
}
if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
@@ -3008,50 +3037,6 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
}
-void HGraph::SetupInformativeDefinitionsInBlock(HBasicBlock* block) {
- for (int phi_index = 0; phi_index < block->phis()->length(); phi_index++) {
- HPhi* phi = block->phis()->at(phi_index);
- phi->AddInformativeDefinitions();
- phi->SetFlag(HValue::kIDefsProcessingDone);
- // We do not support phis that "redefine just one operand".
- ASSERT(!phi->IsInformativeDefinition());
- }
-
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* i = it.Current();
- i->AddInformativeDefinitions();
- i->SetFlag(HValue::kIDefsProcessingDone);
- i->UpdateRedefinedUsesWhileSettingUpInformativeDefinitions();
- }
-}
-
-
-// This method is recursive, so if its stack frame is large it could
-// cause a stack overflow.
-// To keep the individual stack frames small we do the actual work inside
-// SetupInformativeDefinitionsInBlock();
-void HGraph::SetupInformativeDefinitionsRecursively(HBasicBlock* block) {
- SetupInformativeDefinitionsInBlock(block);
- for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
- SetupInformativeDefinitionsRecursively(block->dominated_blocks()->at(i));
- }
-
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- HInstruction* i = it.Current();
- if (i->IsBoundsCheck()) {
- HBoundsCheck* check = HBoundsCheck::cast(i);
- check->ApplyIndexChange();
- }
- }
-}
-
-
-void HGraph::SetupInformativeDefinitions() {
- HPhase phase("H_Setup informative definitions", this);
- SetupInformativeDefinitionsRecursively(entry_block());
-}
-
-
void HGraph::RestoreActualValues() {
HPhase phase("H_Restore actual values", this);
@@ -3134,7 +3119,7 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
// not have declarations).
if (scope->arguments() != NULL) {
if (!scope->arguments()->IsStackAllocated()) {
- return Bailout("context-allocated arguments");
+ return Bailout(kContextAllocatedArguments);
}
environment()->Bind(scope->arguments(),
@@ -3145,7 +3130,9 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
- CHECK_ALIVE(Visit(statements->at(i)));
+ Statement* stmt = statements->at(i);
+ CHECK_ALIVE(Visit(stmt));
+ if (stmt->IsJump()) break;
}
}
@@ -3155,7 +3142,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (stmt->scope() != NULL) {
- return Bailout("ScopedBlock");
+ return Bailout(kScopedBlock);
}
BreakAndContinueInfo break_info(stmt);
{ BreakAndContinueScope push(&break_info, this);
@@ -3367,7 +3354,7 @@ void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("WithStatement");
+ return Bailout(kWithStatement);
}
@@ -3382,12 +3369,12 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ZoneList<CaseClause*>* clauses = stmt->cases();
int clause_count = clauses->length();
if (clause_count > kCaseClauseLimit) {
- return Bailout("SwitchStatement: too many clauses");
+ return Bailout(kSwitchStatementTooManyClauses);
}
ASSERT(stmt->switch_type() != SwitchStatement::UNKNOWN_SWITCH);
if (stmt->switch_type() == SwitchStatement::GENERIC_SWITCH) {
- return Bailout("SwitchStatement: mixed or non-literal switch labels");
+ return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels);
}
HValue* context = environment()->context();
@@ -3433,7 +3420,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
if (stmt->switch_type() == SwitchStatement::SMI_SWITCH) {
if (!clause->compare_type()->Is(Type::Smi())) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
+ Add<HDeoptimize>("Non-smi switch type", Deoptimizer::SOFT);
}
HCompareNumericAndBranch* compare_ =
@@ -3679,16 +3666,16 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
ASSERT(current_block()->HasPredecessor());
if (!FLAG_optimize_for_in) {
- return Bailout("ForInStatement optimization is disabled");
+ return Bailout(kForInStatementOptimizationIsDisabled);
}
if (stmt->for_in_type() != ForInStatement::FAST_FOR_IN) {
- return Bailout("ForInStatement is not fast case");
+ return Bailout(kForInStatementIsNotFastCase);
}
if (!stmt->each()->IsVariableProxy() ||
!stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
- return Bailout("ForInStatement with non-local each variable");
+ return Bailout(kForInStatementWithNonLocalEachVariable);
}
Variable* each_var = stmt->each()->AsVariableProxy()->var();
@@ -3782,7 +3769,7 @@ void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("ForOfStatement");
+ return Bailout(kForOfStatement);
}
@@ -3790,7 +3777,7 @@ void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("TryCatchStatement");
+ return Bailout(kTryCatchStatement);
}
@@ -3799,7 +3786,7 @@ void HOptimizedGraphBuilder::VisitTryFinallyStatement(
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("TryFinallyStatement");
+ return Bailout(kTryFinallyStatement);
}
@@ -3807,7 +3794,7 @@ void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("DebuggerStatement");
+ return Bailout(kDebuggerStatement);
}
@@ -3853,7 +3840,7 @@ void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("SharedFunctionInfoLiteral");
+ return Bailout(kSharedFunctionInfoLiteral);
}
@@ -3933,7 +3920,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::UNALLOCATED: {
if (IsLexicalVariableMode(variable->mode())) {
// TODO(rossberg): should this be an ASSERT?
- return Bailout("reference to global lexical variable");
+ return Bailout(kReferenceToGlobalLexicalVariable);
}
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
@@ -3990,7 +3977,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (value == graph()->GetConstantHole()) {
ASSERT(IsDeclaredVariableMode(variable->mode()) &&
variable->mode() != VAR);
- return Bailout("reference to uninitialized variable");
+ return Bailout(kReferenceToUninitializedVariable);
}
return ast_context()->ReturnValue(value);
}
@@ -4002,7 +3989,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
case Variable::LOOKUP:
- return Bailout("reference to a variable which requires dynamic lookup");
+ return Bailout(kReferenceToAVariableWhichRequiresDynamicLookup);
}
}
@@ -4033,13 +4020,20 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+static bool CanInlinePropertyAccess(Map* type) {
+ return type->IsJSObjectMap() &&
+ !type->is_dictionary_map() &&
+ !type->has_named_interceptor();
+}
+
+
static void LookupInPrototypes(Handle<Map> map,
Handle<String> name,
LookupResult* lookup) {
while (map->prototype()->IsJSObject()) {
Handle<JSObject> holder(JSObject::cast(map->prototype()));
- if (!holder->HasFastProperties()) break;
map = Handle<Map>(holder->map());
+ if (!CanInlinePropertyAccess(*map)) break;
map->LookupDescriptor(*holder, *name, lookup);
if (lookup->IsFound()) return;
}
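
The new guard changes the prototype walk to stop on any map that cannot be handled inline (non-JSObject, dictionary-mode, or interceptor-bearing), not merely on holders without fast properties. A compile-clean sketch with simplified stand-in types (not V8's Map/JSObject):

    #include <cstring>

    struct JSObject;
    struct Map {
      bool is_js_object_map;
      bool is_dictionary_map;
      bool has_named_interceptor;
      const char* const* names;  // null-terminated descriptor names
      JSObject* prototype;       // next holder, or nullptr at the chain end
    };
    struct JSObject { Map* map; };

    static bool CanInlinePropertyAccess(const Map* m) {
      return m->is_js_object_map && !m->is_dictionary_map &&
             !m->has_named_interceptor;
    }

    // Mirrors LookupInPrototypes: advance to the holder's map first, then
    // bail out as soon as that map is not inlineable.
    static JSObject* LookupInPrototypes(Map* map, const char* name) {
      while (map->prototype != nullptr) {
        JSObject* holder = map->prototype;
        map = holder->map;
        if (!CanInlinePropertyAccess(map)) return nullptr;
        for (const char* const* n = map->names; n != nullptr && *n; ++n) {
          if (std::strcmp(*n, name) == 0) return holder;
        }
      }
      return nullptr;
    }
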
@@ -4119,12 +4113,9 @@ static bool LookupSetter(Handle<Map> map,
// size of all objects that are part of the graph.
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
- int* max_properties,
- int* data_size,
- int* pointer_size) {
+ int* max_properties) {
if (boilerplate->map()->is_deprecated()) {
- Handle<Object> result =
- JSObject::TryMigrateInstance(boilerplate);
+ Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
if (result->IsSmi()) return false;
}
@@ -4135,9 +4126,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastDoubleElements()) {
- *data_size += FixedDoubleArray::SizeFor(elements->length());
- } else if (boilerplate->HasFastObjectElements()) {
+ if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@@ -4147,15 +4136,12 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- data_size,
- pointer_size)) {
+ max_properties)) {
return false;
}
}
}
- *pointer_size += FixedArray::SizeFor(length);
- } else {
+ } else if (!boilerplate->HasFastDoubleElements()) {
return false;
}
}
@@ -4170,7 +4156,6 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
- Representation representation = details.representation();
int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
@@ -4178,18 +4163,12 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- data_size,
- pointer_size)) {
+ max_properties)) {
return false;
}
- } else if (representation.IsDouble()) {
- *data_size += HeapNumber::kSize;
}
}
}
-
- *pointer_size += boilerplate->map()->instance_size();
return true;
}
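
After this hunk, IsFastLiteral only enforces a nesting-depth limit and a shared property budget; the data_size/pointer_size accounting disappears because BuildFastLiteral now allocates each part on its own. A standalone sketch of the remaining shape, where Node is an illustrative stand-in for a boilerplate object:

    #include <vector>

    struct Node { std::vector<Node> children; };

    static bool IsFastLiteral(const Node& n, int max_depth,
                              int* max_properties) {
      if (max_depth == 0) return false;
      for (const Node& value : n.children) {
        // The budget is shared across the whole literal, like the
        // max_properties out-parameter above.
        if ((*max_properties)-- == 0) return false;
        if (!value.children.empty() &&
            !IsFastLiteral(value, max_depth - 1, max_properties)) {
          return false;
        }
      }
      return true;
    }
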
@@ -4199,32 +4178,21 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- HValue* context = environment()->context();
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
- int data_size = 0;
- int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
- Handle<Object> original_boilerplate(closure->literals()->get(
+ Handle<Object> boilerplate(closure->literals()->get(
expr->literal_index()), isolate());
- if (original_boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(original_boilerplate),
+ if (boilerplate->IsJSObject() &&
+ IsFastLiteral(Handle<JSObject>::cast(boilerplate),
kMaxFastLiteralDepth,
- &max_properties,
- &data_size,
- &pointer_size)) {
- Handle<JSObject> original_boilerplate_object =
- Handle<JSObject>::cast(original_boilerplate);
+ &max_properties)) {
Handle<JSObject> boilerplate_object =
- DeepCopy(original_boilerplate_object);
+ Handle<JSObject>::cast(boilerplate);
- literal = BuildFastLiteral(context,
- boilerplate_object,
- original_boilerplate_object,
+ literal = BuildFastLiteral(boilerplate_object,
Handle<Object>::null(),
- data_size,
- pointer_size,
DONT_TRACK_ALLOCATION_SITE);
} else {
NoObservableSideEffectsScope no_effects(this);
@@ -4301,7 +4269,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::PROTOTYPE:
case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- return Bailout("Object literal with complex property");
+ return Bailout(kObjectLiteralWithComplexProperty);
default: UNREACHABLE();
}
}
@@ -4326,7 +4294,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(current_block()->HasPredecessor());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
- HValue* context = environment()->context();
HInstruction* literal;
Handle<AllocationSite> site;
@@ -4340,7 +4307,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
isolate(), literals, expr->constant_elements());
if (raw_boilerplate.is_null()) {
- return Bailout("array boilerplate creation failed");
+ return Bailout(kArrayBoilerplateCreationFailed);
}
site = isolate()->factory()->NewAllocationSite();
@@ -4360,10 +4327,10 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!raw_boilerplate.is_null());
ASSERT(site->IsLiteralSite());
- Handle<JSObject> original_boilerplate_object =
+ Handle<JSObject> boilerplate_object =
Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(original_boilerplate_object)->GetElementsKind();
+ Handle<JSObject>::cast(boilerplate_object)->GetElementsKind();
// TODO(mvstanton): This heuristic is only a temporary solution. In the
// end, we want to quit creating allocation site info after a certain number
@@ -4372,26 +4339,12 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
boilerplate_elements_kind);
// Check whether to use fast or slow deep-copying for boilerplate.
- int data_size = 0;
- int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
- HCheckMaps* type_check = NULL;
- if (IsFastLiteral(original_boilerplate_object,
+ if (IsFastLiteral(boilerplate_object,
kMaxFastLiteralDepth,
- &max_properties,
- &data_size,
- &pointer_size)) {
- if (mode == TRACK_ALLOCATION_SITE) {
- pointer_size += AllocationMemento::kSize;
- }
-
- Handle<JSObject> boilerplate_object = DeepCopy(original_boilerplate_object);
- literal = BuildFastLiteral(context,
- boilerplate_object,
- original_boilerplate_object,
+ &max_properties)) {
+ literal = BuildFastLiteral(boilerplate_object,
site,
- data_size,
- pointer_size,
mode);
} else {
NoObservableSideEffectsScope no_effects(this);
@@ -4411,9 +4364,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
3);
// De-opt if elements kind changed from boilerplate_elements_kind.
- Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
- isolate());
- type_check = Add<HCheckMaps>(literal, map, top_info());
+ Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
+ literal = Add<HCheckMaps>(literal, map, top_info());
}
// The array is expected in the bailout environment during computation
@@ -4432,9 +4384,9 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
CHECK_ALIVE(VisitForValue(subexpr));
HValue* value = Pop();
- if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
+ if (!Smi::IsValid(i)) return Bailout(kNonSmiKeyInArrayLiteral);
- elements = AddLoadElements(literal, type_check);
+ elements = AddLoadElements(literal);
HValue* key = Add<HConstant>(i);
@@ -4469,8 +4421,8 @@ static bool ComputeLoadStoreField(Handle<Map> type,
LookupResult* lookup,
bool is_store) {
ASSERT(!is_store || !type->is_observed());
- if (type->has_named_interceptor()) {
- lookup->InterceptorResult(NULL);
+ if (!CanInlinePropertyAccess(*type)) {
+ lookup->NotFound();
return false;
}
// If we directly find a field, the access can be inlined.
@@ -4496,7 +4448,7 @@ HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
- HValue* object,
+ HValue* checked_object,
Handle<String> name,
HValue* value,
Handle<Map> map,
@@ -4513,7 +4465,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
if (proto_result.IsProperty()) {
// If the inherited property could induce readonly-ness, bail out.
if (proto_result.IsReadOnly() || !proto_result.IsCacheable()) {
- Bailout("improper object on prototype chain for store");
+ Bailout(kImproperObjectOnPrototypeChainForStore);
return NULL;
}
// We only need to check up to the preexisting property.
@@ -4526,9 +4478,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
ASSERT(proto->GetPrototype(isolate())->IsNull());
}
ASSERT(proto->IsJSObject());
- Add<HCheckPrototypeMaps>(
+ BuildCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(map->prototype())),
- Handle<JSObject>(JSObject::cast(proto)), top_info());
+ Handle<JSObject>(JSObject::cast(proto)));
}
HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
@@ -4548,11 +4500,12 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
value);
- instr = New<HStoreNamedField>(object, heap_number_access,
- heap_number);
+ instr = New<HStoreNamedField>(checked_object->ActualValue(),
+ heap_number_access,
+ heap_number);
} else {
// Already holds a HeapNumber; load the box and write its value field.
- HInstruction* heap_number = Add<HLoadNamedField>(object,
+ HInstruction* heap_number = Add<HLoadNamedField>(checked_object,
heap_number_access);
heap_number->set_type(HType::HeapNumber());
instr = New<HStoreNamedField>(heap_number,
@@ -4561,12 +4514,15 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
}
} else {
// This is a normal store.
- instr = New<HStoreNamedField>(object, field_access, value);
+ instr = New<HStoreNamedField>(checked_object->ActualValue(),
+ field_access,
+ value);
}
if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
- instr->SetTransition(transition, top_info());
+ HConstant* transition_constant = Add<HConstant>(transition);
+ instr->SetTransition(transition_constant, top_info());
// TODO(fschneider): Record the new map type of the object in the IR to
// enable elimination of redundant checks after the transition store.
instr->SetGVNFlag(kChangesMaps);
@@ -4597,8 +4553,8 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
// Handle a store to a known field.
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
- AddCheckMap(object, map);
- return BuildStoreNamedField(object, name, value, map, &lookup);
+ HCheckMaps* checked_object = AddCheckMap(object, map);
+ return BuildStoreNamedField(checked_object, name, value, map, &lookup);
}
// No luck, do a generic store.
@@ -4609,8 +4565,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
static bool CanLoadPropertyFromPrototype(Handle<Map> map,
Handle<Name> name,
LookupResult* lookup) {
- if (map->has_named_interceptor()) return false;
- if (map->is_dictionary_map()) return false;
+ if (!CanInlinePropertyAccess(*map)) return false;
map->LookupDescriptor(NULL, *name, lookup);
if (lookup->IsFound()) return false;
return true;
@@ -4618,7 +4573,6 @@ static bool CanLoadPropertyFromPrototype(Handle<Map> map,
HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
- Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name) {
@@ -4656,8 +4610,8 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
if (count == types->length()) {
// Everything matched; can use monomorphic load.
BuildCheckHeapObject(object);
- HCheckMaps* type_check = Add<HCheckMaps>(object, types);
- return BuildLoadNamedField(object, access, type_check);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
+ return BuildLoadNamedField(checked_object, access);
}
if (count != 0) return NULL;
@@ -4678,35 +4632,137 @@ HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
if (!lookup.IsField()) return NULL;
BuildCheckHeapObject(object);
- HCheckMaps* type_check = Add<HCheckMaps>(object, types);
+ Add<HCheckMaps>(object, types);
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
- Add<HCheckPrototypeMaps>(
- Handle<JSObject>::cast(prototype), holder, top_info());
- HValue* holder_value = Add<HConstant>(holder);
- return BuildLoadNamedField(holder_value,
- HObjectAccess::ForField(holder_map, &lookup, name), type_check);
+ HValue* checked_holder = BuildCheckPrototypeMaps(
+ Handle<JSObject>::cast(prototype), holder);
+ return BuildLoadNamedField(checked_holder,
+ HObjectAccess::ForField(holder_map, &lookup, name));
+}
+
+
+// Returns true if an instance of this map can never find a property with this
+// name in its prototype chain. This means all prototypes up to the top are
+// fast and don't have the name in them. It would be good if we could optimize
+// polymorphic loads where the property is sometimes found in the prototype
+// chain.
+static bool PrototypeChainCanNeverResolve(
+ Handle<Map> map, Handle<String> name) {
+ Isolate* isolate = map->GetIsolate();
+ Object* current = map->prototype();
+ while (current != isolate->heap()->null_value()) {
+ if (current->IsJSGlobalProxy() ||
+ current->IsGlobalObject() ||
+ !current->IsJSObject() ||
+ !CanInlinePropertyAccess(JSObject::cast(current)->map()) ||
+ JSObject::cast(current)->IsAccessCheckNeeded()) {
+ return false;
+ }
+
+ LookupResult lookup(isolate);
+ Map* map = JSObject::cast(current)->map();
+ map->LookupDescriptor(NULL, *name, &lookup);
+ if (lookup.IsFound()) return false;
+ if (!lookup.IsCacheable()) return false;
+ current = JSObject::cast(current)->GetPrototype();
+ }
+ return true;
}
void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
- Property* expr,
+ int position,
+ BailoutId ast_id,
HValue* object,
SmallMapList* types,
Handle<String> name) {
- HInstruction* instr = TryLoadPolymorphicAsMonomorphic(
- expr, object, types, name);
- if (instr == NULL) {
- // Something did not match; must use a polymorphic load.
- BuildCheckHeapObject(object);
+ HInstruction* instr = TryLoadPolymorphicAsMonomorphic(object, types, name);
+ if (instr != NULL) {
+ instr->set_position(position);
+ return ast_context()->ReturnInstruction(instr, ast_id);
+ }
+
+ // Something did not match; must use a polymorphic load.
+ int count = 0;
+ HBasicBlock* join = NULL;
+ for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
+ Handle<Map> map = types->at(i);
+ LookupResult lookup(isolate());
+ if (ComputeLoadStoreField(map, name, &lookup, false) ||
+ (lookup.IsCacheable() &&
+ CanInlinePropertyAccess(*map) &&
+ (lookup.IsConstant() ||
+ (!lookup.IsFound() &&
+ PrototypeChainCanNeverResolve(map, name))))) {
+ if (count == 0) {
+ BuildCheckHeapObject(object);
+ join = graph()->CreateBasicBlock();
+ }
+ ++count;
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HCompareMap* compare =
+ new(zone()) HCompareMap(object, map, if_true, if_false);
+ current_block()->Finish(compare);
+
+ set_current_block(if_true);
+
+ // TODO(verwaest): Merge logic with BuildLoadNamedMonomorphic.
+ if (lookup.IsField()) {
+ HObjectAccess access = HObjectAccess::ForField(map, &lookup, name);
+ HLoadNamedField* load = BuildLoadNamedField(compare, access);
+ load->set_position(position);
+ AddInstruction(load);
+ if (!ast_context()->IsEffect()) Push(load);
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
+ HConstant* hconstant = Add<HConstant>(constant);
+ if (!ast_context()->IsEffect()) Push(hconstant);
+ } else {
+ ASSERT(!lookup.IsFound());
+ if (map->prototype()->IsJSObject()) {
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()));
+ Handle<JSObject> holder = prototype;
+ while (holder->map()->prototype()->IsJSObject()) {
+ holder = handle(JSObject::cast(holder->map()->prototype()));
+ }
+ BuildCheckPrototypeMaps(prototype, holder);
+ }
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+ }
+
+ current_block()->Goto(join);
+ set_current_block(if_false);
+ }
+ }
+
+ // Finish up. Unconditionally deoptimize if we've handled all the maps we
+ // know about and do not want to handle ones we've never seen. Otherwise
+ // use a generic IC.
+ if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
+ } else {
HValue* context = environment()->context();
- instr = new(zone()) HLoadNamedFieldPolymorphic(
- context, object, types, name, zone());
+ HInstruction* load = new(zone()) HLoadNamedGeneric(context, object, name);
+ load->set_position(position);
+ AddInstruction(load);
+ if (!ast_context()->IsEffect()) Push(load);
+
+ if (join != NULL) {
+ current_block()->Goto(join);
+ } else {
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ return;
+ }
}
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ ASSERT(join != NULL);
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
}
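
The control shape built here is the standard polymorphic dispatch: one compare-map branch per handled map, a shared join block, and either a hard deoptimization or a generic load as the final else. A heavily simplified stand-in (plain function objects instead of HCompareMap/HLoadNamedGeneric; the deopt condition models count == types->length() under FLAG_deoptimize_uncommon_cases):

    #include <functional>
    #include <stdexcept>
    #include <vector>

    struct Case {
      std::function<bool(const void*)> map_matches;  // HCompareMap branch
      std::function<int(const void*)>  load;         // field/constant/undefined
    };

    static int PolymorphicLoad(const void* object,
                               const std::vector<Case>& cases,
                               bool handled_all_known_maps,
                               const std::function<int(const void*)>& generic) {
      for (const Case& c : cases) {
        if (c.map_matches(object)) return c.load(object);  // goto join
      }
      if (handled_all_known_maps) {
        throw std::runtime_error("deopt: Unknown map in polymorphic load");
      }
      return generic(object);  // generic IC fallback
    }
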
@@ -4714,8 +4770,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
int position,
BailoutId assignment_id,
HValue* object,
- HValue* store_value,
- HValue* result_value,
+ HValue* value,
SmallMapList* types,
Handle<String> name) {
// Use monomorphic store if property lookup results in the same field index
@@ -4757,18 +4812,18 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic(
// Everything matched; can use monomorphic store.
BuildCheckHeapObject(object);
- Add<HCheckMaps>(object, types);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
HInstruction* store;
CHECK_ALIVE_OR_RETURN(
store = BuildStoreNamedField(
- object, name, store_value, types->at(count - 1), &lookup),
+ checked_object, name, value, types->at(count - 1), &lookup),
true);
- if (!ast_context()->IsEffect()) Push(result_value);
+ if (!ast_context()->IsEffect()) Push(value);
store->set_position(position);
AddInstruction(store);
Add<HSimulate>(assignment_id);
if (!ast_context()->IsEffect()) Drop(1);
- ast_context()->ReturnValue(result_value);
+ ast_context()->ReturnValue(value);
return true;
}
@@ -4777,13 +4832,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
int position,
BailoutId assignment_id,
HValue* object,
- HValue* store_value,
- HValue* result_value,
+ HValue* value,
SmallMapList* types,
Handle<String> name) {
if (TryStorePolymorphicAsMonomorphic(
- position, assignment_id, object,
- store_value, result_value, types, name)) {
+ position, assignment_id, object, value, types, name)) {
return;
}
@@ -4810,11 +4863,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
set_current_block(if_true);
HInstruction* instr;
CHECK_ALIVE(instr = BuildStoreNamedField(
- object, name, store_value, map, &lookup));
+ compare, name, value, map, &lookup));
instr->set_position(position);
// Goto will add the HSimulate for the store.
AddInstruction(instr);
- if (!ast_context()->IsEffect()) Push(result_value);
+ if (!ast_context()->IsEffect()) Push(value);
current_block()->Goto(join);
set_current_block(if_false);
@@ -4825,15 +4878,15 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization(join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join);
} else {
- HInstruction* instr = BuildStoreNamedGeneric(object, name, store_value);
+ HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
instr->set_position(position);
AddInstruction(instr);
if (join != NULL) {
if (!ast_context()->IsEffect()) {
- Push(result_value);
+ Push(value);
}
current_block()->Goto(join);
} else {
@@ -4844,12 +4897,12 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
if (ast_context()->IsEffect()) {
Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
} else {
- Push(result_value);
+ Push(value);
Add<HSimulate>(assignment_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
- return ast_context()->ReturnValue(result_value);
+ return ast_context()->ReturnValue(value);
}
}
@@ -4862,37 +4915,111 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
}
-void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- CHECK_ALIVE(VisitForValue(prop->obj()));
+static bool ComputeReceiverTypes(Expression* expr,
+ HValue* receiver,
+ SmallMapList** t) {
+ SmallMapList* types = expr->GetReceiverTypes();
+ *t = types;
+ bool monomorphic = expr->IsMonomorphic();
+ if (types != NULL && receiver->HasMonomorphicJSObjectType()) {
+ Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
+ types->FilterForPossibleTransitions(root_map);
+ monomorphic = types->length() == 1;
+ }
+ return monomorphic && CanInlinePropertyAccess(*types->first());
+}
- if (prop->key()->IsPropertyName()) {
- // Named store.
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = environment()->ExpressionStackAt(0);
- HValue* object = environment()->ExpressionStackAt(1);
- if (expr->IsUninitialized()) Add<HDeoptimize>(Deoptimizer::SOFT);
- return BuildStoreNamed(expr, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, value, value);
- } else {
+void HOptimizedGraphBuilder::BuildStore(Expression* expr,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool is_uninitialized) {
+ HValue* value = environment()->ExpressionStackAt(0);
+
+ if (!prop->key()->IsPropertyName()) {
// Keyed store.
- CHECK_ALIVE(VisitForValue(prop->key()));
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = environment()->ExpressionStackAt(0);
HValue* key = environment()->ExpressionStackAt(1);
HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
- HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
+ HandleKeyedElementAccess(object, key, value, expr, return_id,
expr->position(),
true, // is_store
&has_side_effects);
Drop(3);
Push(value);
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
+
+ // Named store.
+ HValue* object = environment()->ExpressionStackAt(1);
+
+ if (is_uninitialized) {
+ Add<HDeoptimize>("Insufficient type feedback for property assignment",
+ Deoptimizer::SOFT);
+ }
+
+ Literal* key = prop->key()->AsLiteral();
+ Handle<String> name = Handle<String>::cast(key->value());
+ ASSERT(!name.is_null());
+
+ HInstruction* instr = NULL;
+
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+
+ if (monomorphic) {
+ Handle<Map> map = types->first();
+ Handle<JSFunction> setter;
+ Handle<JSObject> holder;
+ if (LookupSetter(map, name, &setter, &holder)) {
+ AddCheckConstantFunction(holder, object, map);
+ if (FLAG_inline_accessors &&
+ TryInlineSetter(setter, ast_id, return_id, value)) {
+ return;
+ }
+ Drop(2);
+ Add<HPushArgument>(object);
+ Add<HPushArgument>(value);
+ instr = new(zone()) HCallConstantFunction(setter, 2);
+ } else {
+ Drop(2);
+ CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
+ name,
+ value,
+ map));
+ }
+ } else if (types != NULL && types->length() > 1) {
+ Drop(2);
+ return HandlePolymorphicStoreNamedField(
+ expr->position(), ast_id, object, value, types, name);
+ } else {
+ Drop(2);
+ instr = BuildStoreNamedGeneric(object, name, value);
+ }
+
+ if (!ast_context()->IsEffect()) Push(value);
+ instr->set_position(expr->position());
+ AddInstruction(instr);
+ if (instr->HasObservableSideEffects()) {
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+ }
+ if (!ast_context()->IsEffect()) Drop(1);
+ return ast_context()->ReturnValue(value);
+}
+
+
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ if (!prop->key()->IsPropertyName()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ }
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ BuildStore(expr, prop, expr->id(),
+ expr->AssignmentId(), expr->IsUninitialized());
}
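
The ComputeReceiverTypes helper introduced in this hunk filters the recorded receiver maps against the receiver's statically known root map, which can collapse a polymorphic site back to a monomorphic one. A simplified sketch of that filtering step (stand-in Map type; the real helper additionally requires CanInlinePropertyAccess on the surviving map):

    #include <algorithm>
    #include <vector>

    struct Map { const Map* root_map; };  // root via FindRootMap() in V8

    static bool ComputeReceiverTypes(std::vector<const Map*>* types,
                                     const Map* receiver_map) {
      bool monomorphic = types->size() == 1;
      if (receiver_map != nullptr) {
        const Map* root = receiver_map->root_map;
        // Drop every recorded map that is not a possible transition of the
        // receiver's root map (FilterForPossibleTransitions above).
        types->erase(
            std::remove_if(types->begin(), types->end(),
                           [root](const Map* m) { return m->root_map != root; }),
            types->end());
        monomorphic = types->size() == 1;
      }
      return monomorphic;
    }
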
@@ -4919,7 +5046,8 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
builder.Then();
builder.Else();
- Add<HDeoptimize>(Deoptimizer::EAGER);
+ Add<HDeoptimize>("Constant global variable assignment",
+ Deoptimizer::EAGER);
builder.End();
}
HInstruction* instr =
@@ -4940,70 +5068,6 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
-void HOptimizedGraphBuilder::BuildStoreNamed(Expression* expr,
- BailoutId id,
- int position,
- BailoutId assignment_id,
- Property* prop,
- HValue* object,
- HValue* store_value,
- HValue* result_value) {
- Literal* key = prop->key()->AsLiteral();
- Handle<String> name = Handle<String>::cast(key->value());
- ASSERT(!name.is_null());
-
- HInstruction* instr = NULL;
- SmallMapList* types = expr->GetReceiverTypes();
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> map;
- if (monomorphic) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> setter;
- Handle<JSObject> holder;
- if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map);
- // Don't try to inline if the result_value is different from the
- // store_value. That case isn't handled yet by the inlining.
- if (result_value == store_value &&
- FLAG_inline_accessors &&
- TryInlineSetter(setter, id, assignment_id, store_value)) {
- return;
- }
- Drop(2);
- Add<HPushArgument>(object);
- Add<HPushArgument>(store_value);
- instr = new(zone()) HCallConstantFunction(setter, 2);
- } else {
- Drop(2);
- CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object,
- name,
- store_value,
- map));
- }
- } else if (types != NULL && types->length() > 1) {
- Drop(2);
- return HandlePolymorphicStoreNamedField(
- position, id, object,
- store_value, result_value, types, name);
- } else {
- Drop(2);
- instr = BuildStoreNamedGeneric(object, name, store_value);
- }
-
- if (!ast_context()->IsEffect()) Push(result_value);
- instr->set_position(position);
- AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(id, REMOVABLE_SIMULATE);
- }
- if (!ast_context()->IsEffect()) Drop(1);
- return ast_context()->ReturnValue(result_value);
-}
-
-
void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
@@ -5017,7 +5081,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == LET) {
- return Bailout("unsupported let compound assignment");
+ return Bailout(kUnsupportedLetCompoundAssignment);
}
CHECK_ALIVE(VisitForValue(operation));
@@ -5033,7 +5097,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case Variable::PARAMETER:
case Variable::LOCAL:
if (var->mode() == CONST) {
- return Bailout("unsupported const compound assignment");
+ return Bailout(kUnsupportedConstCompoundAssignment);
}
BindIfLive(var, Top());
break;
@@ -5049,8 +5113,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
if (var == current_info()->scope()->parameter(i)) {
- Bailout(
- "assignment to parameter, function uses arguments object");
+ Bailout(kAssignmentToParameterFunctionUsesArgumentsObject);
}
}
}
@@ -5081,96 +5144,37 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
}
case Variable::LOOKUP:
- return Bailout("compound assignment to lookup slot");
+ return Bailout(kCompoundAssignmentToLookupSlot);
}
return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
- if (prop->key()->IsPropertyName()) {
- // Named property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load = NULL;
- SmallMapList* types = prop->GetReceiverTypes();
- bool monomorphic = prop->IsMonomorphic();
- if (monomorphic) {
- map = types->first();
- // We can't generate code for a monomorphic dict mode load so
- // just pretend it is not monomorphic.
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else if (types != NULL && types->length() > 1) {
- load = TryLoadPolymorphicAsMonomorphic(prop, object, types, name);
- }
- if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
- }
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
- }
-
- return BuildStoreNamed(prop, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, instr, instr);
- } else {
- // Keyed property.
- CHECK_ALIVE(VisitForValue(prop->obj()));
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* object = Top();
+ HValue* key = NULL;
+ if ((!prop->IsStringLength() &&
+ !prop->IsFunctionPrototype() &&
+ !prop->key()->IsPropertyName()) ||
+ prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
-
- CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* right = Pop();
- HValue* left = Pop();
-
- HInstruction* instr = BuildBinaryOperation(operation, left, right);
- PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
- }
+ key = Top();
+ }
- HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
+ CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
- // Drop the simulated receiver, key, and value. Return the value.
- Drop(3);
- Push(instr);
- ASSERT(has_side_effects); // Stores always have side effects.
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
- return ast_context()->ReturnValue(Pop());
- }
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HInstruction* instr = BuildBinaryOperation(operation, left, right);
+ PushAndAdd(instr);
+ if (instr->HasObservableSideEffects()) {
+ Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE);
+ }
+ BuildStore(expr, prop, expr->id(),
+ expr->AssignmentId(), expr->IsUninitialized());
} else {
- return Bailout("invalid lhs in compound assignment");
+ return Bailout(kInvalidLhsInCompoundAssignment);
}
}
@@ -5207,11 +5211,11 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
}
} else if (var->mode() == CONST_HARMONY) {
if (expr->op() != Token::INIT_CONST_HARMONY) {
- return Bailout("non-initializer assignment to const");
+ return Bailout(kNonInitializerAssignmentToConst);
}
}
- if (proxy->IsArguments()) return Bailout("assignment to arguments");
+ if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
// Handle the assignment.
switch (var->location()) {
@@ -5230,7 +5234,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
if (var->mode() == LET && expr->op() == Token::ASSIGN) {
HValue* env_value = environment()->Lookup(var);
if (env_value == graph()->GetConstantHole()) {
- return Bailout("assignment to let variable before initialization");
+ return Bailout(kAssignmentToLetVariableBeforeInitialization);
}
}
// We do not allow the arguments object to occur in a context where it
@@ -5252,7 +5256,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
if (var == current_info()->scope()->parameter(i)) {
- return Bailout("assignment to parameter in arguments object");
+ return Bailout(kAssignmentToParameterInArgumentsObject);
}
}
}
@@ -5293,10 +5297,10 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
}
case Variable::LOOKUP:
- return Bailout("assignment to LOOKUP variable");
+ return Bailout(kAssignmentToLOOKUPVariable);
}
} else {
- return Bailout("invalid left-hand side in assignment");
+ return Bailout(kInvalidLeftHandSideInAssignment);
}
}
@@ -5321,38 +5325,33 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
HThrow* instr = Add<HThrow>(value);
instr->set_position(expr->position());
Add<HSimulate>(expr->id());
- current_block()->FinishExit(new(zone()) HAbnormalExit);
- set_current_block(NULL);
}
HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- HObjectAccess access,
- HValue* typecheck) {
+ HObjectAccess access) {
if (FLAG_track_double_fields && access.representation().IsDouble()) {
// load the heap number
HLoadNamedField* heap_number = Add<HLoadNamedField>(
object, access.WithRepresentation(Representation::Tagged()));
heap_number->set_type(HType::HeapNumber());
// load the double value from it
- return New<HLoadNamedField>(heap_number,
- HObjectAccess::ForHeapNumberValue(),
- typecheck);
+ return New<HLoadNamedField>(
+ heap_number, HObjectAccess::ForHeapNumberValue());
}
- return New<HLoadNamedField>(object, access, typecheck);
+ return New<HLoadNamedField>(object, access);
}
HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object,
- HValue* typecheck) {
+ HValue* checked_string) {
if (FLAG_fold_constants && object->IsConstant()) {
HConstant* constant = HConstant::cast(object);
if (constant->HasStringValue()) {
return New<HConstant>(constant->StringValue()->length());
}
}
- return BuildLoadNamedField(
- object, HObjectAccess::ForStringLength(), typecheck);
+ return BuildLoadNamedField(checked_string, HObjectAccess::ForStringLength());
}
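
The fast path kept above folds the length of a compile-time string constant into a constant instead of emitting a field load. A minimal stand-in sketch of that decision (plain C++, not Hydrogen; load_length_field models the checked field load):

    #include <optional>
    #include <string>

    static int BuildLoadStringLength(const std::optional<std::string>& constant,
                                     int (*load_length_field)()) {
      if (constant.has_value()) {
        return static_cast<int>(constant->size());  // New<HConstant>(length)
      }
      return load_length_field();  // BuildLoadNamedField(checked_string, ...)
    }
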
@@ -5361,7 +5360,8 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
Handle<String> name,
Property* expr) {
if (expr->IsUninitialized()) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for generic named load",
+ Deoptimizer::SOFT);
}
HValue* context = environment()->context();
return new(zone()) HLoadNamedGeneric(context, object, name);
@@ -5382,7 +5382,6 @@ HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
HValue* object,
Handle<String> name,
- Property* expr,
Handle<Map> map) {
// Handle a load from a known field.
ASSERT(!map->is_dictionary_map());
@@ -5390,18 +5389,19 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
// Handle access to various length properties
if (name->Equals(isolate()->heap()->length_string())) {
if (map->instance_type() == JS_ARRAY_TYPE) {
- HCheckMaps* type_check = AddCheckMap(object, map);
- return New<HLoadNamedField>(object,
- HObjectAccess::ForArrayLength(map->elements_kind()), type_check);
+ HCheckMaps* checked_object = AddCheckMap(object, map);
+ return New<HLoadNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(map->elements_kind()));
}
}
LookupResult lookup(isolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsField()) {
- HCheckMaps* type_check = AddCheckMap(object, map);
- return BuildLoadNamedField(object,
- HObjectAccess::ForField(map, &lookup, name), type_check);
+ HCheckMaps* checked_object = AddCheckMap(object, map);
+ ASSERT(map->IsJSObjectMap());
+ return BuildLoadNamedField(
+ checked_object, HObjectAccess::ForField(map, &lookup, name));
}
// Handle a load of a constant known function.
@@ -5411,17 +5411,22 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
return New<HConstant>(constant);
}
+ if (lookup.IsFound()) {
+ // Cannot handle the property, do a generic load instead.
+ HValue* context = environment()->context();
+ return new(zone()) HLoadNamedGeneric(context, object, name);
+ }
+
// Handle a load from a known field somewhere in the prototype chain.
LookupInPrototypes(map, name, &lookup);
if (lookup.IsField()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
Handle<JSObject> holder(lookup.holder());
+ AddCheckMap(object, map);
+ HValue* checked_holder = BuildCheckPrototypeMaps(prototype, holder);
Handle<Map> holder_map(holder->map());
- HCheckMaps* type_check = AddCheckMap(object, map);
- Add<HCheckPrototypeMaps>(prototype, holder, top_info());
- HValue* holder_value = Add<HConstant>(holder);
- return BuildLoadNamedField(holder_value,
- HObjectAccess::ForField(holder_map, &lookup, name), type_check);
+ return BuildLoadNamedField(
+ checked_holder, HObjectAccess::ForField(holder_map, &lookup, name));
}
// Handle a load of a constant function somewhere in the prototype chain.
@@ -5430,13 +5435,14 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- Add<HCheckPrototypeMaps>(prototype, holder, top_info());
+ BuildCheckPrototypeMaps(prototype, holder);
Handle<Object> constant(lookup.GetConstantFromMap(*holder_map), isolate());
return New<HConstant>(constant);
}
// No luck, do a generic load.
- return BuildLoadNamedGeneric(object, name, expr);
+ HValue* context = environment()->context();
+ return new(zone()) HLoadNamedGeneric(context, object, name);
}
@@ -5447,6 +5453,22 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
}
+LoadKeyedHoleMode HOptimizedGraphBuilder::BuildKeyedHoleMode(Handle<Map> map) {
+ // Loads from a "stock" fast holey double arrays can elide the hole check.
+ LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
+ if (*map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
+ Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
+ BuildCheckPrototypeMaps(prototype, object_prototype);
+ load_mode = ALLOW_RETURN_HOLE;
+ graph()->MarkDependsOnEmptyArrayProtoElements();
+ }
+
+ return load_mode;
+}
+
+
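
The newly factored-out BuildKeyedHoleMode reduces to one decision: only a load through the initial FAST_HOLEY_DOUBLE_ELEMENTS JSArray map, with an intact Array.prototype chain, may return the hole directly. A stand-in sketch (illustrative struct; the real builder also emits prototype map checks and marks the graph as depending on empty array prototype elements):

    enum LoadKeyedHoleMode { NEVER_RETURN_HOLE, ALLOW_RETURN_HOLE };

    struct MapInfo {
      bool is_initial_holey_double_array_map;  // the "stock" JSArray map
      bool array_prototype_chain_intact;
    };

    static LoadKeyedHoleMode BuildKeyedHoleMode(const MapInfo& m) {
      if (m.is_initial_holey_double_array_map &&
          m.array_prototype_chain_intact) {
        return ALLOW_RETURN_HOLE;
      }
      return NEVER_RETURN_HOLE;
    }
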
HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
HValue* object,
HValue* key,
@@ -5455,26 +5477,18 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
Handle<Map> map,
bool is_store,
KeyedAccessStoreMode store_mode) {
- HCheckMaps* mapcheck = Add<HCheckMaps>(object, map, top_info(), dependency);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, map, top_info(),
+ dependency);
if (dependency) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
-
- // Loads from a "stock" fast holey double arrays can elide the hole check.
- LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
- if (*map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS) &&
- isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
- Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
- Add<HCheckPrototypeMaps>(prototype, object_prototype, top_info());
- load_mode = ALLOW_RETURN_HOLE;
- graph()->MarkDependsOnEmptyArrayProtoElements();
+ checked_object->ClearGVNFlag(kDependsOnElementsKind);
}
+ LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
return BuildUncheckedMonomorphicElementAccess(
- object, key, val,
- mapcheck, map->instance_type() == JS_ARRAY_TYPE,
- map->elements_kind(), is_store, load_mode, store_mode);
+ checked_object, key, val,
+ map->instance_type() == JS_ARRAY_TYPE,
+ map->elements_kind(), is_store,
+ load_mode, store_mode);
}
@@ -5491,9 +5505,11 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
bool has_smi_or_object_maps = false;
bool has_js_array_access = false;
bool has_non_js_array_access = false;
+ bool has_seen_holey_elements = false;
Handle<Map> most_general_consolidated_map;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
+ if (!map->IsJSObjectMap()) return NULL;
// Don't allow mixing of JSArrays with JSObjects.
if (map->instance_type() == JS_ARRAY_TYPE) {
if (has_non_js_array_access) return NULL;
@@ -5513,6 +5529,10 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
} else {
return NULL;
}
+ // Remember if we've ever seen holey elements.
+ if (IsHoleyElementsKind(map->elements_kind())) {
+ has_seen_holey_elements = true;
+ }
// Remember the most general elements kind, the code for its load will
// properly handle all of the more specific cases.
if ((i == 0) || IsMoreGeneralElementsKindTransition(
@@ -5523,11 +5543,16 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
}
if (!has_double_maps && !has_smi_or_object_maps) return NULL;
- HCheckMaps* check_maps = Add<HCheckMaps>(object, maps);
+ HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
+ // FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
+ // If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
+ ElementsKind consolidated_elements_kind = has_seen_holey_elements
+ ? GetHoleyElementsKind(most_general_consolidated_map->elements_kind())
+ : most_general_consolidated_map->elements_kind();
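+ // Example: for the map set {FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
+ // FAST_ELEMENTS}, the most general kind is FAST_ELEMENTS, but because a
+ // holey kind was seen the consolidated load uses FAST_HOLEY_ELEMENTS.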
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- object, key, val, check_maps,
+ checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
- most_general_consolidated_map->elements_kind(),
+ consolidated_elements_kind,
false, NEVER_RETURN_HOLE, STANDARD_STORE);
return instr;
}
@@ -5537,7 +5562,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* object,
HValue* key,
HValue* val,
- Expression* prop,
+ SmallMapList* maps,
BailoutId ast_id,
int position,
bool is_store,
@@ -5545,7 +5570,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
- SmallMapList* maps = prop->GetReceiverTypes();
if (!is_store) {
HInstruction* consolidated_load =
@@ -5601,7 +5625,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
if (untransitionable_maps.length() == 1) {
Handle<Map> untransitionable_map = untransitionable_maps[0];
HInstruction* instr = NULL;
- if (untransitionable_map->has_slow_elements_kind()) {
+ if (untransitionable_map->has_slow_elements_kind() ||
+ !untransitionable_map->IsJSObjectMap()) {
instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key));
} else {
@@ -5614,14 +5639,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
return is_store ? NULL : instr;
}
- HInstruction* checkspec =
- AddInstruction(HCheckInstanceType::NewIsSpecObject(object, zone()));
HBasicBlock* join = graph()->CreateBasicBlock();
- HInstruction* elements = AddLoadElements(object, checkspec);
-
for (int i = 0; i < untransitionable_maps.length(); ++i) {
Handle<Map> map = untransitionable_maps[i];
+ if (!map->IsJSObjectMap()) continue;
ElementsKind elements_kind = map->elements_kind();
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
@@ -5630,40 +5652,22 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
current_block()->Finish(mapcompare);
set_current_block(this_map);
- HInstruction* checked_key = NULL;
HInstruction* access = NULL;
- if (IsFastElementsKind(elements_kind)) {
- if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
- Add<HCheckMaps>(
- elements, isolate()->factory()->fixed_array_map(),
- top_info(), mapcompare);
- }
- if (map->instance_type() == JS_ARRAY_TYPE) {
- HInstruction* length = Add<HLoadNamedField>(
- object, HObjectAccess::ForArrayLength(elements_kind), mapcompare);
- checked_key = Add<HBoundsCheck>(key, length);
- } else {
- HInstruction* length = AddLoadFixedArrayLength(elements);
- checked_key = Add<HBoundsCheck>(key, length);
- }
- access = AddFastElementAccess(
- elements, checked_key, val, mapcompare,
- elements_kind, is_store, NEVER_RETURN_HOLE, STANDARD_STORE);
- } else if (IsDictionaryElementsKind(elements_kind)) {
- if (is_store) {
- access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
- } else {
- access = AddInstruction(BuildLoadKeyedGeneric(object, key));
- }
+ if (IsDictionaryElementsKind(elements_kind)) {
+ access = is_store
+ ? AddInstruction(BuildStoreKeyedGeneric(object, key, val))
+ : AddInstruction(BuildLoadKeyedGeneric(object, key));
} else {
- ASSERT(IsExternalArrayElementsKind(elements_kind));
- HInstruction* length = AddLoadFixedArrayLength(elements);
- checked_key = Add<HBoundsCheck>(key, length);
- HLoadExternalArrayPointer* external_elements =
- Add<HLoadExternalArrayPointer>(elements);
- access = AddExternalArrayElementAccess(
- external_elements, checked_key, val,
- mapcompare, elements_kind, is_store);
+ ASSERT(IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind));
+ LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
+ // mapcompare has already verified the map, so it can stand in as the
+ // checked object here.
+ access = BuildUncheckedMonomorphicElementAccess(
+ mapcompare, key, val,
+ map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind, is_store,
+ load_mode,
+ store_mode);
}
*has_side_effects |= access->HasObservableSideEffects();
// The caller will use has_side_effects and add a correct Simulate.
@@ -5679,7 +5683,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
- FinishExitWithHardDeoptimization(join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic element access",
+ join);
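+ // The graph built above has one HCompareMap branch per remaining map, all
+ // merging into |join|; a receiver matching none of the maps ends up here,
+ // in an unconditional deopt, rather than on a generic slow path.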
set_current_block(join);
return is_store ? NULL : Pop();
}
@@ -5696,8 +5701,12 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
- if (expr->IsMonomorphic()) {
- Handle<Map> map = expr->GetMonomorphicReceiverType();
+
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, obj, &types);
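+ // ComputeReceiverTypes replaces the old IsMonomorphic() plus
+ // GetReceiverTypes() pair: it fills |types| from the type feedback and
+ // returns whether exactly one useful receiver map was seen.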
+
+ if (monomorphic) {
+ Handle<Map> map = types->first();
if (map->has_slow_elements_kind()) {
instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
: BuildLoadKeyedGeneric(obj, key);
@@ -5707,20 +5716,21 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, is_store, expr->GetStoreMode());
}
- } else if (expr->GetReceiverTypes() != NULL &&
- !expr->GetReceiverTypes()->is_empty()) {
+ } else if (types != NULL && !types->is_empty()) {
return HandlePolymorphicElementAccess(
- obj, key, val, expr, ast_id, position, is_store,
+ obj, key, val, types, ast_id, position, is_store,
expr->GetStoreMode(), has_side_effects);
} else {
if (is_store) {
if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for keyed store",
+ Deoptimizer::SOFT);
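+ // A SOFT deopt returns to unoptimized code to collect more type
+ // feedback; it signals missing information, not a miscompile.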
}
instr = BuildStoreKeyedGeneric(obj, key, val);
} else {
if (expr->AsProperty()->IsUninitialized()) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for keyed load",
+ Deoptimizer::SOFT);
}
instr = BuildLoadKeyedGeneric(obj, key);
}
@@ -5798,8 +5808,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
}
} else {
Push(graph()->GetArgumentsObject());
- VisitForValue(expr->key());
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
HValue* key = Pop();
Drop(1); // Arguments object.
if (function_state()->outer() == NULL) {
@@ -5824,15 +5833,20 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
}
-void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
-
- if (TryArgumentsAccess(expr)) return;
+void HOptimizedGraphBuilder::PushLoad(Property* expr,
+ HValue* object,
+ HValue* key,
+ int position) {
+ ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
+ Push(object);
+ if (key != NULL) Push(key);
+ BuildLoad(expr, position, expr->LoadId());
+}
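+// PushLoad re-pushes the receiver (and key, if any) in a ValueContext and
+// delegates to BuildLoad, letting VisitCountOperation reuse exactly the
+// load path that VisitProperty uses.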
- CHECK_ALIVE(VisitForValue(expr->obj()));
+void HOptimizedGraphBuilder::BuildLoad(Property* expr,
+ int position,
+ BailoutId ast_id) {
HInstruction* instr = NULL;
if (expr->IsStringLength()) {
HValue* string = Pop();
@@ -5841,7 +5855,6 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
instr = BuildLoadStringLength(string, checkstring);
} else if (expr->IsStringAccess()) {
- CHECK_ALIVE(VisitForValue(expr->key()));
HValue* index = Pop();
HValue* string = Pop();
HValue* context = environment()->context();
@@ -5857,59 +5870,105 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
HValue* object = Top();
- Handle<Map> map;
- bool monomorphic = false;
- if (expr->IsMonomorphic()) {
- map = types->first();
- monomorphic = !map->is_dictionary_map();
- } else if (object->HasMonomorphicJSObjectType()) {
- map = object->GetMonomorphicJSObjectMap();
- monomorphic = !map->is_dictionary_map();
- }
+ SmallMapList* types;
+ bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+
if (monomorphic) {
+ Handle<Map> map = types->first();
Handle<JSFunction> getter;
Handle<JSObject> holder;
if (LookupGetter(map, name, &getter, &holder)) {
AddCheckConstantFunction(holder, Top(), map);
- if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
+ if (FLAG_inline_accessors &&
+ TryInlineGetter(getter, ast_id, expr->LoadId())) {
+ return;
+ }
Add<HPushArgument>(Pop());
instr = new(zone()) HCallConstantFunction(getter, 1);
} else {
- instr = BuildLoadNamedMonomorphic(Pop(), name, expr, map);
+ instr = BuildLoadNamedMonomorphic(Pop(), name, map);
}
} else if (types != NULL && types->length() > 1) {
- return HandlePolymorphicLoadNamedField(expr, Pop(), types, name);
+ return HandlePolymorphicLoadNamedField(
+ position, ast_id, Pop(), types, name);
} else {
instr = BuildLoadNamedGeneric(Pop(), name, expr);
}
} else {
- CHECK_ALIVE(VisitForValue(expr->key()));
-
HValue* key = Pop();
HValue* obj = Pop();
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, expr->id(), expr->position(),
+ obj, key, NULL, expr, ast_id, position,
false, // is_store
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
} else {
Push(load);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
Drop(1);
}
}
return ast_context()->ReturnValue(load);
}
- instr->set_position(expr->position());
- return ast_context()->ReturnInstruction(instr, expr->id());
+ instr->set_position(position);
+ return ast_context()->ReturnInstruction(instr, ast_id);
+}
+
+
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
+ ASSERT(!HasStackOverflow());
+ ASSERT(current_block() != NULL);
+ ASSERT(current_block()->HasPredecessor());
+
+ if (TryArgumentsAccess(expr)) return;
+
+ CHECK_ALIVE(VisitForValue(expr->obj()));
+ if ((!expr->IsStringLength() &&
+ !expr->IsFunctionPrototype() &&
+ !expr->key()->IsPropertyName()) ||
+ expr->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(expr->key()));
+ }
+
+ BuildLoad(expr, expr->position(), expr->id());
+}
+
+
+HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
+ CompilationInfo* info) {
+ HConstant* constant_value = New<HConstant>(constant);
+
+ if (constant->map()->CanOmitMapChecks()) {
+ constant->map()->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ return constant_value;
+ }
+
+ AddInstruction(constant_value);
+ HCheckMaps* check =
+ Add<HCheckMaps>(constant_value, handle(constant->map()), info);
+ check->ClearGVNFlag(kDependsOnElementsKind);
+ return check;
+}
+
+
+HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+ Handle<JSObject> holder) {
+ while (!prototype.is_identical_to(holder)) {
+ BuildConstantMapCheck(prototype, top_info());
+ prototype = handle(JSObject::cast(prototype->GetPrototype()));
+ }
+
+ HInstruction* checked_object = BuildConstantMapCheck(prototype, top_info());
+ if (!checked_object->IsLinked()) AddInstruction(checked_object);
+ return checked_object;
}
@@ -5917,7 +5976,7 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map) {
if (!holder.is_null()) {
Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
- Add<HCheckPrototypeMaps>(prototype, holder, top_info());
+ BuildCheckPrototypeMaps(prototype, holder);
}
}
@@ -6140,7 +6199,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
// that the environment stack matches the depth on deopt that it otherwise
// would have had after a successful call.
Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1));
- FinishExitWithHardDeoptimization(join);
+ FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
} else {
HValue* context = environment()->context();
HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
@@ -6289,7 +6348,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
- target_shared->DisableOptimization("parse/scope error");
+ target_shared->DisableOptimization(kParseScopeError);
}
TraceInline(target, caller, "parse failure");
return false;
@@ -6309,7 +6368,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
return false;
}
AstProperties::Flags* flags(function->flags());
- if (flags->Contains(kDontInline) || flags->Contains(kDontOptimize)) {
+ if (flags->Contains(kDontInline) || function->dont_optimize()) {
TraceInline(target, caller, "target contains unsupported syntax [late]");
return false;
}
@@ -6428,7 +6487,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// Bail out if the inline function did, as we cannot residualize a call
// instead.
TraceInline(target, caller, "inline graph construction failed");
- target_shared->DisableOptimization("inlining bailed out");
+ target_shared->DisableOptimization(kInliningBailedOut);
inline_bailout_ = true;
delete target_state;
return true;
@@ -6555,13 +6614,14 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
- Property* prop) {
+ BailoutId ast_id,
+ BailoutId return_id) {
return TryInline(CALL_AS_METHOD,
getter,
0,
NULL,
- prop->id(),
- prop->LoadId(),
+ ast_id,
+ return_id,
GETTER_CALL_RETURN);
}
@@ -6658,9 +6718,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* string = Pop();
HValue* context = environment()->context();
ASSERT(!expr->holder().is_null());
- Add<HCheckPrototypeMaps>(Call::GetPrototypeForPrimitiveCheck(
+ BuildCheckPrototypeMaps(Call::GetPrototypeForPrimitiveCheck(
STRING_CHECK, expr->holder()->GetIsolate()),
- expr->holder(), top_info());
+ expr->holder());
HInstruction* char_code =
BuildStringCharCodeAt(string, index);
if (id == kStringCharCodeAt) {
@@ -6735,8 +6795,6 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
} else if (exponent == 2.0) {
result = HMul::New(zone(), context, left, left);
}
- } else if (right->EqualsInteger32Constant(2)) {
- result = HMul::New(zone(), context, left, left);
}
if (result == NULL) {
@@ -6818,14 +6876,12 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
// Found pattern f.apply(receiver, arguments).
- VisitForValue(prop->obj());
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(prop->obj()), true);
HValue* function = Top();
AddCheckConstantFunction(expr->holder(), function, function_map);
Drop(1);
- VisitForValue(args->at(0));
- if (HasStackOverflow() || current_block() == NULL) return true;
+ CHECK_ALIVE_OR_RETURN(VisitForValue(args->at(0)), true);
HValue* receiver = Pop();
if (function_state()->outer() == NULL) {
@@ -6856,7 +6912,8 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
Handle<JSFunction> known_function;
if (function->IsConstant()) {
HConstant* constant_function = HConstant::cast(function);
- known_function = Handle<JSFunction>::cast(constant_function->handle());
+ known_function = Handle<JSFunction>::cast(
+ constant_function->handle(isolate()));
int args_count = arguments_count - 1; // Excluding receiver.
if (TryInlineApply(known_function, expr, args_count)) return true;
}
@@ -6918,23 +6975,19 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
+ HValue* receiver =
+ environment()->ExpressionStackAt(expr->arguments()->length());
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> receiver_map;
- if (monomorphic) {
- receiver_map = (types == NULL || types->is_empty())
- ? Handle<Map>::null()
- : types->first();
+ SmallMapList* types;
+ bool was_monomorphic = expr->IsMonomorphic();
+ bool monomorphic = ComputeReceiverTypes(expr, receiver, &types);
+ if (!was_monomorphic && monomorphic) {
+ monomorphic = expr->ComputeTarget(types->first(), name);
}
- HValue* receiver =
- environment()->ExpressionStackAt(expr->arguments()->length());
if (monomorphic) {
- if (TryInlineBuiltinMethodCall(expr,
- receiver,
- receiver_map,
- expr->check_type())) {
+ Handle<Map> map = types->first();
+ if (TryInlineBuiltinMethodCall(expr, receiver, map, expr->check_type())) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
expr->target()->ShortPrint();
@@ -6952,7 +7005,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
call = PreProcessCall(
new(zone()) HCallNamed(context, name, argument_count));
} else {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ AddCheckConstantFunction(expr->holder(), receiver, map);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
@@ -6973,7 +7026,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
} else {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- return Bailout("possible direct call to eval");
+ return Bailout(kPossibleDirectCallToEval);
}
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
@@ -7000,7 +7053,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Pop();
- Add<HCheckFunction>(function, expr->target());
+ Add<HCheckValue>(function, expr->target());
// Replace the global object with the global receiver.
HGlobalReceiver* global_receiver = Add<HGlobalReceiver>(global_object);
@@ -7052,7 +7105,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
HGlobalReceiver* receiver = New<HGlobalReceiver>(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- Add<HCheckFunction>(function, expr->target());
+ Add<HCheckValue>(function, expr->target());
if (TryInlineBuiltinFunctionCall(expr, true)) { // Drop the function.
if (FLAG_trace_inlining) {
@@ -7115,7 +7168,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
- HValue* check = Add<HCheckFunction>(function, constructor);
+ HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
@@ -7204,10 +7257,10 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HCallNew* call;
+ HBinaryCall* call;
if (expr->target().is_identical_to(array_function)) {
Handle<Cell> cell = expr->allocation_info_cell();
- Add<HCheckFunction>(constructor, array_function);
+ Add<HCheckValue>(constructor, array_function);
call = new(zone()) HCallNewArray(context, constructor, argument_count,
cell, expr->elements_kind());
} else {
@@ -7241,7 +7294,7 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (expr->is_jsruntime()) {
- return Bailout("call to a JavaScript runtime function");
+ return Bailout(kCallToAJavaScriptRuntimeFunction);
}
const Runtime::Function* function = expr->function();
@@ -7281,8 +7334,6 @@ void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
case Token::DELETE: return VisitDelete(expr);
case Token::VOID: return VisitVoid(expr);
case Token::TYPEOF: return VisitTypeof(expr);
- case Token::SUB: return VisitSub(expr);
- case Token::BIT_NOT: return VisitBitNot(expr);
case Token::NOT: return VisitNot(expr);
default: UNREACHABLE();
}
@@ -7308,7 +7359,7 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
} else if (proxy != NULL) {
Variable* var = proxy->var();
if (var->IsUnallocated()) {
- Bailout("delete with global variable");
+ Bailout(kDeleteWithGlobalVariable);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is not
// really a variable, though we implement it as one. The
@@ -7318,7 +7369,7 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
- Bailout("delete with non-global variable");
+ Bailout(kDeleteWithNonGlobalVariable);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -7344,24 +7395,6 @@ void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
}
-void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- Handle<Type> operand_type = expr->expression()->bounds().lower;
- HValue* value = TruncateToNumber(Pop(), &operand_type);
- HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::SUB);
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
-void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
- CHECK_ALIVE(VisitForValue(expr->expression()));
- Handle<Type> operand_type = expr->expression()->bounds().lower;
- HValue* value = TruncateToNumber(Pop(), &operand_type);
- HInstruction* instr = BuildUnaryMathOp(value, operand_type, Token::BIT_NOT);
- return ast_context()->ReturnInstruction(instr, expr->id());
-}
-
-
void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
@@ -7435,13 +7468,33 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
HConstant* delta = (expr->op() == Token::INC)
? graph()->GetConstant1()
: graph()->GetConstantMinus1();
- HInstruction* instr = Add<HAdd>(Top(), delta);
+ HInstruction* instr = AddUncasted<HAdd>(Top(), delta);
+ if (instr->IsAdd()) {
+ HAdd* add = HAdd::cast(instr);
+ add->set_observed_input_representation(1, rep);
+ add->set_observed_input_representation(2, Representation::Smi());
+ }
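+ // AddUncasted<HAdd> is free to fold the increment; the representation
+ // feedback is attached only when a real HAdd was emitted: operand 1 gets
+ // the observed representation of the counted value, operand 2 is always
+ // the Smi constant +1/-1.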
instr->SetFlag(HInstruction::kCannotBeTagged);
instr->ClearAllSideEffects();
return instr;
}
+void HOptimizedGraphBuilder::BuildStoreForEffect(Expression* expr,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* key,
+ HValue* value) {
+ EffectContext for_effect(this);
+ Push(object);
+ if (key != NULL) Push(key);
+ Push(value);
+ BuildStore(expr, prop, ast_id, return_id);
+}
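+// Wrapping the store in an EffectContext keeps its result off the
+// expression stack; postfix ++/-- relies on this, since the value it
+// produces is the original input rather than the stored result.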
+
+
void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7450,7 +7503,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
if (proxy == NULL && prop == NULL) {
- return Bailout("invalid lhs in count operation");
+ return Bailout(kInvalidLhsInCountOperation);
}
// Match the full code generator stack by simulating an extra stack
@@ -7464,7 +7517,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == CONST) {
- return Bailout("unsupported count operation with const");
+ return Bailout(kUnsupportedCountOperationWithConst);
}
// Argument of the count operation is a variable, not a property.
ASSERT(prop == NULL);
@@ -7498,7 +7551,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
int count = current_info()->scope()->num_parameters();
for (int i = 0; i < count; ++i) {
if (var == current_info()->scope()->parameter(i)) {
- return Bailout("assignment to parameter in arguments object");
+ return Bailout(kAssignmentToParameterInArgumentsObject);
}
}
}
@@ -7515,89 +7568,45 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
case Variable::LOOKUP:
- return Bailout("lookup variable in count operation");
+ return Bailout(kLookupVariableInCountOperation);
}
- } else {
- // Argument of the count operation is a property.
- ASSERT(prop != NULL);
-
- if (prop->key()->IsPropertyName()) {
- // Named property.
- if (returns_original_input) Push(graph()->GetConstantUndefined());
-
- CHECK_ALIVE(VisitForValue(prop->obj()));
- HValue* object = Top();
-
- Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- Handle<Map> map;
- HInstruction* load = NULL;
- bool monomorphic = prop->IsMonomorphic();
- SmallMapList* types = prop->GetReceiverTypes();
- if (monomorphic) {
- map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
- }
- if (monomorphic) {
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- load = BuildCallGetter(object, map, getter, holder);
- } else {
- load = BuildLoadNamedMonomorphic(object, name, prop, map);
- }
- } else if (types != NULL && types->length() > 1) {
- load = TryLoadPolymorphicAsMonomorphic(prop, object, types, name);
- }
- if (load == NULL) load = BuildLoadNamedGeneric(object, name, prop);
- PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
- }
+ Drop(returns_original_input ? 2 : 1);
+ return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ }
- after = BuildIncrement(returns_original_input, expr);
- HValue* result = returns_original_input ? Pop() : after;
+ // Argument of the count operation is a property.
+ ASSERT(prop != NULL);
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
- return BuildStoreNamed(prop, expr->id(), expr->position(),
- expr->AssignmentId(), prop, object, after, result);
- } else {
- // Keyed property.
- if (returns_original_input) Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitForValue(prop->obj()));
+ HValue* object = Top();
- CHECK_ALIVE(VisitForValue(prop->obj()));
- CHECK_ALIVE(VisitForValue(prop->key()));
- HValue* obj = environment()->ExpressionStackAt(1);
- HValue* key = environment()->ExpressionStackAt(0);
-
- bool has_side_effects = false;
- HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, prop, prop->LoadId(), RelocInfo::kNoPosition,
- false, // is_store
- &has_side_effects);
- Push(load);
- if (has_side_effects) Add<HSimulate>(prop->LoadId(), REMOVABLE_SIMULATE);
-
- after = BuildIncrement(returns_original_input, expr);
- input = environment()->ExpressionStackAt(0);
-
- HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
- RelocInfo::kNoPosition,
- true, // is_store
- &has_side_effects);
-
- // Drop the key and the original value from the bailout environment.
- // Overwrite the receiver with the result of the operation, and the
- // placeholder with the original value if necessary.
- Drop(2);
- environment()->SetExpressionStackAt(0, after);
- if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- ASSERT(has_side_effects); // Stores always have side effects.
- Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
+ HValue* key = NULL;
+ if ((!prop->IsStringLength() &&
+ !prop->IsFunctionPrototype() &&
+ !prop->key()->IsPropertyName()) ||
+ prop->IsStringAccess()) {
+ CHECK_ALIVE(VisitForValue(prop->key()));
+ key = Top();
+ }
+
+ CHECK_ALIVE(PushLoad(prop, object, key, expr->position()));
+
+ after = BuildIncrement(returns_original_input, expr);
+
+ if (returns_original_input) {
+ input = Pop();
+ // Drop object and key to push them again in the effect context below.
+ Drop(key == NULL ? 1 : 2);
+ environment()->SetExpressionStackAt(0, input);
+ CHECK_ALIVE(BuildStoreForEffect(
+ expr, prop, expr->id(), expr->AssignmentId(), object, key, after));
+ return ast_context()->ReturnValue(Pop());
}
- Drop(returns_original_input ? 2 : 1);
- return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ environment()->SetExpressionStackAt(0, after);
+ return BuildStore(expr, prop, expr->id(), expr->AssignmentId());
}
@@ -7720,12 +7729,14 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
}
if (left_type->Is(Type::None())) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
+ Deoptimizer::SOFT);
// TODO(rossberg): we should be able to get rid of non-continuous defaults.
left_type = handle(Type::Any(), isolate());
}
if (right_type->Is(Type::None())) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
+ Deoptimizer::SOFT);
right_type = handle(Type::Any(), isolate());
}
HInstruction* instr = NULL;
@@ -7957,12 +7968,15 @@ void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
}
-static bool IsLiteralCompareBool(HValue* left,
+static bool IsLiteralCompareBool(Isolate* isolate,
+ HValue* left,
Token::Value op,
HValue* right) {
return op == Token::EQ_STRICT &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+ ((left->IsConstant() &&
+ HConstant::cast(left)->handle(isolate)->IsBoolean()) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->handle(isolate)->IsBoolean()));
}
@@ -8014,7 +8028,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
- if (IsLiteralCompareBool(left, op, right)) {
+ if (IsLiteralCompareBool(isolate(), left, op, right)) {
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
result->set_position(expr->position());
@@ -8052,7 +8066,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
- Add<HCheckFunction>(right, target);
+ Add<HCheckValue>(right, target);
HInstanceOfKnownGlobal* result =
new(zone()) HInstanceOfKnownGlobal(context, left, target);
result->set_position(expr->position());
@@ -8075,7 +8089,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// Cases handled below depend on collected type feedback. They should
// soft deoptimize when there is no type feedback.
if (combined_type->Is(Type::None())) {
- Add<HDeoptimize>(Deoptimizer::SOFT);
+ Add<HDeoptimize>("Insufficient type feedback for combined type "
+ "of binary operation",
+ Deoptimizer::SOFT);
combined_type = left_type = right_type = handle(Type::Any(), isolate());
}
@@ -8104,7 +8120,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
}
default:
- return Bailout("Unsupported non-primitive compare");
+ return Bailout(kUnsupportedNonPrimitiveCompare);
}
} else if (combined_type->Is(Type::InternalizedString()) &&
Token::IsEqualityOp(op)) {
@@ -8144,21 +8160,23 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
- HIfContinuation continuation;
if (expr->op() == Token::EQ_STRICT) {
- IfBuilder if_nil(this);
- if_nil.If<HCompareObjectEqAndBranch>(
- value, (nil == kNullValue) ? graph()->GetConstantNull()
- : graph()->GetConstantUndefined());
- if_nil.Then();
- if_nil.Else();
- if_nil.CaptureContinuation(&continuation);
+ HConstant* nil_constant = nil == kNullValue
+ ? graph()->GetConstantNull()
+ : graph()->GetConstantUndefined();
+ HCompareObjectEqAndBranch* instr =
+ New<HCompareObjectEqAndBranch>(value, nil_constant);
+ instr->set_position(expr->position());
+ return ast_context()->ReturnControl(instr, expr->id());
+ } else {
+ ASSERT_EQ(Token::EQ, expr->op());
+ Handle<Type> type = expr->combined_type()->Is(Type::None())
+ ? handle(Type::Any(), isolate_)
+ : expr->combined_type();
+ HIfContinuation continuation;
+ BuildCompareNil(value, type, expr->position(), &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
}
- Handle<Type> type = expr->combined_type()->Is(Type::None())
- ? handle(Type::Any(), isolate_) : expr->combined_type();
- BuildCompareNil(value, type, expr->position(), &continuation);
- return ast_context()->ReturnContinuation(&continuation, expr->id());
}
@@ -8175,160 +8193,88 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
- HValue* context,
Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- Handle<Object> allocation_site,
- int data_size,
- int pointer_size,
+ Handle<Object> allocation_site_object,
AllocationSiteMode mode) {
NoObservableSideEffectsScope no_effects(this);
- HInstruction* target = NULL;
- HInstruction* data_target = NULL;
-
- if (isolate()->heap()->GetPretenureMode() == TENURED) {
- if (data_size != 0) {
- HValue* size_in_bytes = Add<HConstant>(data_size);
- data_target = Add<HAllocate>(size_in_bytes, HType::JSObject(), TENURED,
- FIXED_DOUBLE_ARRAY_TYPE);
- Handle<Map> free_space_map = isolate()->factory()->free_space_map();
- AddStoreMapConstant(data_target, free_space_map);
- HObjectAccess access =
- HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
- Add<HStoreNamedField>(data_target, access, size_in_bytes);
- }
- if (pointer_size != 0) {
- HValue* size_in_bytes = Add<HConstant>(pointer_size);
- target = Add<HAllocate>(size_in_bytes, HType::JSObject(), TENURED,
- JS_OBJECT_TYPE);
- }
- } else {
- InstanceType instance_type = boilerplate_object->map()->instance_type();
- HValue* size_in_bytes = Add<HConstant>(data_size + pointer_size);
- target = Add<HAllocate>(size_in_bytes, HType::JSObject(), NOT_TENURED,
- instance_type);
- }
-
- int offset = 0;
- int data_offset = 0;
- BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object,
- allocation_site, target, &offset, data_target,
- &data_offset, mode);
- return target;
-}
-
+ Handle<FixedArrayBase> elements(boilerplate_object->elements());
+ int object_size = boilerplate_object->map()->instance_size();
+ int object_offset = object_size;
-void HOptimizedGraphBuilder::BuildEmitDeepCopy(
- Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- Handle<Object> allocation_site_object,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset,
- AllocationSiteMode mode) {
+ InstanceType instance_type = boilerplate_object->map()->instance_type();
bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- boilerplate_object->map()->CanTrackAllocationSite();
+ AllocationSite::CanTrack(instance_type);
// If using allocation sites, then the payload on the site should already
// be filled in as a valid (boilerplate) array.
ASSERT(!create_allocation_site_info ||
AllocationSite::cast(*allocation_site_object)->IsLiteralSite());
- HInstruction* allocation_site = NULL;
-
if (create_allocation_site_info) {
- allocation_site = Add<HConstant>(allocation_site_object);
+ object_size += AllocationMemento::kSize;
}
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(boilerplate_object->elements());
- Handle<FixedArrayBase> original_elements(
- original_boilerplate_object->elements());
- ElementsKind kind = boilerplate_object->map()->elements_kind();
+ ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
+ HType type = instance_type == JS_ARRAY_TYPE
+ ? HType::JSArray() : HType::JSObject();
+ HValue* object_size_constant = Add<HConstant>(object_size);
+ HInstruction* object = Add<HAllocate>(object_size_constant, type,
+ isolate()->heap()->GetPretenureMode(), instance_type);
+
+ BuildEmitObjectHeader(boilerplate_object, object);
+
+ if (create_allocation_site_info) {
+ HInstruction* allocation_site = Add<HConstant>(allocation_site_object);
+ BuildCreateAllocationMemento(object, object_offset, allocation_site);
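+ // The memento lands directly behind the object: object_offset is the
+ // instance size, and object_size was widened by AllocationMemento::kSize
+ // above to reserve the space.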
+ }
- int object_offset = *offset;
- int object_size = boilerplate_object->map()->instance_size();
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
- int elements_offset = 0;
- if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
- elements_offset = *data_offset;
- *data_offset += elements_size;
- } else {
- // Place elements right after this object.
- elements_offset = *offset + object_size;
- *offset += elements_size;
+ HInstruction* object_elements = NULL;
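+ // A separate backing-store allocation is made only when the elements
+ // actually need copying; elements_size is zero for empty or COW arrays.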
+ if (elements_size > 0) {
+ HValue* object_elements_size = Add<HConstant>(elements_size);
+ if (boilerplate_object->HasFastDoubleElements()) {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ } else {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ }
}
- // Increase the offset so that subsequent objects end up right after this
- // object (and it's elements if they are allocated in the same space).
- *offset += object_size;
+ BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
+
// Copy object elements if non-COW.
- HValue* object_elements = BuildEmitObjectHeader(boilerplate_object, target,
- data_target, object_offset, elements_offset, elements_size);
if (object_elements != NULL) {
- BuildEmitElements(elements, original_elements, kind, object_elements,
- target, offset, data_target, data_offset);
+ BuildEmitElements(boilerplate_object, elements, object_elements);
}
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- HValue* object_properties =
- Add<HInnerAllocatedObject>(target, object_offset);
- BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
- object_properties, target, offset, data_target, data_offset);
- }
-
- // Create allocation site info.
- if (mode == TRACK_ALLOCATION_SITE &&
- boilerplate_object->map()->CanTrackAllocationSite()) {
- elements_offset += AllocationMemento::kSize;
- *offset += AllocationMemento::kSize;
- BuildCreateAllocationMemento(target, JSArray::kSize, allocation_site);
+ BuildEmitInObjectProperties(boilerplate_object, object);
}
+ return object;
}
-HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
+void HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<JSObject> boilerplate_object,
- HInstruction* target,
- HInstruction* data_target,
- int object_offset,
- int elements_offset,
- int elements_size) {
+ HInstruction* object) {
ASSERT(boilerplate_object->properties()->length() == 0);
- HValue* result = NULL;
- HValue* object_header = Add<HInnerAllocatedObject>(target, object_offset);
Handle<Map> boilerplate_object_map(boilerplate_object->map());
- AddStoreMapConstant(object_header, boilerplate_object_map);
-
- HInstruction* elements;
- if (elements_size == 0) {
- Handle<Object> elements_field =
- Handle<Object>(boilerplate_object->elements(), isolate());
- elements = Add<HConstant>(elements_field);
- } else {
- if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
- elements = Add<HInnerAllocatedObject>(data_target, elements_offset);
- } else {
- elements = Add<HInnerAllocatedObject>(target, elements_offset);
- }
- result = elements;
- }
- Add<HStoreNamedField>(object_header, HObjectAccess::ForElementsPointer(),
- elements);
+ AddStoreMapConstant(object, boilerplate_object_map);
Handle<Object> properties_field =
Handle<Object>(boilerplate_object->properties(), isolate());
ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
HInstruction* properties = Add<HConstant>(properties_field);
HObjectAccess access = HObjectAccess::ForPropertiesPointer();
- Add<HStoreNamedField>(object_header, access, properties);
+ Add<HStoreNamedField>(object, access, properties);
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
@@ -8338,22 +8284,30 @@ HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
HInstruction* length = Add<HConstant>(length_field);
ASSERT(boilerplate_array->length()->IsSmi());
- Add<HStoreNamedField>(object_header, HObjectAccess::ForArrayLength(
+ Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
boilerplate_array->GetElementsKind()), length);
}
+}
- return result;
+
+void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements) {
+ ASSERT(boilerplate_object->properties()->length() == 0);
+ if (object_elements == NULL) {
+ Handle<Object> elements_field =
+ Handle<Object>(boilerplate_object->elements(), isolate());
+ object_elements = Add<HConstant>(elements_field);
+ }
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ object_elements);
}
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- HValue* object_properties,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HInstruction* object) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -8377,31 +8331,20 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
- isolate()));
- HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
- *offset);
-
- Add<HStoreNamedField>(object_properties, access, value_instruction);
- BuildEmitDeepCopy(value_object, original_value_object,
- Handle<Object>::null(), target,
- offset, data_target, data_offset,
- DONT_TRACK_ALLOCATION_SITE);
+ HInstruction* result =
+ BuildFastLiteral(value_object,
+ Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ Add<HStoreNamedField>(object, access, result);
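+ // Nested boilerplate objects now recurse through BuildFastLiteral and
+ // get their own allocations, replacing the removed scheme that carved
+ // sub-objects out of one big allocation via *offset/*data_offset.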
} else {
Representation representation = details.representation();
HInstruction* value_instruction = Add<HConstant>(value);
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
- HInstruction* double_box;
- if (data_target != NULL) {
- double_box = Add<HInnerAllocatedObject>(data_target, *data_offset);
- *data_offset += HeapNumber::kSize;
- } else {
- double_box = Add<HInnerAllocatedObject>(target, *offset);
- *offset += HeapNumber::kSize;
- }
+ HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
+ HInstruction* double_box =
+ Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
+ isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
@@ -8409,7 +8352,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
value_instruction = double_box;
}
- Add<HStoreNamedField>(object_properties, access, value_instruction);
+ Add<HStoreNamedField>(object, access, value_instruction);
}
}
@@ -8420,31 +8363,25 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
ASSERT(boilerplate_object->IsJSObject());
int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
HObjectAccess access = HObjectAccess::ForJSObjectOffset(property_offset);
- Add<HStoreNamedField>(object_properties, access, value_instruction);
+ Add<HStoreNamedField>(object, access, value_instruction);
}
}
void HOptimizedGraphBuilder::BuildEmitElements(
+ Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
- ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HValue* object_elements) {
+ ElementsKind kind = boilerplate_object->map()->elements_kind();
int elements_length = elements->length();
HValue* object_elements_length = Add<HConstant>(elements_length);
-
BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
// Copy elements backing store content.
if (elements->IsFixedDoubleArray()) {
BuildEmitFixedDoubleArray(elements, kind, object_elements);
} else if (elements->IsFixedArray()) {
- BuildEmitFixedArray(elements, original_elements, kind, object_elements,
- target, offset, data_target, data_offset);
+ BuildEmitFixedArray(elements, kind, object_elements);
} else {
UNREACHABLE();
}
@@ -8472,32 +8409,20 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
void HOptimizedGraphBuilder::BuildEmitFixedArray(
Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HValue* object_elements) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- Handle<FixedArray> original_fast_elements =
- Handle<FixedArray>::cast(original_elements);
for (int i = 0; i < elements_length; i++) {
Handle<Object> value(fast_elements->get(i), isolate());
HValue* key_constant = Add<HConstant>(i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_fast_elements->get(i), isolate()));
- HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
- *offset);
- Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
- BuildEmitDeepCopy(value_object, original_value_object,
- Handle<Object>::null(), target,
- offset, data_target, data_offset,
- DONT_TRACK_ALLOCATION_SITE);
+ HInstruction* result =
+ BuildFastLiteral(value_object,
+ Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ Add<HStoreKeyed>(object_elements, key_constant, result, kind);
} else {
HInstruction* value_instruction =
Add<HLoadKeyed>(boilerplate_elements, key_constant,
@@ -8567,7 +8492,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
}
break;
case Variable::LOOKUP:
- return Bailout("unsupported lookup slot in declaration");
+ return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
@@ -8605,7 +8530,7 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
break;
}
case Variable::LOOKUP:
- return Bailout("unsupported lookup slot in declaration");
+ return Bailout(kUnsupportedLookupSlotInDeclaration);
}
}
@@ -8726,7 +8651,7 @@ void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
- return Bailout("inlined runtime function: IsNonNegativeSmi");
+ return Bailout(kInlinedRuntimeFunctionIsNonNegativeSmi);
}
@@ -8742,8 +8667,7 @@ void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
CallRuntime* call) {
- return Bailout(
- "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+ return Bailout(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf);
}
@@ -8797,7 +8721,7 @@ void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
// The special form detected by IsClassOfTest is detected before we get here
// and does not cause a bailout.
- return Bailout("inlined runtime function: ClassOf");
+ return Bailout(kInlinedRuntimeFunctionClassOf);
}
@@ -9014,7 +8938,7 @@ void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
// Support for fast native caches.
void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
- return Bailout("inlined runtime function: GetFromCache");
+ return Bailout(kInlinedRuntimeFunctionGetFromCache);
}
@@ -9144,7 +9068,7 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
// Check whether two RegExps are equivalent
void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
- return Bailout("inlined runtime function: IsRegExpEquivalent");
+ return Bailout(kInlinedRuntimeFunctionIsRegExpEquivalent);
}
@@ -9158,18 +9082,18 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
- return Bailout("inlined runtime function: FastAsciiArrayJoin");
+ return Bailout(kInlinedRuntimeFunctionFastAsciiArrayJoin);
}
// Support for generators.
void HOptimizedGraphBuilder::GenerateGeneratorNext(CallRuntime* call) {
- return Bailout("inlined runtime function: GeneratorNext");
+ return Bailout(kInlinedRuntimeFunctionGeneratorNext);
}
void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
- return Bailout("inlined runtime function: GeneratorThrow");
+ return Bailout(kInlinedRuntimeFunctionGeneratorThrow);
}
@@ -9498,7 +9422,7 @@ void HTracer::TraceCompilation(CompilationInfo* info) {
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- ASSERT(!FLAG_parallel_recompilation);
+ ASSERT(!FLAG_concurrent_recompilation);
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
@@ -9506,7 +9430,7 @@ void HTracer::TraceLithium(const char* name, LChunk* chunk) {
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- ASSERT(!FLAG_parallel_recompilation);
+ ASSERT(!FLAG_concurrent_recompilation);
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
@@ -9716,15 +9640,15 @@ void HStatistics::Initialize(CompilationInfo* info) {
void HStatistics::Print() {
PrintF("Timing results:\n");
- int64_t sum = 0;
- for (int i = 0; i < timing_.length(); ++i) {
- sum += timing_[i];
+ TimeDelta sum;
+ for (int i = 0; i < times_.length(); ++i) {
+ sum += times_[i];
}
for (int i = 0; i < names_.length(); ++i) {
PrintF("%32s", names_[i]);
- double ms = static_cast<double>(timing_[i]) / 1000;
- double percent = static_cast<double>(timing_[i]) * 100 / sum;
+ double ms = times_[i].InMillisecondsF();
+ double percent = times_[i].PercentOf(sum);
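+ // Assumed semantics of the TimeDelta helpers used here: InMillisecondsF()
+ // is microseconds / 1000.0 and PercentOf(other) is 100 * this / other,
+ // mirroring the hand-written int64 arithmetic deleted above.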
PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
@@ -9734,29 +9658,29 @@ void HStatistics::Print() {
PrintF("----------------------------------------"
"---------------------------------------\n");
- int64_t total = create_graph_ + optimize_graph_ + generate_code_;
+ TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Create graph",
- static_cast<double>(create_graph_) / 1000,
- static_cast<double>(create_graph_) * 100 / total);
+ create_graph_.InMillisecondsF(),
+ create_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Optimize graph",
- static_cast<double>(optimize_graph_) / 1000,
- static_cast<double>(optimize_graph_) * 100 / total);
+ optimize_graph_.InMillisecondsF(),
+ optimize_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Generate and install code",
- static_cast<double>(generate_code_) / 1000,
- static_cast<double>(generate_code_) * 100 / total);
+ generate_code_.InMillisecondsF(),
+ generate_code_.PercentOf(total));
PrintF("----------------------------------------"
"---------------------------------------\n");
PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
"Total",
- static_cast<double>(total) / 1000,
- static_cast<double>(total) / full_code_gen_);
+ total.InMillisecondsF(),
+ total.TimesOf(full_code_gen_));
double source_size_in_kb = static_cast<double>(source_size_) / 1024;
double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(total) / 1000) / source_size_in_kb
+ ? total.InMillisecondsF() / source_size_in_kb
: 0;
double normalized_size_in_kb = source_size_in_kb > 0
? total_size_ / 1024 / source_size_in_kb
@@ -9767,17 +9691,17 @@ void HStatistics::Print() {
}
-void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
+void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (strcmp(names_[i], name) == 0) {
- timing_[i] += ticks;
+ times_[i] += time;
sizes_[i] += size;
return;
}
}
names_.Add(name);
- timing_.Add(ticks);
+ times_.Add(time);
sizes_.Add(size);
}
diff --git a/chromium/v8/src/hydrogen.h b/chromium/v8/src/hydrogen.h
index 20dc1a3e0ec..0ecbbca1bf6 100644
--- a/chromium/v8/src/hydrogen.h
+++ b/chromium/v8/src/hydrogen.h
@@ -53,10 +53,10 @@ class LChunk;
class LiveRange;
-class HBasicBlock: public ZoneObject {
+class HBasicBlock V8_FINAL : public ZoneObject {
public:
explicit HBasicBlock(HGraph* graph);
- virtual ~HBasicBlock() { }
+ ~HBasicBlock() { }
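+ // V8_FINAL expands to the C++11 `final` specifier when the compiler
+ // supports it (see include/v8config.h), so subclassing is now rejected at
+ // compile time and the destructor no longer needs to be virtual.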
// Simple accessors.
int block_id() const { return block_id_; }
@@ -220,7 +220,7 @@ class HBasicBlock: public ZoneObject {
};
-class HPredecessorIterator BASE_EMBEDDED {
+class HPredecessorIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HPredecessorIterator(HBasicBlock* block)
: predecessor_list_(block->predecessors()), current_(0) { }
@@ -235,7 +235,7 @@ class HPredecessorIterator BASE_EMBEDDED {
};
-class HInstructionIterator BASE_EMBEDDED {
+class HInstructionIterator V8_FINAL BASE_EMBEDDED {
public:
explicit HInstructionIterator(HBasicBlock* block)
: instr_(block->first()) {
@@ -255,7 +255,7 @@ class HInstructionIterator BASE_EMBEDDED {
};
-class HLoopInformation: public ZoneObject {
+class HLoopInformation V8_FINAL : public ZoneObject {
public:
HLoopInformation(HBasicBlock* loop_header, Zone* zone)
: back_edges_(4, zone),
@@ -264,7 +264,7 @@ class HLoopInformation: public ZoneObject {
stack_check_(NULL) {
blocks_.Add(loop_header, zone);
}
- virtual ~HLoopInformation() {}
+ ~HLoopInformation() {}
const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
@@ -303,7 +303,7 @@ class HLoopInformation: public ZoneObject {
class BoundsCheckTable;
class InductionVariableBlocksTable;
-class HGraph: public ZoneObject {
+class HGraph V8_FINAL : public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
@@ -320,7 +320,6 @@ class HGraph: public ZoneObject {
bool ProcessArgumentsObject();
void OrderBlocks();
void AssignDominators();
- void SetupInformativeDefinitions();
void RestoreActualValues();
// Returns false if there are phi-uses of the arguments-object
@@ -334,9 +333,9 @@ class HGraph: public ZoneObject {
void CollectPhis();
void set_undefined_constant(HConstant* constant) {
- undefined_constant_.set(constant);
+ constant_undefined_.set(constant);
}
- HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
+ HConstant* GetConstantUndefined() const { return constant_undefined_.get(); }
HConstant* GetConstant0();
HConstant* GetConstant1();
HConstant* GetConstantMinus1();
@@ -346,6 +345,14 @@ class HGraph: public ZoneObject {
HConstant* GetConstantNull();
HConstant* GetInvalidContext();
+ bool IsConstantUndefined(HConstant* constant);
+ bool IsConstant0(HConstant* constant);
+ bool IsConstant1(HConstant* constant);
+ bool IsConstantMinus1(HConstant* constant);
+ bool IsConstantTrue(HConstant* constant);
+ bool IsConstantFalse(HConstant* constant);
+ bool IsConstantHole(HConstant* constant);
+ bool IsConstantNull(HConstant* constant);
bool IsStandardConstant(HConstant* constant);
HBasicBlock* CreateBasicBlock();
@@ -360,6 +367,7 @@ class HGraph: public ZoneObject {
int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; }
int GetNextValueID(HValue* value) {
+ ASSERT(!disallow_adding_new_values_);
values_.Add(value, zone());
return values_.length() - 1;
}
@@ -367,8 +375,11 @@ class HGraph: public ZoneObject {
if (id >= 0 && id < values_.length()) return values_[id];
return NULL;
}
+ void DisallowAddingNewValues() {
+ disallow_adding_new_values_ = true;
+ }
- bool Optimize(SmartArrayPointer<char>* bailout_reason);
+ bool Optimize(BailoutReason* bailout_reason);
#ifdef DEBUG
void Verify(bool do_full_verify) const;
@@ -468,9 +479,6 @@ class HGraph: public ZoneObject {
phase.Run();
}
- void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
- void SetupInformativeDefinitionsInBlock(HBasicBlock* block);
- void SetupInformativeDefinitionsRecursively(HBasicBlock* block);
void EliminateRedundantBoundsChecksUsingInductionVariables();
Isolate* isolate_;
@@ -481,7 +489,7 @@ class HGraph: public ZoneObject {
ZoneList<HValue*> values_;
ZoneList<HPhi*>* phi_list_;
ZoneList<HInstruction*>* uint32_instructions_;
- SetOncePointer<HConstant> undefined_constant_;
+ SetOncePointer<HConstant> constant_undefined_;
SetOncePointer<HConstant> constant_0_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_minus1_;
@@ -504,6 +512,7 @@ class HGraph: public ZoneObject {
int type_change_checksum_;
int maximum_environment_size_;
int no_side_effects_scope_count_;
+ bool disallow_adding_new_values_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -523,7 +532,7 @@ enum FrameType {
};
-class HEnvironment: public ZoneObject {
+class HEnvironment V8_FINAL : public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
@@ -557,9 +566,6 @@ class HEnvironment: public ZoneObject {
void set_entry(HEnterInlined* entry) { entry_ = entry; }
int length() const { return values_.length(); }
- bool is_special_index(int i) const {
- return i >= parameter_count() && i < parameter_count() + specials_count();
- }
int first_expression_index() const {
return parameter_count() + specials_count() + local_count();
@@ -678,8 +684,15 @@ class HEnvironment: public ZoneObject {
}
bool is_local_index(int i) const {
- return i >= first_local_index() &&
- i < first_expression_index();
+ return i >= first_local_index() && i < first_expression_index();
+ }
+
+ bool is_parameter_index(int i) const {
+ return i >= 0 && i < parameter_count();
+ }
+
+ bool is_special_index(int i) const {
+ return i >= parameter_count() && i < parameter_count() + specials_count();
}
void PrintTo(StringStream* stream);
@@ -793,33 +806,37 @@ class AstContext {
};
-class EffectContext: public AstContext {
+class EffectContext V8_FINAL : public AstContext {
public:
explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id);
+ BailoutId ast_id) V8_OVERRIDE;
};
-class ValueContext: public AstContext {
+class ValueContext V8_FINAL : public AstContext {
public:
ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id);
+ BailoutId ast_id) V8_OVERRIDE;
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -828,7 +845,7 @@ class ValueContext: public AstContext {
};
-class TestContext: public AstContext {
+class TestContext V8_FINAL : public AstContext {
public:
TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
@@ -840,11 +857,13 @@ class TestContext: public AstContext {
if_false_(if_false) {
}
- virtual void ReturnValue(HValue* value);
- virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id);
- virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id);
+ virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+ virtual void ReturnInstruction(HInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
+ virtual void ReturnControl(HControlInstruction* instr,
+ BailoutId ast_id) V8_OVERRIDE;
virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id);
+ BailoutId ast_id) V8_OVERRIDE;
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
@@ -866,7 +885,7 @@ class TestContext: public AstContext {
};
-class FunctionState {
+class FunctionState V8_FINAL {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
@@ -933,7 +952,7 @@ class FunctionState {
};
-class HIfContinuation {
+class HIfContinuation V8_FINAL {
public:
HIfContinuation() { continuation_captured_ = false; }
~HIfContinuation() { ASSERT(!continuation_captured_); }
@@ -1056,7 +1075,7 @@ class HGraphBuilder {
template<class I, class P1, class P2>
I* Add(P1 p1, P2 p2) {
- return static_cast<I*>(AddUncasted<I>(p1, p2));
+ return I::cast(AddUncasted<I>(p1, p2));
}
template<class I, class P1, class P2, class P3>
@@ -1223,10 +1242,9 @@ class HGraphBuilder {
bool is_jsarray);
HInstruction* BuildUncheckedMonomorphicElementAccess(
- HValue* object,
+ HValue* checked_object,
HValue* key,
HValue* val,
- HCheckMaps* mapcheck,
bool is_js_array,
ElementsKind elements_kind,
bool is_store,
@@ -1251,13 +1269,10 @@ class HGraphBuilder {
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode);
- HLoadNamedField* BuildLoadNamedField(
- HValue* object,
- HObjectAccess access,
- HValue* typecheck);
- HInstruction* BuildLoadStringLength(HValue* object, HValue* typecheck);
- HStoreNamedField* AddStoreMapConstant(HValue *object, Handle<Map>);
- HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck);
+ HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access);
+ HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
+ HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>);
+ HLoadNamedField* AddLoadElements(HValue* object);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
@@ -1266,12 +1281,13 @@ class HGraphBuilder {
void PushAndAdd(HInstruction* instr);
- void FinishExitWithHardDeoptimization(HBasicBlock* continuation);
+ void FinishExitWithHardDeoptimization(const char* reason,
+ HBasicBlock* continuation);
void AddIncrementCounter(StatsCounter* counter,
HValue* context);
- class IfBuilder {
+ class IfBuilder V8_FINAL {
public:
explicit IfBuilder(HGraphBuilder* builder,
int position = RelocInfo::kNoPosition);
@@ -1370,10 +1386,10 @@ class HGraphBuilder {
void Else();
void End();
- void Deopt();
- void ElseDeopt() {
+ void Deopt(const char* reason);
+ void ElseDeopt(const char* reason) {
Else();
- Deopt();
+ Deopt(reason);
}
void Return(HValue* value);
@@ -1401,7 +1417,7 @@ class HGraphBuilder {
HBasicBlock* merge_block_;
};
- class LoopBuilder {
+ class LoopBuilder V8_FINAL {
public:
enum Direction {
kPreIncrement,
@@ -1413,6 +1429,11 @@ class HGraphBuilder {
LoopBuilder(HGraphBuilder* builder,
HValue* context,
Direction direction);
+ LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ Direction direction,
+ HValue* increment_amount);
+
~LoopBuilder() {
ASSERT(finished_);
}
@@ -1421,6 +1442,9 @@ class HGraphBuilder {
HValue* initial,
HValue* terminating,
Token::Value token);
+
+ void Break();
+
void EndBody();
private:
@@ -1428,11 +1452,13 @@ class HGraphBuilder {
HGraphBuilder* builder_;
HValue* context_;
+ HValue* increment_amount_;
HInstruction* increment_;
HPhi* phi_;
HBasicBlock* header_block_;
HBasicBlock* body_block_;
HBasicBlock* exit_block_;
+ HBasicBlock* exit_trampoline_block_;
Direction direction_;
bool finished_;
};
@@ -1442,7 +1468,7 @@ class HGraphBuilder {
void BuildNewSpaceArrayCheck(HValue* length,
ElementsKind kind);
- class JSArrayBuilder {
+ class JSArrayBuilder V8_FINAL {
public:
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
@@ -1532,9 +1558,6 @@ class HGraphBuilder {
ElementsKind kind,
int length);
- HInstruction* BuildUnaryMathOp(
- HValue* value, Handle<Type> type, Token::Value token);
-
void BuildCompareNil(
HValue* value,
Handle<Type> type,
@@ -1545,6 +1568,11 @@ class HGraphBuilder {
int previous_object_size,
HValue* payload);
+ HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
+ CompilationInfo* info);
+ HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+ Handle<JSObject> holder);
+
HInstruction* BuildGetNativeContext();
HInstruction* BuildGetArrayFunction();
@@ -1562,13 +1590,13 @@ class HGraphBuilder {
template<>
inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
- Deoptimizer::BailoutType type) {
+ const char* reason, Deoptimizer::BailoutType type) {
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return NULL;
}
if (current_block()->IsDeoptimizing()) return NULL;
- HDeoptimize* instr = New<HDeoptimize>(type);
+ HDeoptimize* instr = New<HDeoptimize>(reason, type);
AddInstruction(instr);
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_inserted()->Increment();
@@ -1581,8 +1609,8 @@ inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
template<>
inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
- Deoptimizer::BailoutType type) {
- return static_cast<HDeoptimize*>(AddUncasted<HDeoptimize>(type));
+ const char* reason, Deoptimizer::BailoutType type) {
+ return static_cast<HDeoptimize*>(AddUncasted<HDeoptimize>(reason, type));
}
@@ -1597,22 +1625,6 @@ inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(
template<>
-inline HInstruction* HGraphBuilder::NewUncasted<HLoadNamedField>(
- HValue* object, HObjectAccess access) {
- return NewUncasted<HLoadNamedField>(object, access,
- static_cast<HValue*>(NULL));
-}
-
-
-template<>
-inline HInstruction* HGraphBuilder::AddUncasted<HLoadNamedField>(
- HValue* object, HObjectAccess access) {
- return AddUncasted<HLoadNamedField>(object, access,
- static_cast<HValue*>(NULL));
-}
-
-
-template<>
inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(BailoutId id) {
return AddUncasted<HSimulate>(id, FIXED_SIMULATE);
}
@@ -1640,12 +1652,13 @@ inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
}
-class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
+class HOptimizedGraphBuilder V8_FINAL
+ : public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
// can have a separate lifetime.
- class BreakAndContinueInfo BASE_EMBEDDED {
+ class BreakAndContinueInfo V8_FINAL BASE_EMBEDDED {
public:
explicit BreakAndContinueInfo(BreakableStatement* target,
int drop_extra = 0)
@@ -1671,7 +1684,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
// A helper class to maintain a stack of current BreakAndContinueInfo
// structures mirroring BreakableStatement nesting.
- class BreakAndContinueScope BASE_EMBEDDED {
+ class BreakAndContinueScope V8_FINAL BASE_EMBEDDED {
public:
BreakAndContinueScope(BreakAndContinueInfo* info,
HOptimizedGraphBuilder* owner)
@@ -1697,7 +1710,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
explicit HOptimizedGraphBuilder(CompilationInfo* info);
- virtual bool BuildGraph();
+ virtual bool BuildGraph() V8_OVERRIDE;
// Simple accessors.
BreakAndContinueScope* break_scope() const { return break_scope_; }
@@ -1707,7 +1720,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* context() { return environment()->context(); }
- void Bailout(const char* reason);
+ void Bailout(BailoutReason reason);
HBasicBlock* CreateJoin(HBasicBlock* first,
HBasicBlock* second,
@@ -1788,8 +1801,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void VisitDelete(UnaryOperation* expr);
void VisitVoid(UnaryOperation* expr);
void VisitTypeof(UnaryOperation* expr);
- void VisitSub(UnaryOperation* expr);
- void VisitBitNot(UnaryOperation* expr);
void VisitNot(UnaryOperation* expr);
void VisitComma(BinaryOperation* expr);
@@ -1885,9 +1896,9 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
void SetUpScope(Scope* scope);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
+ virtual void VisitStatements(ZoneList<Statement*>* statements) V8_OVERRIDE;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node) V8_OVERRIDE;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -1917,7 +1928,9 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
bool TryInlineCall(Call* expr, bool drop_extra = false);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
- bool TryInlineGetter(Handle<JSFunction> getter, Property* prop);
+ bool TryInlineGetter(Handle<JSFunction> getter,
+ BailoutId ast_id,
+ BailoutId return_id);
bool TryInlineSetter(Handle<JSFunction> setter,
BailoutId id,
BailoutId assignment_id,
@@ -1945,26 +1958,24 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicLoadNamedField(Property* expr,
+ void HandlePolymorphicLoadNamedField(int position,
+ BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name);
- HInstruction* TryLoadPolymorphicAsMonomorphic(Property* expr,
- HValue* object,
+ HInstruction* TryLoadPolymorphicAsMonomorphic(HValue* object,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicStoreNamedField(int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
- HValue* result,
SmallMapList* types,
Handle<String> name);
bool TryStorePolymorphicAsMonomorphic(int position,
BailoutId assignment_id,
HValue* object,
HValue* value,
- HValue* result,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicCallNamed(Call* expr,
@@ -1997,6 +2008,8 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* val,
SmallMapList* maps);
+ LoadKeyedHoleMode BuildKeyedHoleMode(Handle<Map> map);
+
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@@ -2008,7 +2021,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* HandlePolymorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
- Expression* prop,
+ SmallMapList* maps,
BailoutId ast_id,
int position,
bool is_store,
@@ -2033,19 +2046,31 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Handle<JSObject> holder);
HInstruction* BuildLoadNamedMonomorphic(HValue* object,
Handle<String> name,
- Property* expr,
Handle<Map> map);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
- void BuildStoreNamed(Expression* expression,
- BailoutId id,
- int position,
- BailoutId assignment_id,
- Property* prop,
- HValue* object,
- HValue* store_value,
- HValue* result_value);
+ void BuildLoad(Property* property,
+ int position,
+ BailoutId ast_id);
+ void PushLoad(Property* property,
+ HValue* object,
+ HValue* key,
+ int position);
+
+ void BuildStoreForEffect(Expression* expression,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ HValue* object,
+ HValue* key,
+ HValue* value);
+
+ void BuildStore(Expression* expression,
+ Property* prop,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool is_uninitialized = false);
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
@@ -2067,60 +2092,31 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HInstruction* BuildThisFunction();
- HInstruction* BuildFastLiteral(HValue* context,
- Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
+ HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
Handle<Object> allocation_site,
- int data_size,
- int pointer_size,
AllocationSiteMode mode);
- void BuildEmitDeepCopy(Handle<JSObject> boilerplat_object,
- Handle<JSObject> object,
- Handle<Object> allocation_site,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset,
- AllocationSiteMode mode);
-
- MUST_USE_RESULT HValue* BuildEmitObjectHeader(
- Handle<JSObject> boilerplat_object,
- HInstruction* target,
- HInstruction* data_target,
- int object_offset,
- int elements_offset,
- int elements_size);
+ void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object);
+
+ void BuildInitElementsInObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements);
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- HValue* object_properties,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
-
- void BuildEmitElements(Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
- ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
+ HInstruction* object);
+
+ void BuildEmitElements(Handle<JSObject> boilerplate_object,
+ Handle<FixedArrayBase> elements,
+ HValue* object_elements);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements);
void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
+ HValue* object_elements);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@@ -2166,44 +2162,40 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Zone* AstContext::zone() const { return owner_->zone(); }
-class HStatistics: public Malloced {
+class HStatistics V8_FINAL : public Malloced {
public:
HStatistics()
- : timing_(5),
+ : times_(5),
names_(5),
sizes_(5),
- create_graph_(0),
- optimize_graph_(0),
- generate_code_(0),
total_size_(0),
- full_code_gen_(0),
source_size_(0) { }
void Initialize(CompilationInfo* info);
void Print();
- void SaveTiming(const char* name, int64_t ticks, unsigned size);
+ void SaveTiming(const char* name, TimeDelta time, unsigned size);
- void IncrementFullCodeGen(int64_t full_code_gen) {
+ void IncrementFullCodeGen(TimeDelta full_code_gen) {
full_code_gen_ += full_code_gen;
}
- void IncrementSubtotals(int64_t create_graph,
- int64_t optimize_graph,
- int64_t generate_code) {
+ void IncrementSubtotals(TimeDelta create_graph,
+ TimeDelta optimize_graph,
+ TimeDelta generate_code) {
create_graph_ += create_graph;
optimize_graph_ += optimize_graph;
generate_code_ += generate_code;
}
private:
- List<int64_t> timing_;
+ List<TimeDelta> times_;
List<const char*> names_;
List<unsigned> sizes_;
- int64_t create_graph_;
- int64_t optimize_graph_;
- int64_t generate_code_;
+ TimeDelta create_graph_;
+ TimeDelta optimize_graph_;
+ TimeDelta generate_code_;
unsigned total_size_;
- int64_t full_code_gen_;
+ TimeDelta full_code_gen_;
double source_size_;
};
@@ -2225,7 +2217,7 @@ class HPhase : public CompilationPhase {
};
-class HTracer: public Malloced {
+class HTracer V8_FINAL : public Malloced {
public:
explicit HTracer(int isolate_id)
: trace_(&string_allocator_), indent_(0) {
@@ -2246,7 +2238,7 @@ class HTracer: public Malloced {
void TraceLiveRanges(const char* name, LAllocator* allocator);
private:
- class Tag BASE_EMBEDDED {
+ class Tag V8_FINAL BASE_EMBEDDED {
public:
Tag(HTracer* tracer, const char* name) {
name_ = name;
@@ -2311,7 +2303,7 @@ class HTracer: public Malloced {
};
-class NoObservableSideEffectsScope {
+class NoObservableSideEffectsScope V8_FINAL {
public:
explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
builder_(builder) {
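
Most of the hydrogen.h churn above is annotation rather than behavior:
classes gain V8_FINAL and virtual overrides gain V8_OVERRIDE. These macros
come from the new include/v8config.h header in this update; the shipped
definitions key off per-compiler feature tests, but the rough idea is the
following sketch:

#if __cplusplus >= 201103L
# define V8_FINAL final        // Subclassing becomes a compile error.
# define V8_OVERRIDE override  // Non-overriding declarations fail to build.
#else
# define V8_FINAL
# define V8_OVERRIDE
#endif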
diff --git a/chromium/v8/src/i18n.cc b/chromium/v8/src/i18n.cc
new file mode 100644
index 00000000000..0ae19c8232d
--- /dev/null
+++ b/chromium/v8/src/i18n.cc
@@ -0,0 +1,1070 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n.h"
+
+#include "unicode/brkiter.h"
+#include "unicode/calendar.h"
+#include "unicode/coll.h"
+#include "unicode/curramt.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/numsys.h"
+#include "unicode/rbbi.h"
+#include "unicode/smpdtfmt.h"
+#include "unicode/timezone.h"
+#include "unicode/uchar.h"
+#include "unicode/ucol.h"
+#include "unicode/ucurr.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool ExtractStringSetting(Isolate* isolate,
+ Handle<JSObject> options,
+ const char* key,
+ icu::UnicodeString* setting) {
+ Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
+ MaybeObject* maybe_object = options->GetProperty(*str);
+ Object* object;
+ if (maybe_object->ToObject(&object) && object->IsString()) {
+ v8::String::Utf8Value utf8_string(
+ v8::Utils::ToLocal(Handle<String>(String::cast(object))));
+ *setting = icu::UnicodeString::fromUTF8(*utf8_string);
+ return true;
+ }
+ return false;
+}
+
+
+bool ExtractIntegerSetting(Isolate* isolate,
+ Handle<JSObject> options,
+ const char* key,
+ int32_t* value) {
+ Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
+ MaybeObject* maybe_object = options->GetProperty(*str);
+ Object* object;
+ if (maybe_object->ToObject(&object) && object->IsNumber()) {
+ object->ToInt32(value);
+ return true;
+ }
+ return false;
+}
+
+
+bool ExtractBooleanSetting(Isolate* isolate,
+ Handle<JSObject> options,
+ const char* key,
+ bool* value) {
+ Handle<String> str = isolate->factory()->NewStringFromAscii(CStrVector(key));
+ MaybeObject* maybe_object = options->GetProperty(*str);
+ Object* object;
+ if (maybe_object->ToObject(&object) && object->IsBoolean()) {
+ *value = object->BooleanValue();
+ return true;
+ }
+ return false;
+}
+
+
+icu::SimpleDateFormat* CreateICUDateFormat(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+  // Create the time zone as specified by the user. We have to re-create the
+  // time zone since the calendar takes ownership of it.
+ icu::TimeZone* tz = NULL;
+ icu::UnicodeString timezone;
+ if (ExtractStringSetting(isolate, options, "timeZone", &timezone)) {
+ tz = icu::TimeZone::createTimeZone(timezone);
+ } else {
+ tz = icu::TimeZone::createDefault();
+ }
+
+  // Create a calendar using the locale, and apply the time zone to it.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Calendar* calendar =
+ icu::Calendar::createInstance(tz, icu_locale, status);
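+  // Note: createInstance() adopts |tz|, so |tz| must not be deleted here.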
+
+  // Make a formatter from the skeleton. The calendar and numbering system
+  // are added to the locale as Unicode extensions (if specified at all).
+ icu::SimpleDateFormat* date_format = NULL;
+ icu::UnicodeString skeleton;
+ if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
+ icu::DateTimePatternGenerator* generator =
+ icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+ icu::UnicodeString pattern;
+ if (U_SUCCESS(status)) {
+ pattern = generator->getBestPattern(skeleton, status);
+ delete generator;
+ }
+
+ date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
+ if (U_SUCCESS(status)) {
+ date_format->adoptCalendar(calendar);
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete calendar;
+ delete date_format;
+ date_format = NULL;
+ }
+
+ return date_format;
+}
+
+
+void SetResolvedDateSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::SimpleDateFormat* date_format,
+ Handle<JSObject> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString pattern;
+ date_format->toPattern(pattern);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("pattern")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length())),
+ NONE,
+ kNonStrictMode);
+
+ // Set time zone and calendar.
+ const icu::Calendar* calendar = date_format->getCalendar();
+ const char* calendar_name = calendar->getType();
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("calendar")),
+ isolate->factory()->NewStringFromAscii(CStrVector(calendar_name)),
+ NONE,
+ kNonStrictMode);
+
+ const icu::TimeZone& tz = calendar->getTimeZone();
+ icu::UnicodeString time_zone;
+ tz.getID(time_zone);
+
+ icu::UnicodeString canonical_time_zone;
+ icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
+ if (U_SUCCESS(status)) {
+ if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
+ isolate->factory()->NewStringFromAscii(CStrVector("UTC")),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("timeZone")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(
+ canonical_time_zone.getBuffer()),
+ canonical_time_zone.length())),
+ NONE,
+ kNonStrictMode);
+ }
+ }
+
+  // Ugly hack. ICU doesn't expose the numbering system in any way, so we
+  // have to assume that for a given locale the NumberingSystem constructor
+  // produces the same digits as NumberFormat/Calendar would.
+ status = U_ZERO_ERROR;
+ icu::NumberingSystem* numbering_system =
+ icu::NumberingSystem::createInstance(icu_locale, status);
+ if (U_SUCCESS(status)) {
+ const char* ns = numbering_system->getName();
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->NewStringFromAscii(CStrVector(ns)),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->undefined_value(),
+ NONE,
+ kNonStrictMode);
+ }
+ delete numbering_system;
+
+  // Set the locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+
+template<int internal_fields, EternalHandles::SingletonHandle field>
+Handle<ObjectTemplateInfo> GetEternal(Isolate* isolate) {
+ if (isolate->eternal_handles()->Exists(field)) {
+ return Handle<ObjectTemplateInfo>::cast(
+ isolate->eternal_handles()->GetSingleton(field));
+ }
+ v8::Local<v8::ObjectTemplate> raw_template(v8::ObjectTemplate::New());
+ raw_template->SetInternalFieldCount(internal_fields);
+ return Handle<ObjectTemplateInfo>::cast(
+ isolate->eternal_handles()->CreateSingleton(
+ isolate,
+ *v8::Utils::OpenHandle(*raw_template),
+ field));
+}
+
+
+icu::DecimalFormat* CreateICUNumberFormat(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+  // Make a formatter from the options. The numbering system is added to the
+  // locale as a Unicode extension (if it was specified at all).
+ UErrorCode status = U_ZERO_ERROR;
+ icu::DecimalFormat* number_format = NULL;
+ icu::UnicodeString style;
+ icu::UnicodeString currency;
+ if (ExtractStringSetting(isolate, options, "style", &style)) {
+ if (style == UNICODE_STRING_SIMPLE("currency")) {
+ icu::UnicodeString display;
+ ExtractStringSetting(isolate, options, "currency", &currency);
+ ExtractStringSetting(isolate, options, "currencyDisplay", &display);
+
+#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
+ icu::NumberFormat::EStyles format_style;
+ if (display == UNICODE_STRING_SIMPLE("code")) {
+ format_style = icu::NumberFormat::kIsoCurrencyStyle;
+ } else if (display == UNICODE_STRING_SIMPLE("name")) {
+ format_style = icu::NumberFormat::kPluralCurrencyStyle;
+ } else {
+ format_style = icu::NumberFormat::kCurrencyStyle;
+ }
+#else // ICU version is 4.8 or above (we ignore versions below 4.0).
+ UNumberFormatStyle format_style;
+ if (display == UNICODE_STRING_SIMPLE("code")) {
+ format_style = UNUM_CURRENCY_ISO;
+ } else if (display == UNICODE_STRING_SIMPLE("name")) {
+ format_style = UNUM_CURRENCY_PLURAL;
+ } else {
+ format_style = UNUM_CURRENCY;
+ }
+#endif
+
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, format_style, status));
+ } else if (style == UNICODE_STRING_SIMPLE("percent")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createPercentInstance(icu_locale, status));
+ if (U_FAILURE(status)) {
+ delete number_format;
+ return NULL;
+ }
+      // Keep at least one fraction digit so that 1.1% isn't rounded to a
+      // whole percent.
+ number_format->setMinimumFractionDigits(1);
+ } else {
+ // Make a decimal instance by default.
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete number_format;
+ return NULL;
+ }
+
+ // Set all options.
+ if (!currency.isEmpty()) {
+ number_format->setCurrency(currency.getBuffer(), status);
+ }
+
+ int32_t digits;
+ if (ExtractIntegerSetting(
+ isolate, options, "minimumIntegerDigits", &digits)) {
+ number_format->setMinimumIntegerDigits(digits);
+ }
+
+ if (ExtractIntegerSetting(
+ isolate, options, "minimumFractionDigits", &digits)) {
+ number_format->setMinimumFractionDigits(digits);
+ }
+
+ if (ExtractIntegerSetting(
+ isolate, options, "maximumFractionDigits", &digits)) {
+ number_format->setMaximumFractionDigits(digits);
+ }
+
+ bool significant_digits_used = false;
+ if (ExtractIntegerSetting(
+ isolate, options, "minimumSignificantDigits", &digits)) {
+ number_format->setMinimumSignificantDigits(digits);
+ significant_digits_used = true;
+ }
+
+ if (ExtractIntegerSetting(
+ isolate, options, "maximumSignificantDigits", &digits)) {
+ number_format->setMaximumSignificantDigits(digits);
+ significant_digits_used = true;
+ }
+
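+  // ICU formats with either significant-digit limits or the integer/fraction
+  // digit limits above, not both; record which mode the options selected.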
+ number_format->setSignificantDigitsUsed(significant_digits_used);
+
+ bool grouping;
+ if (ExtractBooleanSetting(isolate, options, "useGrouping", &grouping)) {
+ number_format->setGroupingUsed(grouping);
+ }
+
+ // Set rounding mode.
+ number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
+
+ return number_format;
+}
+
+
+void SetResolvedNumberSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::DecimalFormat* number_format,
+ Handle<JSObject> resolved) {
+ icu::UnicodeString pattern;
+ number_format->toPattern(pattern);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("pattern")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length())),
+ NONE,
+ kNonStrictMode);
+
+ // Set resolved currency code in options.currency if not empty.
+ icu::UnicodeString currency(number_format->getCurrency());
+ if (!currency.isEmpty()) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("currency")),
+ isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(currency.getBuffer()),
+ currency.length())),
+ NONE,
+ kNonStrictMode);
+ }
+
+  // Ugly hack. ICU doesn't expose the numbering system in any way, so we
+  // have to assume that for a given locale the NumberingSystem constructor
+  // produces the same digits as NumberFormat/Calendar would.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::NumberingSystem* numbering_system =
+ icu::NumberingSystem::createInstance(icu_locale, status);
+ if (U_SUCCESS(status)) {
+ const char* ns = numbering_system->getName();
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->NewStringFromAscii(CStrVector(ns)),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberingSystem")),
+ isolate->factory()->undefined_value(),
+ NONE,
+ kNonStrictMode);
+ }
+ delete numbering_system;
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("useGrouping")),
+ isolate->factory()->ToBoolean(number_format->isGroupingUsed()),
+ NONE,
+ kNonStrictMode);
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumIntegerDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMinimumIntegerDigits()),
+ NONE,
+ kNonStrictMode);
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumFractionDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMinimumFractionDigits()),
+ NONE,
+ kNonStrictMode);
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("maximumFractionDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMaximumFractionDigits()),
+ NONE,
+ kNonStrictMode);
+
+ Handle<String> key = isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumSignificantDigits"));
+ if (resolved->HasLocalProperty(*key)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("minimumSignificantDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMinimumSignificantDigits()),
+ NONE,
+ kNonStrictMode);
+ }
+
+ key = isolate->factory()->NewStringFromAscii(
+ CStrVector("maximumSignificantDigits"));
+ if (resolved->HasLocalProperty(*key)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(
+ CStrVector("maximumSignificantDigits")),
+ isolate->factory()->NewNumberFromInt(
+ number_format->getMaximumSignificantDigits()),
+ NONE,
+ kNonStrictMode);
+ }
+
+  // Set the locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+
+icu::Collator* CreateICUCollator(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+ // Make collator from options.
+ icu::Collator* collator = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ collator = icu::Collator::createInstance(icu_locale, status);
+
+ if (U_FAILURE(status)) {
+ delete collator;
+ return NULL;
+ }
+
+ // Set flags first, and then override them with sensitivity if necessary.
+ bool numeric;
+ if (ExtractBooleanSetting(isolate, options, "numeric", &numeric)) {
+ collator->setAttribute(
+ UCOL_NUMERIC_COLLATION, numeric ? UCOL_ON : UCOL_OFF, status);
+ }
+
+ // Normalization is always on, by the spec. We are free to optimize
+ // if the strings are already normalized (but we don't have a way to tell
+ // that right now).
+ collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
+
+ icu::UnicodeString case_first;
+ if (ExtractStringSetting(isolate, options, "caseFirst", &case_first)) {
+ if (case_first == UNICODE_STRING_SIMPLE("upper")) {
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
+ } else if (case_first == UNICODE_STRING_SIMPLE("lower")) {
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
+ } else {
+ // Default (false/off).
+ collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
+ }
+ }
+
+ icu::UnicodeString sensitivity;
+ if (ExtractStringSetting(isolate, options, "sensitivity", &sensitivity)) {
+ if (sensitivity == UNICODE_STRING_SIMPLE("base")) {
+ collator->setStrength(icu::Collator::PRIMARY);
+ } else if (sensitivity == UNICODE_STRING_SIMPLE("accent")) {
+ collator->setStrength(icu::Collator::SECONDARY);
+ } else if (sensitivity == UNICODE_STRING_SIMPLE("case")) {
+ collator->setStrength(icu::Collator::PRIMARY);
+ collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
+ } else {
+ // variant (default)
+ collator->setStrength(icu::Collator::TERTIARY);
+ }
+ }
+
+ bool ignore;
+ if (ExtractBooleanSetting(isolate, options, "ignorePunctuation", &ignore)) {
+ if (ignore) {
+ collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
+ }
+ }
+
+ return collator;
+}
+
+
+void SetResolvedCollatorSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::Collator* collator,
+ Handle<JSObject> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("numeric")),
+ isolate->factory()->ToBoolean(
+ collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
+ NONE,
+ kNonStrictMode);
+
+ switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
+ case UCOL_LOWER_FIRST:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
+ isolate->factory()->NewStringFromAscii(CStrVector("lower")),
+ NONE,
+ kNonStrictMode);
+ break;
+ case UCOL_UPPER_FIRST:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
+ isolate->factory()->NewStringFromAscii(CStrVector("upper")),
+ NONE,
+ kNonStrictMode);
+ break;
+ default:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("caseFirst")),
+ isolate->factory()->NewStringFromAscii(CStrVector("false")),
+ NONE,
+ kNonStrictMode);
+ }
+
+ switch (collator->getAttribute(UCOL_STRENGTH, status)) {
+ case UCOL_PRIMARY: {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("primary")),
+ NONE,
+ kNonStrictMode);
+
+      // Case level on with primary strength maps to "case"; primary alone
+      // maps to "base".
+ if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("case")),
+ NONE,
+ kNonStrictMode);
+ } else {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("base")),
+ NONE,
+ kNonStrictMode);
+ }
+ break;
+ }
+ case UCOL_SECONDARY:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("secondary")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("accent")),
+ NONE,
+ kNonStrictMode);
+ break;
+ case UCOL_TERTIARY:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("tertiary")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ NONE,
+ kNonStrictMode);
+ break;
+ case UCOL_QUATERNARY:
+      // We shouldn't get quaternary or identical strength from ICU, but if
+      // we do, report the sensitivity as variant.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("quaternary")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ NONE,
+ kNonStrictMode);
+ break;
+ default:
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("strength")),
+ isolate->factory()->NewStringFromAscii(CStrVector("identical")),
+ NONE,
+ kNonStrictMode);
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("sensitivity")),
+ isolate->factory()->NewStringFromAscii(CStrVector("variant")),
+ NONE,
+ kNonStrictMode);
+ }
+
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("ignorePunctuation")),
+ isolate->factory()->ToBoolean(collator->getAttribute(
+ UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
+ NONE,
+ kNonStrictMode);
+
+  // Set the locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+
+icu::BreakIterator* CreateICUBreakIterator(
+ Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::BreakIterator* break_iterator = NULL;
+ icu::UnicodeString type;
+ if (!ExtractStringSetting(isolate, options, "type", &type)) return NULL;
+
+ if (type == UNICODE_STRING_SIMPLE("character")) {
+ break_iterator =
+ icu::BreakIterator::createCharacterInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("sentence")) {
+ break_iterator =
+ icu::BreakIterator::createSentenceInstance(icu_locale, status);
+ } else if (type == UNICODE_STRING_SIMPLE("line")) {
+ break_iterator =
+ icu::BreakIterator::createLineInstance(icu_locale, status);
+ } else {
+    // Default is the word iterator.
+ break_iterator =
+ icu::BreakIterator::createWordInstance(icu_locale, status);
+ }
+
+ if (U_FAILURE(status)) {
+ delete break_iterator;
+ return NULL;
+ }
+
+ return break_iterator;
+}
+
+
+void SetResolvedBreakIteratorSettings(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ icu::BreakIterator* break_iterator,
+ Handle<JSObject> resolved) {
+ UErrorCode status = U_ZERO_ERROR;
+
+  // Set the locale.
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ NONE,
+ kNonStrictMode);
+ } else {
+    // This should never happen, since we got the locale from ICU.
+ JSObject::SetProperty(
+ resolved,
+ isolate->factory()->NewStringFromAscii(CStrVector("locale")),
+ isolate->factory()->NewStringFromAscii(CStrVector("und")),
+ NONE,
+ kNonStrictMode);
+ }
+}
+
+} // namespace
+
+
+// static
+Handle<ObjectTemplateInfo> I18N::GetTemplate(Isolate* isolate) {
+ return GetEternal<1, i::EternalHandles::I18N_TEMPLATE_ONE>(isolate);
+}
+
+
+// static
+Handle<ObjectTemplateInfo> I18N::GetTemplate2(Isolate* isolate) {
+ return GetEternal<2, i::EternalHandles::I18N_TEMPLATE_TWO>(isolate);
+}
+
+
+// static
+icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::SimpleDateFormat* date_format = CreateICUDateFormat(
+ isolate, icu_locale, options);
+ if (!date_format) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ date_format = CreateICUDateFormat(isolate, no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system, calendar).
+ SetResolvedDateSettings(
+ isolate, no_extension_locale, date_format, resolved);
+ } else {
+ SetResolvedDateSettings(isolate, icu_locale, date_format, resolved);
+ }
+
+ return date_format;
+}
+
+
+icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
+ Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
+ if (obj->HasLocalProperty(*key)) {
+ return reinterpret_cast<icu::SimpleDateFormat*>(
+ obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void DateFormat::DeleteDateFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::SimpleDateFormat*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+
+icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::DecimalFormat* number_format =
+ CreateICUNumberFormat(isolate, icu_locale, options);
+ if (!number_format) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ number_format = CreateICUNumberFormat(
+ isolate, no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system).
+ SetResolvedNumberSettings(
+ isolate, no_extension_locale, number_format, resolved);
+ } else {
+ SetResolvedNumberSettings(isolate, icu_locale, number_format, resolved);
+ }
+
+ return number_format;
+}
+
+
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
+ Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("numberFormat"));
+ if (obj->HasLocalProperty(*key)) {
+ return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void NumberFormat::DeleteNumberFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::DecimalFormat*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+
+icu::Collator* Collator::InitializeCollator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::Collator* collator = CreateICUCollator(isolate, icu_locale, options);
+ if (!collator) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ collator = CreateICUCollator(isolate, no_extension_locale, options);
+
+ // Set resolved settings (pattern, numbering system).
+ SetResolvedCollatorSettings(
+ isolate, no_extension_locale, collator, resolved);
+ } else {
+ SetResolvedCollatorSettings(isolate, icu_locale, collator, resolved);
+ }
+
+ return collator;
+}
+
+
+icu::Collator* Collator::UnpackCollator(Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("collator"));
+ if (obj->HasLocalProperty(*key)) {
+ return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void Collator::DeleteCollator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::Collator*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+
+icu::BreakIterator* BreakIterator::InitializeBreakIterator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ // Convert BCP47 into ICU locale format.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+ v8::String::Utf8Value bcp47_locale(v8::Utils::ToLocal(locale));
+ if (bcp47_locale.length() != 0) {
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ if (U_FAILURE(status) || icu_length == 0) {
+ return NULL;
+ }
+ icu_locale = icu::Locale(icu_result);
+ }
+
+ icu::BreakIterator* break_iterator = CreateICUBreakIterator(
+ isolate, icu_locale, options);
+ if (!break_iterator) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ break_iterator = CreateICUBreakIterator(
+ isolate, no_extension_locale, options);
+
+ // Set resolved settings (locale).
+ SetResolvedBreakIteratorSettings(
+ isolate, no_extension_locale, break_iterator, resolved);
+ } else {
+ SetResolvedBreakIteratorSettings(
+ isolate, icu_locale, break_iterator, resolved);
+ }
+
+ return break_iterator;
+}
+
+
+icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
+ Handle<JSObject> obj) {
+ Handle<String> key =
+ isolate->factory()->NewStringFromAscii(CStrVector("breakIterator"));
+ if (obj->HasLocalProperty(*key)) {
+ return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
+ }
+
+ return NULL;
+}
+
+
+void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param) {
+ // First delete the hidden C++ object.
+ delete reinterpret_cast<icu::BreakIterator*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(0));
+
+ delete reinterpret_cast<icu::UnicodeString*>(Handle<JSObject>::cast(
+ v8::Utils::OpenPersistent(object))->GetInternalField(1));
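+  // (Unlike the other wrappers, break iterators carry a second allocation,
+  // an icu::UnicodeString in internal field 1, which must be freed too.)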
+
+ // Then dispose of the persistent handle to JS object.
+ object->Dispose();
+}
+
+} } // namespace v8::internal
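
The four Unpack* helpers above share one pattern: a marker property, one of
"dateFormat", "numberFormat", "collator" and "breakIterator", proves that
the object is a genuine wrapper before internal field 0 is reinterpreted.
A hypothetical consolidation, shown only to make the pattern explicit
(UnpackIcuObject is not a function in this diff):

template <typename T>
T* UnpackIcuObject(Isolate* isolate, Handle<JSObject> obj,
                   const char* marker) {
  Handle<String> key =
      isolate->factory()->NewStringFromAscii(CStrVector(marker));
  // Only wrappers built by the matching Initialize* call carry the marker
  // property, so the cast below cannot hit an arbitrary object.
  if (obj->HasLocalProperty(*key)) {
    return reinterpret_cast<T*>(obj->GetInternalField(0));
  }
  return NULL;
}

Usage would read, e.g.:
  icu::Collator* collator =
      UnpackIcuObject<icu::Collator>(isolate, obj, "collator");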
diff --git a/chromium/v8/src/i18n.h b/chromium/v8/src/i18n.h
new file mode 100644
index 00000000000..08e7f2b7137
--- /dev/null
+++ b/chromium/v8/src/i18n.h
@@ -0,0 +1,154 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_I18N_H_
+#define V8_I18N_H_
+
+#include "unicode/uversion.h"
+#include "v8.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+class Collator;
+class DecimalFormat;
+class SimpleDateFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class I18N {
+ public:
+ // Creates an ObjectTemplate with one internal field.
+ static Handle<ObjectTemplateInfo> GetTemplate(Isolate* isolate);
+
+ // Creates an ObjectTemplate with two internal fields.
+ static Handle<ObjectTemplateInfo> GetTemplate2(Isolate* isolate);
+
+ private:
+ I18N();
+};
+
+
+class DateFormat {
+ public:
+  // Create a formatter for the specified locale and options. Stores the
+  // resolved settings for the locale / options in |resolved|.
+ static icu::SimpleDateFormat* InitializeDateTimeFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks date format object from corresponding JavaScript object.
+ static icu::SimpleDateFormat* UnpackDateFormat(Isolate* isolate,
+ Handle<JSObject> obj);
+
+ // Release memory we allocated for the DateFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteDateFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+ private:
+ DateFormat();
+};
+
+
+class NumberFormat {
+ public:
+  // Create a formatter for the specified locale and options. Stores the
+  // resolved settings for the locale / options in |resolved|.
+ static icu::DecimalFormat* InitializeNumberFormat(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks number format object from corresponding JavaScript object.
+ static icu::DecimalFormat* UnpackNumberFormat(Isolate* isolate,
+ Handle<JSObject> obj);
+
+ // Release memory we allocated for the NumberFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteNumberFormat(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+ private:
+ NumberFormat();
+};
+
+
+class Collator {
+ public:
+  // Create a collator for the specified locale and options. Stores the
+  // resolved settings for the locale / options in |resolved|.
+ static icu::Collator* InitializeCollator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks collator object from corresponding JavaScript object.
+ static icu::Collator* UnpackCollator(Isolate* isolate, Handle<JSObject> obj);
+
+ // Release memory we allocated for the Collator once the JS object that holds
+ // the pointer gets garbage collected.
+ static void DeleteCollator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+ private:
+ Collator();
+};
+
+class BreakIterator {
+ public:
+  // Create a BreakIterator for the specified locale and options. Stores the
+  // resolved settings for the locale / options in |resolved|.
+ static icu::BreakIterator* InitializeBreakIterator(
+ Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // Unpacks break iterator object from corresponding JavaScript object.
+ static icu::BreakIterator* UnpackBreakIterator(Isolate* isolate,
+ Handle<JSObject> obj);
+
+ // Release memory we allocated for the BreakIterator once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteBreakIterator(v8::Isolate* isolate,
+ Persistent<v8::Value>* object,
+ void* param);
+
+ private:
+ BreakIterator();
+};
+
+} } // namespace v8::internal
+
+#endif // V8_I18N_H_
diff --git a/chromium/v8/src/i18n.js b/chromium/v8/src/i18n.js
new file mode 100644
index 00000000000..a80fd4d9b4a
--- /dev/null
+++ b/chromium/v8/src/i18n.js
@@ -0,0 +1,2116 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ECMAScript 402 API implementation.
+
+/**
+ * Intl object is a single object that has some named properties,
+ * all of which are constructors.
+ */
+$Object.defineProperty(global, "Intl", { enumerable: false, value: (function() {
+
+'use strict';
+
+var Intl = {};
+
+var undefined = global.undefined;
+
+var AVAILABLE_SERVICES = ['collator',
+ 'numberformat',
+ 'dateformat',
+ 'breakiterator'];
+
+/**
+ * Caches available locales for each service.
+ */
+var AVAILABLE_LOCALES = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformat': undefined,
+ 'breakiterator': undefined
+};
+
+/**
+ * Caches default ICU locale.
+ */
+var DEFAULT_ICU_LOCALE = undefined;
+
+/**
+ * Unicode extension regular expression.
+ */
+var UNICODE_EXTENSION_RE = undefined;
+
+function GetUnicodeExtensionRE() {
+ if (UNICODE_EXTENSION_RE === undefined) {
+ UNICODE_EXTENSION_RE = new $RegExp('-u(-[a-z0-9]{2,8})+', 'g');
+ }
+ return UNICODE_EXTENSION_RE;
+}
+
+/**
+ * Matches any Unicode extension.
+ */
+var ANY_EXTENSION_RE = undefined;
+
+function GetAnyExtensionRE() {
+ if (ANY_EXTENSION_RE === undefined) {
+ ANY_EXTENSION_RE = new $RegExp('-[a-z0-9]{1}-.*', 'g');
+ }
+ return ANY_EXTENSION_RE;
+}
+
+/**
+ * Replace quoted text (single quote, anything but the quote and quote again).
+ */
+var QUOTED_STRING_RE = undefined;
+
+function GetQuotedStringRE() {
+ if (QUOTED_STRING_RE === undefined) {
+ QUOTED_STRING_RE = new $RegExp("'[^']+'", 'g');
+ }
+ return QUOTED_STRING_RE;
+}
+
+/**
+ * Matches valid service name.
+ */
+var SERVICE_RE = undefined;
+
+function GetServiceRE() {
+ if (SERVICE_RE === undefined) {
+ SERVICE_RE =
+ new $RegExp('^(collator|numberformat|dateformat|breakiterator)$');
+ }
+ return SERVICE_RE;
+}
+
+/**
+ * Validates a language tag against bcp47 spec.
+ * Actual value is assigned on first run.
+ */
+var LANGUAGE_TAG_RE = undefined;
+
+function GetLanguageTagRE() {
+ if (LANGUAGE_TAG_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_TAG_RE;
+}
+
+/**
+ * Helps find duplicate variants in the language tag.
+ */
+var LANGUAGE_VARIANT_RE = undefined;
+
+function GetLanguageVariantRE() {
+ if (LANGUAGE_VARIANT_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_VARIANT_RE;
+}
+
+/**
+ * Helps find duplicate singletons in the language tag.
+ */
+var LANGUAGE_SINGLETON_RE = undefined;
+
+function GetLanguageSingletonRE() {
+ if (LANGUAGE_SINGLETON_RE === undefined) {
+ BuildLanguageTagREs();
+ }
+ return LANGUAGE_SINGLETON_RE;
+}
+
+/**
+ * Matches valid IANA time zone names.
+ */
+var TIMEZONE_NAME_CHECK_RE = undefined;
+
+function GetTimezoneNameCheckRE() {
+ if (TIMEZONE_NAME_CHECK_RE === undefined) {
+ TIMEZONE_NAME_CHECK_RE =
+ new $RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
+ }
+ return TIMEZONE_NAME_CHECK_RE;
+}
+
+/**
+ * Maps ICU calendar names into LDML type.
+ */
+var ICU_CALENDAR_MAP = {
+ 'gregorian': 'gregory',
+ 'japanese': 'japanese',
+ 'buddhist': 'buddhist',
+ 'roc': 'roc',
+ 'persian': 'persian',
+ 'islamic-civil': 'islamicc',
+ 'islamic': 'islamic',
+ 'hebrew': 'hebrew',
+ 'chinese': 'chinese',
+ 'indian': 'indian',
+ 'coptic': 'coptic',
+ 'ethiopic': 'ethiopic',
+ 'ethiopic-amete-alem': 'ethioaa'
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a collator.
+ */
+var COLLATOR_KEY_MAP = {
+ 'kn': {'property': 'numeric', 'type': 'boolean'},
+ 'kf': {'property': 'caseFirst', 'type': 'string',
+ 'values': ['false', 'lower', 'upper']}
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a number format.
+ */
+var NUMBER_FORMAT_KEY_MAP = {
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Map of Unicode extensions to option properties, and their values and types,
+ * for a date/time format.
+ */
+var DATETIME_FORMAT_KEY_MAP = {
+ 'ca': {'property': undefined, 'type': 'string'},
+ 'nu': {'property': undefined, 'type': 'string'}
+};
+
+/**
+ * Allowed -u-co- values. List taken from:
+ * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
+ */
+var ALLOWED_CO_VALUES = [
+ 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
+ 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
+];
+
+/**
+ * Error message for when function object is created with new and it's not
+ * a constructor.
+ */
+var ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR =
+ 'Function object that\'s not a constructor was created with new';
+
+
+/**
+ * Adds bound method to the prototype of the given object.
+ */
+function addBoundMethod(obj, methodName, implementation, length) {
+ function getter() {
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject === undefined) {
+ throw new $TypeError('Method ' + methodName + ' called on a ' +
+ 'non-object or on a wrong type of object.');
+ }
+ var internalName = '__bound' + methodName + '__';
+ if (this[internalName] === undefined) {
+ var that = this;
+ var boundMethod;
+ if (length === undefined || length === 2) {
+ boundMethod = function(x, y) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x, y);
+ }
+ } else if (length === 1) {
+ boundMethod = function(x) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+ return implementation(that, x);
+ }
+ } else {
+ boundMethod = function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+          // DateTimeFormat.format needs to be a 0-arg method, but can still
+          // receive an optional dateValue param. If one was provided, pass it
+ // along.
+ if (%_ArgumentsLength() > 0) {
+ return implementation(that, %_Arguments(0));
+ } else {
+ return implementation(that);
+ }
+ }
+ }
+ %FunctionSetName(boundMethod, internalName);
+ %FunctionRemovePrototype(boundMethod);
+ %SetNativeFlag(boundMethod);
+ this[internalName] = boundMethod;
+ }
+ return this[internalName];
+ }
+
+ %FunctionSetName(getter, methodName);
+ %FunctionRemovePrototype(getter);
+ %SetNativeFlag(getter);
+
+ $Object.defineProperty(obj.prototype, methodName, {
+ get: getter,
+ enumerable: false,
+ configurable: true
+ });
+}
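+
+// A minimal sketch of the effect, assuming a hypothetical initialized
+// collator instance:
+//   var boundCompare = collator.compare;  // getter creates the bound method
+//   boundCompare('a', 'b');  // still operates on `collator`, unlike an
+//                            // ordinary unbound method would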
+
+
+/**
+ * Returns an intersection of locales and service supported locales.
+ * Parameter locales is treated as a priority list.
+ */
+function supportedLocalesOf(service, locales, options) {
+ if (service.match(GetServiceRE()) === null) {
+ throw new $Error('Internal error, wrong service type: ' + service);
+ }
+
+ // Provide defaults if matcher was not specified.
+ if (options === undefined) {
+ options = {};
+ } else {
+ options = toObject(options);
+ }
+
+ var matcher = options.localeMatcher;
+ if (matcher !== undefined) {
+ matcher = $String(matcher);
+ if (matcher !== 'lookup' && matcher !== 'best fit') {
+      throw new $RangeError('Illegal value for localeMatcher: ' + matcher);
+ }
+ } else {
+ matcher = 'best fit';
+ }
+
+ var requestedLocales = initializeLocaleList(locales);
+
+ // Cache these, they don't ever change per service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ // Use either best fit or lookup algorithm to match locales.
+ if (matcher === 'best fit') {
+ return initializeLocaleList(bestFitSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+ }
+
+ return initializeLocaleList(lookupSupportedLocalesOf(
+ requestedLocales, AVAILABLE_LOCALES[service]));
+}
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the BCP 47 Lookup algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
+ var matchedLocales = [];
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove -u- extension.
+ var locale = requestedLocales[i].replace(GetUnicodeExtensionRE(), '');
+ do {
+ if (availableLocales[locale] !== undefined) {
+        // Push the requested locale, not the resolved one.
+ matchedLocales.push(requestedLocales[i]);
+ break;
+ }
+      // Truncate the locale if possible; if not, break.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ return matchedLocales;
+}
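+
+// Illustrative example, assuming 'de' (but not 'de-CH') is a key in
+// availableLocales:
+//   lookupSupportedLocalesOf(['de-CH-u-co-phonebk'], {'de': null});
+//   // => ['de-CH-u-co-phonebk']  (fallback found via truncation to 'de';
+//   //    the original requested tag is returned, extension included)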
+
+
+/**
+ * Returns the subset of the provided BCP 47 language priority list for which
+ * this service has a matching locale when using the
+ * implementation-dependent algorithm.
+ * Locales appear in the same order in the returned list as in the input list.
+ */
+function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupSupportedLocalesOf(requestedLocales, availableLocales);
+}
+
+
+/**
+ * Returns a getOption function that extracts the property value from the
+ * given options object. If the property is missing, it returns defaultValue.
+ * If the value is out of range for that property, it throws a RangeError.
+ */
+function getGetOption(options, caller) {
+ if (options === undefined) {
+ throw new $Error('Internal ' + caller + ' error. ' +
+ 'Default options are missing.');
+ }
+
+ var getOption = function getOption(property, type, values, defaultValue) {
+ if (options[property] !== undefined) {
+ var value = options[property];
+ switch (type) {
+ case 'boolean':
+ value = $Boolean(value);
+ break;
+ case 'string':
+ value = $String(value);
+ break;
+ case 'number':
+ value = $Number(value);
+ break;
+ default:
+ throw new $Error('Internal error. Wrong value type.');
+ }
+ if (values !== undefined && values.indexOf(value) === -1) {
+ throw new $RangeError('Value ' + value + ' out of range for ' + caller +
+ ' options property ' + property);
+ }
+
+ return value;
+ }
+
+ return defaultValue;
+ }
+
+ return getOption;
+}
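+
+// Usage sketch with hypothetical options:
+//   var getOption = getGetOption({style: 'percent'}, 'numberformat');
+//   getOption('style', 'string', ['decimal', 'percent'], 'decimal');
+//   // => 'percent'
+//   getOption('useGrouping', 'boolean', undefined, true);  // => true (default)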
+
+
+/**
+ * Compares a BCP 47 language priority list requestedLocales against the locales
+ * in availableLocales and determines the best available language to meet the
+ * request. Two algorithms are available to match the locales: the Lookup
+ * algorithm described in RFC 4647 section 3.4, and an implementation dependent
+ * best-fit algorithm. Independent of the locale matching algorithm, options
+ * specified through Unicode locale extension sequences are negotiated
+ * separately, taking the caller's relevant extension keys and locale data as
+ * well as client-provided options into consideration. Returns an object with
+ * a locale property whose value is the language tag of the selected locale,
+ * and properties for each key in relevantExtensionKeys providing the selected
+ * value for that key.
+ */
+function resolveLocale(service, requestedLocales, options) {
+ requestedLocales = initializeLocaleList(requestedLocales);
+
+ var getOption = getGetOption(options, service);
+ var matcher = getOption('localeMatcher', 'string',
+ ['lookup', 'best fit'], 'best fit');
+ var resolved;
+ if (matcher === 'lookup') {
+ resolved = lookupMatcher(service, requestedLocales);
+ } else {
+ resolved = bestFitMatcher(service, requestedLocales);
+ }
+
+ return resolved;
+}
+
+
+/**
+ * Returns best matched supported locale and extension info using basic
+ * lookup algorithm.
+ */
+function lookupMatcher(service, requestedLocales) {
+ if (service.match(GetServiceRE()) === null) {
+ throw new $Error('Internal error, wrong service type: ' + service);
+ }
+
+ // Cache these, they don't ever change per service.
+ if (AVAILABLE_LOCALES[service] === undefined) {
+ AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
+ }
+
+ for (var i = 0; i < requestedLocales.length; ++i) {
+ // Remove all extensions.
+ var locale = requestedLocales[i].replace(GetAnyExtensionRE(), '');
+ do {
+ if (AVAILABLE_LOCALES[service][locale] !== undefined) {
+ // Return the resolved locale and extension.
+ var extensionMatch = requestedLocales[i].match(GetUnicodeExtensionRE());
+ var extension = (extensionMatch === null) ? '' : extensionMatch[0];
+ return {'locale': locale, 'extension': extension, 'position': i};
+ }
+ // Truncate locale if possible.
+ var pos = locale.lastIndexOf('-');
+ if (pos === -1) {
+ break;
+ }
+ locale = locale.substring(0, pos);
+ } while (true);
+ }
+
+ // Didn't find a match, return default.
+ if (DEFAULT_ICU_LOCALE === undefined) {
+ DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
+ }
+
+ return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
+}
+
+
+/**
+ * Returns best matched supported locale and extension info using
+ * implementation-dependent algorithm.
+ */
+function bestFitMatcher(service, requestedLocales) {
+ // TODO(cira): implement better best fit algorithm.
+ return lookupMatcher(service, requestedLocales);
+}
+
+
+/**
+ * Parses a Unicode extension into a key-value map.
+ * Returns empty object if the extension string is invalid.
+ * We are not concerned with the validity of the values at this point.
+ */
+function parseExtension(extension) {
+ var extensionSplit = extension.split('-');
+
+ // Assume ['', 'u', ...] input, but don't throw.
+ if (extensionSplit.length <= 2 ||
+ (extensionSplit[0] !== '' && extensionSplit[1] !== 'u')) {
+ return {};
+ }
+
+ // Key is {2}alphanum, value is {3,8}alphanum.
+ // Some keys may not have explicit values (booleans).
+ var extensionMap = {};
+ var previousKey = undefined;
+ for (var i = 2; i < extensionSplit.length; ++i) {
+ var length = extensionSplit[i].length;
+ var element = extensionSplit[i];
+ if (length === 2) {
+ extensionMap[element] = undefined;
+ previousKey = element;
+    } else if (length >= 3 && length <= 8 && previousKey !== undefined) {
+ extensionMap[previousKey] = element;
+ previousKey = undefined;
+ } else {
+ // There is a value that's too long, or that doesn't have a key.
+ return {};
+ }
+ }
+
+ return extensionMap;
+}
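+
+// Illustrative inputs and results:
+//   parseExtension('-u-kn-true-co-phonebk');  // => {kn: 'true', co: 'phonebk'}
+//   parseExtension('-u-kn');       // => {kn: undefined} (boolean key, no value)
+//   parseExtension('not-u-form');  // => {} (invalid extension string)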
+
+
+/**
+ * Converts parameter to an Object if possible.
+ */
+function toObject(value) {
+ if (value === undefined || value === null) {
+ throw new $TypeError('Value cannot be converted to an Object.');
+ }
+
+ return $Object(value);
+}
+
+
+/**
+ * Populates the internalOptions object with key-value pairs from
+ * extensionMap and options.
+ * Returns the filtered extension (the number and date format constructors
+ * use Unicode extensions to pass parameters to ICU).
+ * It's used for extension-option pairs only (e.g. the kn extension maps to
+ * the numeric option), but not for 'sensitivity', which has no extension
+ * equivalent.
+ * Extensions like nu and ca have no options equivalent, so we place
+ * undefined in map.property to denote that.
+ */
+function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
+ var extension = '';
+
+ var updateExtension = function updateExtension(key, value) {
+ return '-' + key + '-' + $String(value);
+ }
+
+ var updateProperty = function updateProperty(property, type, value) {
+ if (type === 'boolean' && (typeof value === 'string')) {
+ value = (value === 'true') ? true : false;
+ }
+
+ if (property !== undefined) {
+ defineWEProperty(outOptions, property, value);
+ }
+ }
+
+ for (var key in keyValues) {
+ if (keyValues.hasOwnProperty(key)) {
+ var value = undefined;
+ var map = keyValues[key];
+ if (map.property !== undefined) {
+ // This may return true if user specifies numeric: 'false', since
+ // Boolean('nonempty') === true.
+ value = getOption(map.property, map.type, map.values);
+ }
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ continue;
+ }
+ // User options didn't have it, check Unicode extension.
+ // Here we want to convert strings 'true', 'false' into proper Boolean
+ // values (not a user error).
+ if (extensionMap.hasOwnProperty(key)) {
+ value = extensionMap[key];
+ if (value !== undefined) {
+ updateProperty(map.property, map.type, value);
+ extension += updateExtension(key, value);
+ } else if (map.type === 'boolean') {
+ // Boolean keys are allowed not to have values in Unicode extension.
+ // Those default to true.
+ updateProperty(map.property, map.type, true);
+ extension += updateExtension(key, true);
+ }
+ }
+ }
+ }
+
+  return extension === '' ? '' : '-u' + extension;
+}
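+
+// Sketch for the collator case, with hypothetical empty user options and a
+// '-u-kn' extension already parsed into extensionMap:
+//   var out = {};
+//   var ext = setOptions({}, {kn: undefined}, COLLATOR_KEY_MAP,
+//                        getGetOption({}, 'collator'), out);
+//   // ext => '-u-kn-true', out.numeric => true (valueless kn defaults to true)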
+
+
+/**
+ * Converts all OwnProperties into
+ * configurable: false, writable: false, enumerable: true.
+ */
+function freezeArray(array) {
+ array.forEach(function(element, index) {
+ $Object.defineProperty(array, index, {value: element,
+ configurable: false,
+ writable: false,
+ enumerable: true});
+ });
+
+ $Object.defineProperty(array, 'length', {value: array.length,
+ writable: false});
+
+ return array;
+}
+
+
+/**
+ * It's sometimes desirable to keep the user-requested locale instead of the
+ * ICU-supported one (zh-TW is equivalent to zh-Hant-TW, so we should keep the
+ * shorter form if that is what the user requested).
+ * This function returns the user-specified tag if its maximized form matches
+ * the ICU-resolved locale; otherwise it returns the ICU result.
+ */
+function getOptimalLanguageTag(original, resolved) {
+ // Returns Array<Object>, where each object has maximized and base properties.
+ // Maximized: zh -> zh-Hans-CN
+ // Base: zh-CN-u-ca-gregory -> zh-CN
+ // Take care of grandfathered or simple cases.
+ if (original === resolved) {
+ return original;
+ }
+
+ var locales = %GetLanguageTagVariants([original, resolved]);
+ if (locales[0].maximized !== locales[1].maximized) {
+ return resolved;
+ }
+
+ // Preserve extensions of resolved locale, but swap base tags with original.
+ var resolvedBase = new $RegExp('^' + locales[1].base);
+ return resolved.replace(resolvedBase, locales[0].base);
+}
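+
+// Illustrative example (the actual result depends on the ICU-backed
+// %GetLanguageTagVariants runtime call):
+//   getOptimalLanguageTag('zh-TW', 'zh-Hant-TW');
+//   // => 'zh-TW', since both maximize to zh-Hant-TW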
+
+
+/**
+ * Returns an Object that contains all of supported locales for a given
+ * service.
+ * In addition to the supported locales we add xx-ZZ locale for each xx-Yyyy-ZZ
+ * that is supported. This is required by the spec.
+ */
+function getAvailableLocalesOf(service) {
+ var available = %AvailableLocalesOf(service);
+
+ for (var i in available) {
+ if (available.hasOwnProperty(i)) {
+ var parts = i.match(/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
+ if (parts !== null) {
+ // Build xx-ZZ. We don't care about the actual value,
+        // as long as it's not undefined.
+ available[parts[1] + '-' + parts[3]] = null;
+ }
+ }
+ }
+
+ return available;
+}
+
+
+/**
+ * Defines a property and sets writable and enumerable to true.
+ * Configurable is false by default.
+ */
+function defineWEProperty(object, property, value) {
+ $Object.defineProperty(object, property,
+ {value: value, writable: true, enumerable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets configurable descriptor to false.
+ */
+function addWEPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWEProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Defines a property and sets writable, enumerable and configurable to true.
+ */
+function defineWECProperty(object, property, value) {
+ $Object.defineProperty(object, property,
+ {value: value,
+ writable: true,
+ enumerable: true,
+ configurable: true});
+}
+
+
+/**
+ * Adds property to an object if the value is not undefined.
+ * Sets all descriptors to true.
+ */
+function addWECPropertyIfDefined(object, property, value) {
+ if (value !== undefined) {
+ defineWECProperty(object, property, value);
+ }
+}
+
+
+/**
+ * Returns a titlecased word, e.g. aMeRicA -> America.
+ */
+function toTitleCaseWord(word) {
+ return word.substr(0, 1).toUpperCase() + word.substr(1).toLowerCase();
+}
+
+/**
+ * Canonicalizes the language tag, or throws in case the tag is invalid.
+ */
+function canonicalizeLanguageTag(localeID) {
+  // typeof null is 'object', so we have to do an extra check.
+ if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
+ localeID === null) {
+ throw new $TypeError('Language ID should be string or object.');
+ }
+
+ var localeString = $String(localeID);
+
+ if (isValidLanguageTag(localeString) === false) {
+ throw new $RangeError('Invalid language tag: ' + localeString);
+ }
+
+ // This call will strip -kn but not -kn-true extensions.
+  // ICU bug filed: http://bugs.icu-project.org/trac/ticket/9265.
+ // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
+ // upgrade to ICU 4.9.
+ var tag = %CanonicalizeLanguageTag(localeString);
+ if (tag === 'invalid-tag') {
+ throw new $RangeError('Invalid language tag: ' + localeString);
+ }
+
+ return tag;
+}
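+
+// Illustrative behavior (canonicalization itself happens in ICU via
+// %CanonicalizeLanguageTag):
+//   canonicalizeLanguageTag('en-us');     // => 'en-US' (case canonicalized)
+//   canonicalizeLanguageTag('invalid!');  // throws RangeError
+//   canonicalizeLanguageTag(null);        // throws TypeError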
+
+
+/**
+ * Returns an array where all locales are canonicalized and duplicates removed.
+ * Throws on locales that are not well formed BCP47 tags.
+ */
+function initializeLocaleList(locales) {
+ var seen = [];
+ if (locales === undefined) {
+ // Constructor is called without arguments.
+ seen = [];
+ } else {
+ // We allow single string localeID.
+ if (typeof locales === 'string') {
+ seen.push(canonicalizeLanguageTag(locales));
+ return freezeArray(seen);
+ }
+
+ var o = toObject(locales);
+    // Convert it to uint32 (>>> is an unsigned shift right on 32-bit ints).
+ var len = o.length >>> 0;
+
+ for (var k = 0; k < len; k++) {
+ if (k in o) {
+ var value = o[k];
+
+ var tag = canonicalizeLanguageTag(value);
+
+ if (seen.indexOf(tag) === -1) {
+ seen.push(tag);
+ }
+ }
+ }
+ }
+
+ return freezeArray(seen);
+}
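+
+// Illustrative results (assuming ICU canonicalizes case as usual):
+//   initializeLocaleList('sr-latn');     // => frozen ['sr-Latn']
+//   initializeLocaleList(['en', 'EN']);  // => frozen ['en'] (duplicate dropped)
+//   initializeLocaleList(undefined);     // => frozen []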
+
+
+/**
+ * Validates the language tag. Section 2.2.9 of the bcp47 spec
+ * defines a valid tag.
+ *
+ * ICU is too permissive and lets invalid tags, like
+ * hant-cmn-cn, through.
+ *
+ * Returns false if the language tag is invalid.
+ */
+function isValidLanguageTag(locale) {
+  // Check if it's well-formed, including grandfathered tags.
+ if (GetLanguageTagRE().test(locale) === false) {
+ return false;
+ }
+
+ // Just return if it's a x- form. It's all private.
+ if (locale.indexOf('x-') === 0) {
+ return true;
+ }
+
+ // Check if there are any duplicate variants or singletons (extensions).
+
+ // Remove private use section.
+ locale = locale.split(/-x-/)[0];
+
+ // Skip language since it can match variant regex, so we start from 1.
+ // We are matching i-klingon here, but that's ok, since i-klingon-klingon
+  // is not valid and would fail the LANGUAGE_TAG_RE test.
+ var variants = [];
+ var extensions = [];
+ var parts = locale.split(/-/);
+ for (var i = 1; i < parts.length; i++) {
+ var value = parts[i];
+ if (GetLanguageVariantRE().test(value) === true && extensions.length === 0) {
+ if (variants.indexOf(value) === -1) {
+ variants.push(value);
+ } else {
+ return false;
+ }
+ }
+
+ if (GetLanguageSingletonRE().test(value) === true) {
+ if (extensions.indexOf(value) === -1) {
+ extensions.push(value);
+ } else {
+ return false;
+ }
+ }
+ }
+
+  return true;
+}
+
+
+/**
+ * Builds a regular expression that validates the language tag
+ * against bcp47 spec.
+ * Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
+ * Runs on load and initializes the global REs.
+ */
+function BuildLanguageTagREs() {
+ var alpha = '[a-zA-Z]';
+ var digit = '[0-9]';
+ var alphanum = '(' + alpha + '|' + digit + ')';
+ var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
+ 'zh-min|zh-min-nan|zh-xiang)';
+ var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
+ 'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
+ 'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
+ var grandfathered = '(' + irregular + '|' + regular + ')';
+ var privateUse = '(x(-' + alphanum + '{1,8})+)';
+
+ var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
+ LANGUAGE_SINGLETON_RE = new $RegExp('^' + singleton + '$', 'i');
+
+ var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
+
+ var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
+ LANGUAGE_VARIANT_RE = new $RegExp('^' + variant + '$', 'i');
+
+ var region = '(' + alpha + '{2}|' + digit + '{3})';
+ var script = '(' + alpha + '{4})';
+ var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
+ var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
+ alpha + '{5,8})';
+ var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
+ variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
+
+ var languageTag =
+ '^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
+ LANGUAGE_TAG_RE = new $RegExp(languageTag, 'i');
+}
+
+/**
+ * Initializes the given object so it's a valid Collator instance.
+ * Useful for subclassing.
+ */
+function initializeCollator(collator, locales, options) {
+ if (collator.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize Collator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'collator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'usage', getOption(
+ 'usage', 'string', ['sort', 'search'], 'sort'));
+
+ var sensitivity = getOption('sensitivity', 'string',
+ ['base', 'accent', 'case', 'variant']);
+ if (sensitivity === undefined && internalOptions.usage === 'sort') {
+ sensitivity = 'variant';
+ }
+ defineWEProperty(internalOptions, 'sensitivity', sensitivity);
+
+ defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
+ 'ignorePunctuation', 'boolean', undefined, false));
+
+ var locale = resolveLocale('collator', locales, options);
+
+ // ICU can't take kb, kc... parameters through localeID, so we need to pass
+ // them as options.
+ // One exception is -co- which has to be part of the extension, but only for
+ // usage: sort, and its value can't be 'standard' or 'search'.
+ var extensionMap = parseExtension(locale.extension);
+ setOptions(
+ options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
+
+ var collation = 'default';
+ var extension = '';
+ if (extensionMap.hasOwnProperty('co') && internalOptions.usage === 'sort') {
+ if (ALLOWED_CO_VALUES.indexOf(extensionMap.co) !== -1) {
+ extension = '-u-co-' + extensionMap.co;
+ // ICU can't tell us what the collation is, so save user's input.
+ collation = extensionMap.co;
+ }
+ } else if (internalOptions.usage === 'search') {
+ extension = '-u-co-search';
+ }
+ defineWEProperty(internalOptions, 'collation', collation);
+
+ var requestedLocale = locale.locale + extension;
+
+ // We define all properties C++ code may produce, to prevent security
+  // problems. If a malicious user decides to redefine Object.prototype.locale,
+  // we can't just use plain x.locale = 'us' or, in C++, Set("locale", "us").
+ // Object.defineProperties will either succeed defining or throw an error.
+ var resolved = $Object.defineProperties({}, {
+ caseFirst: {writable: true},
+ collation: {value: internalOptions.collation, writable: true},
+ ignorePunctuation: {writable: true},
+ locale: {writable: true},
+ numeric: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ sensitivity: {writable: true},
+ strength: {writable: true},
+ usage: {value: internalOptions.usage, writable: true}
+ });
+
+ var internalCollator = %CreateCollator(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // Writable, configurable and enumerable are set to false by default.
+ $Object.defineProperty(collator, 'collator', {value: internalCollator});
+ $Object.defineProperty(collator, '__initializedIntlObject',
+ {value: 'collator'});
+ $Object.defineProperty(collator, 'resolved', {value: resolved});
+
+ return collator;
+}
+
+
+/**
+ * Constructs Intl.Collator object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'Collator', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.Collator(locales, options);
+ }
+
+ return initializeCollator(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * Collator resolvedOptions method.
+ */
+%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'collator') {
+ throw new $TypeError('resolvedOptions method called on a non-object ' +
+                         'or on an object that is not Intl.Collator.');
+ }
+
+ var coll = this;
+ var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
+ coll.resolved.locale);
+
+ return {
+ locale: locale,
+ usage: coll.resolved.usage,
+ sensitivity: coll.resolved.sensitivity,
+ ignorePunctuation: coll.resolved.ignorePunctuation,
+ numeric: coll.resolved.numeric,
+ caseFirst: coll.resolved.caseFirst,
+ collation: coll.resolved.collation
+ };
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
+%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this locale list
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * Options are optional parameter.
+ */
+%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('collator', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
+%SetNativeFlag(Intl.Collator.supportedLocalesOf);
+
+
+/**
+ * When the compare method is called with two arguments x and y, it returns a
+ * Number other than NaN that represents the result of a locale-sensitive
+ * String comparison of x with y.
+ * The result is intended to order String values in the sort order specified
+ * by the effective locale and collation options computed during construction
+ * of this Collator object, and will be negative, zero, or positive, depending
+ * on whether x comes before y in the sort order, the Strings are equal under
+ * the sort order, or x comes after y in the sort order, respectively.
+ */
+function compare(collator, x, y) {
+ return %InternalCompare(collator.collator, $String(x), $String(y));
+}
+
+
+addBoundMethod(Intl.Collator, 'compare', compare, 2);
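+
+// Usage sketch (exact results depend on ICU collation data):
+//   var collator = new Intl.Collator(['de'], {sensitivity: 'base'});
+//   collator.compare('ä', 'a');              // => 0 at base sensitivity
+//   ['c', 'ä', 'a'].sort(collator.compare);  // locale-aware sort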
+
+/**
+ * Verifies that the input is a well-formed ISO 4217 currency code.
+ * Don't uppercase to test. It could convert invalid code into a valid one.
+ * For example \u00DFP (Eszett+P) becomes SSP.
+ */
+function isWellFormedCurrencyCode(currency) {
+ return typeof currency == "string" &&
+ currency.length == 3 &&
+ currency.match(/[^A-Za-z]/) == null;
+}
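+
+// Illustrative checks:
+//   isWellFormedCurrencyCode('USD');  // => true
+//   isWellFormedCurrencyCode('us');   // => false (wrong length)
+//   isWellFormedCurrencyCode('US1');  // => false (non-letter character)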
+
+
+/**
+ * Returns the valid digit count for a property, or throws RangeError on
+ * a value out of the range.
+ */
+function getNumberOption(options, property, min, max, fallback) {
+ var value = options[property];
+ if (value !== undefined) {
+ value = $Number(value);
+ if ($isNaN(value) || value < min || value > max) {
+ throw new $RangeError(property + ' value is out of range.');
+ }
+ return $floor(value);
+ }
+
+ return fallback;
+}
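+
+// Illustrative calls with hypothetical options objects:
+//   getNumberOption({maximumFractionDigits: '5'},
+//                   'maximumFractionDigits', 0, 20, 3);   // => 5 (floored)
+//   getNumberOption({}, 'minimumIntegerDigits', 1, 21, 1);  // => 1 (fallback)
+//   getNumberOption({minimumIntegerDigits: 40},
+//                   'minimumIntegerDigits', 1, 21, 1);    // throws RangeError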
+
+
+/**
+ * Initializes the given object so it's a valid NumberFormat instance.
+ * Useful for subclassing.
+ */
+function initializeNumberFormat(numberFormat, locales, options) {
+ if (numberFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize NumberFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'numberformat');
+
+ var locale = resolveLocale('numberformat', locales, options);
+
+ var internalOptions = {};
+ defineWEProperty(internalOptions, 'style', getOption(
+ 'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
+
+ var currency = getOption('currency', 'string');
+ if (currency !== undefined && !isWellFormedCurrencyCode(currency)) {
+ throw new $RangeError('Invalid currency code: ' + currency);
+ }
+
+ if (internalOptions.style === 'currency' && currency === undefined) {
+ throw new $TypeError('Currency code is required with currency style.');
+ }
+
+ var currencyDisplay = getOption(
+ 'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
+ if (internalOptions.style === 'currency') {
+ defineWEProperty(internalOptions, 'currency', currency.toUpperCase());
+ defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
+ }
+
+ // Digit ranges.
+ var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
+ defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
+
+ var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
+ defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
+
+ var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
+ defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
+
+ var mnsd = options['minimumSignificantDigits'];
+ var mxsd = options['maximumSignificantDigits'];
+ if (mnsd !== undefined || mxsd !== undefined) {
+ mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
+ defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
+
+ mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
+ defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
+ }
+
+ // Grouping.
+ defineWEProperty(internalOptions, 'useGrouping', getOption(
+ 'useGrouping', 'boolean', undefined, true));
+
+ // ICU prefers options to be passed using -u- extension key/values for
+ // number format, so we need to build that.
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, NUMBER_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = $Object.defineProperties({}, {
+ currency: {writable: true},
+ currencyDisplay: {writable: true},
+ locale: {writable: true},
+ maximumFractionDigits: {writable: true},
+ minimumFractionDigits: {writable: true},
+ minimumIntegerDigits: {writable: true},
+ numberingSystem: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ style: {value: internalOptions.style, writable: true},
+ useGrouping: {writable: true}
+ });
+ if (internalOptions.hasOwnProperty('minimumSignificantDigits')) {
+ defineWEProperty(resolved, 'minimumSignificantDigits', undefined);
+ }
+ if (internalOptions.hasOwnProperty('maximumSignificantDigits')) {
+ defineWEProperty(resolved, 'maximumSignificantDigits', undefined);
+ }
+ var formatter = %CreateNumberFormat(requestedLocale,
+ internalOptions,
+ resolved);
+
+ // We can't get information about number or currency style from ICU, so we
+  // assume the user request was fulfilled.
+ if (internalOptions.style === 'currency') {
+ $Object.defineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
+ writable: true});
+ }
+
+ $Object.defineProperty(numberFormat, 'formatter', {value: formatter});
+ $Object.defineProperty(numberFormat, 'resolved', {value: resolved});
+ $Object.defineProperty(numberFormat, '__initializedIntlObject',
+ {value: 'numberformat'});
+
+ return numberFormat;
+}
+
+
+/**
+ * Constructs Intl.NumberFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'NumberFormat', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.NumberFormat(locales, options);
+ }
+
+ return initializeNumberFormat(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * NumberFormat resolvedOptions method.
+ */
+%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'numberformat') {
+ throw new $TypeError('resolvedOptions method called on a non-object' +
+        ' or on an object that is not Intl.NumberFormat.');
+ }
+
+ var format = this;
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ style: format.resolved.style,
+ useGrouping: format.resolved.useGrouping,
+ minimumIntegerDigits: format.resolved.minimumIntegerDigits,
+ minimumFractionDigits: format.resolved.minimumFractionDigits,
+ maximumFractionDigits: format.resolved.maximumFractionDigits,
+ };
+
+ if (result.style === 'currency') {
+ defineWECProperty(result, 'currency', format.resolved.currency);
+ defineWECProperty(result, 'currencyDisplay',
+ format.resolved.currencyDisplay);
+ }
+
+ if (format.resolved.hasOwnProperty('minimumSignificantDigits')) {
+ defineWECProperty(result, 'minimumSignificantDigits',
+ format.resolved.minimumSignificantDigits);
+ }
+
+ if (format.resolved.hasOwnProperty('maximumSignificantDigits')) {
+ defineWECProperty(result, 'maximumSignificantDigits',
+ format.resolved.maximumSignificantDigits);
+ }
+
+ return result;
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this locale list
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * Options are optional parameter.
+ */
+%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('numberformat', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(value)
+ * according to the effective locale and the formatting options of this
+ * NumberFormat.
+ */
+function formatNumber(formatter, value) {
+ // Spec treats -0 and +0 as 0.
+ var number = $Number(value);
+ if (number === -0) {
+ number = 0;
+ }
+
+ return %InternalNumberFormat(formatter.formatter, number);
+}
+
+
+/**
+ * Returns a Number that represents string value that was passed in.
+ */
+function parseNumber(formatter, value) {
+ return %InternalNumberParse(formatter.formatter, $String(value));
+}
+
+
+addBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
+addBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
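+
+// Usage sketch (exact strings depend on ICU locale data):
+//   var nf = new Intl.NumberFormat(['en-US'],
+//                                  {style: 'currency', currency: 'USD'});
+//   nf.format(1234.5);        // e.g. '$1,234.50'
+//   nf.v8Parse('$1,234.50');  // e.g. 1234.5 (v8Parse is a non-standard
+//                             // v8-only extension)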
+
+/**
+ * Returns a string that matches LDML representation of the options object.
+ */
+function toLDMLString(options) {
+ var getOption = getGetOption(options, 'dateformat');
+
+ var ldmlString = '';
+
+ var option = getOption('weekday', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'EEEEE', short: 'EEE', long: 'EEEE'});
+
+ option = getOption('era', 'string', ['narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(
+ option, {narrow: 'GGGGG', short: 'GGG', long: 'GGGG'});
+
+ option = getOption('year', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'yy', 'numeric': 'y'});
+
+ option = getOption('month', 'string',
+ ['2-digit', 'numeric', 'narrow', 'short', 'long']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'MM', 'numeric': 'M',
+ 'narrow': 'MMMMM', 'short': 'MMM', 'long': 'MMMM'});
+
+ option = getOption('day', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(
+ option, {'2-digit': 'dd', 'numeric': 'd'});
+
+ var hr12 = getOption('hour12', 'boolean');
+ option = getOption('hour', 'string', ['2-digit', 'numeric']);
+ if (hr12 === undefined) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'jj', 'numeric': 'j'});
+ } else if (hr12 === true) {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'hh', 'numeric': 'h'});
+ } else {
+ ldmlString += appendToLDMLString(option, {'2-digit': 'HH', 'numeric': 'H'});
+ }
+
+ option = getOption('minute', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'mm', 'numeric': 'm'});
+
+ option = getOption('second', 'string', ['2-digit', 'numeric']);
+ ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'});
+
+ option = getOption('timeZoneName', 'string', ['short', 'long']);
+ ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'});
+
+ return ldmlString;
+}
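+
+// Illustrative skeletons produced for hypothetical options:
+//   toLDMLString({year: 'numeric', month: 'long', day: 'numeric'});
+//   // => 'yMMMMd'
+//   toLDMLString({hour: '2-digit', minute: '2-digit', hour12: false});
+//   // => 'HHmm'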
+
+
+/**
+ * Returns either LDML equivalent of the current option or empty string.
+ */
+function appendToLDMLString(option, pairs) {
+ if (option !== undefined) {
+ return pairs[option];
+ } else {
+ return '';
+ }
+}
+
+
+/**
+ * Returns object that matches LDML representation of the date.
+ */
+function fromLDMLString(ldmlString) {
+  // First remove ''-quoted literal text, so we lose strings like 'Uhr'.
+ ldmlString = ldmlString.replace(GetQuotedStringRE(), '');
+
+ var options = {};
+ var match = ldmlString.match(/E{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
+
+ match = ldmlString.match(/G{3,5}/g);
+ options = appendToDateTimeObject(
+ options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
+
+ match = ldmlString.match(/y{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'year', match, {y: 'numeric', yy: '2-digit'});
+
+ match = ldmlString.match(/M{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
+ M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
+
+  // Sometimes we get L instead of M for the month (standalone name).
+ match = ldmlString.match(/L{1,5}/g);
+ options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
+ L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
+
+ match = ldmlString.match(/d{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'day', match, {d: 'numeric', dd: '2-digit'});
+
+ match = ldmlString.match(/h{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = true;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {h: 'numeric', hh: '2-digit'});
+
+ match = ldmlString.match(/H{1,2}/g);
+ if (match !== null) {
+ options['hour12'] = false;
+ }
+ options = appendToDateTimeObject(
+ options, 'hour', match, {H: 'numeric', HH: '2-digit'});
+
+ match = ldmlString.match(/m{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'minute', match, {m: 'numeric', mm: '2-digit'});
+
+ match = ldmlString.match(/s{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'second', match, {s: 'numeric', ss: '2-digit'});
+
+ match = ldmlString.match(/v{1,2}/g);
+ options = appendToDateTimeObject(
+ options, 'timeZoneName', match, {v: 'short', vv: 'long'});
+
+ return options;
+}
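+
+// Illustrative round-trip for a hypothetical pattern:
+//   fromLDMLString("MMM d, y 'at' HH:mm");
+//   // => {..., year: 'numeric', month: 'short', day: 'numeric',
+//   //      hour12: false, hour: '2-digit', minute: '2-digit', ...}
+//   // (the quoted 'at' literal is stripped first)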
+
+
+function appendToDateTimeObject(options, option, match, pairs) {
+ if (match === null) {
+ if (!options.hasOwnProperty(option)) {
+ defineWEProperty(options, option, undefined);
+ }
+ return options;
+ }
+
+ var property = match[0];
+ defineWEProperty(options, option, pairs[property]);
+
+ return options;
+}
+
+
+/**
+ * Returns options with at least default values in it.
+ */
+function toDateTimeOptions(options, required, defaults) {
+ if (options === undefined) {
+ options = null;
+ } else {
+ options = toObject(options);
+ }
+
+ options = $Object.apply(this, [options]);
+
+ var needsDefault = true;
+ if ((required === 'date' || required === 'any') &&
+ (options.weekday !== undefined || options.year !== undefined ||
+ options.month !== undefined || options.day !== undefined)) {
+ needsDefault = false;
+ }
+
+ if ((required === 'time' || required === 'any') &&
+ (options.hour !== undefined || options.minute !== undefined ||
+ options.second !== undefined)) {
+ needsDefault = false;
+ }
+
+ if (needsDefault && (defaults === 'date' || defaults === 'all')) {
+ $Object.defineProperty(options, 'year', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'month', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'day', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ if (needsDefault && (defaults === 'time' || defaults === 'all')) {
+ $Object.defineProperty(options, 'hour', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'minute', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ $Object.defineProperty(options, 'second', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
+ return options;
+}
+
+
+/**
+ * Initializes the given object so it's a valid DateTimeFormat instance.
+ * Useful for subclassing.
+ */
+function initializeDateTimeFormat(dateFormat, locales, options) {
+
+ if (dateFormat.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize DateTimeFormat object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var locale = resolveLocale('dateformat', locales, options);
+
+ options = toDateTimeOptions(options, 'any', 'date');
+
+ var getOption = getGetOption(options, 'dateformat');
+
+  // We implement only the best-fit algorithm, but still need to check
+ // if the formatMatcher values are in range.
+ var matcher = getOption('formatMatcher', 'string',
+ ['basic', 'best fit'], 'best fit');
+
+ // Build LDML string for the skeleton that we pass to the formatter.
+ var ldmlString = toLDMLString(options);
+
+ // Filter out supported extension keys so we know what to put in resolved
+ // section later on.
+ // We need to pass calendar and number system to the method.
+ var tz = canonicalizeTimeZoneID(options.timeZone);
+
+ // ICU prefers options to be passed using -u- extension key/values, so
+ // we need to build that.
+ var internalOptions = {};
+ var extensionMap = parseExtension(locale.extension);
+ var extension = setOptions(options, extensionMap, DATETIME_FORMAT_KEY_MAP,
+ getOption, internalOptions);
+
+ var requestedLocale = locale.locale + extension;
+ var resolved = $Object.defineProperties({}, {
+ calendar: {writable: true},
+ day: {writable: true},
+ era: {writable: true},
+ hour12: {writable: true},
+ hour: {writable: true},
+ locale: {writable: true},
+ minute: {writable: true},
+ month: {writable: true},
+ numberingSystem: {writable: true},
+ pattern: {writable: true},
+ requestedLocale: {value: requestedLocale, writable: true},
+ second: {writable: true},
+ timeZone: {writable: true},
+ timeZoneName: {writable: true},
+ tz: {value: tz, writable: true},
+ weekday: {writable: true},
+ year: {writable: true}
+ });
+
+ var formatter = %CreateDateTimeFormat(
+ requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
+
+ if (tz !== undefined && tz !== resolved.timeZone) {
+ throw new $RangeError('Unsupported time zone specified ' + tz);
+ }
+
+ $Object.defineProperty(dateFormat, 'formatter', {value: formatter});
+ $Object.defineProperty(dateFormat, 'resolved', {value: resolved});
+ $Object.defineProperty(dateFormat, '__initializedIntlObject',
+ {value: 'dateformat'});
+
+ return dateFormat;
+}
+
+
+/**
+ * Constructs Intl.DateTimeFormat object given optional locales and options
+ * parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'DateTimeFormat', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.DateTimeFormat(locales, options);
+ }
+
+ return initializeDateTimeFormat(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * DateTimeFormat resolvedOptions method.
+ */
+%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'dateformat') {
+ throw new $TypeError('resolvedOptions method called on a non-object or ' +
+                         'on an object that is not Intl.DateTimeFormat.');
+ }
+
+ var format = this;
+ var fromPattern = fromLDMLString(format.resolved.pattern);
+ var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
+ if (userCalendar === undefined) {
+ // Use ICU name if we don't have a match. It shouldn't happen, but
+ // it would be too strict to throw for this.
+ userCalendar = format.resolved.calendar;
+ }
+
+ var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
+ format.resolved.locale);
+
+ var result = {
+ locale: locale,
+ numberingSystem: format.resolved.numberingSystem,
+ calendar: userCalendar,
+ timeZone: format.resolved.timeZone
+ };
+
+ addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
+ addWECPropertyIfDefined(result, 'era', fromPattern.era);
+ addWECPropertyIfDefined(result, 'year', fromPattern.year);
+ addWECPropertyIfDefined(result, 'month', fromPattern.month);
+ addWECPropertyIfDefined(result, 'day', fromPattern.day);
+ addWECPropertyIfDefined(result, 'weekday', fromPattern.weekday);
+ addWECPropertyIfDefined(result, 'hour12', fromPattern.hour12);
+ addWECPropertyIfDefined(result, 'hour', fromPattern.hour);
+ addWECPropertyIfDefined(result, 'minute', fromPattern.minute);
+ addWECPropertyIfDefined(result, 'second', fromPattern.second);
+
+ return result;
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
+%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this locale list
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * Options are optional parameter.
+ */
+%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('dateformat', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
+%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
+
+
+/**
+ * Returns a String value representing the result of calling ToNumber(date)
+ * according to the effective locale and the formatting options of this
+ * DateTimeFormat.
+ */
+function formatDate(formatter, dateValue) {
+ var dateMs;
+ if (dateValue === undefined) {
+ dateMs = $Date.now();
+ } else {
+ dateMs = $Number(dateValue);
+ }
+
+ if (!$isFinite(dateMs)) {
+ throw new $RangeError('Provided date is not in valid range.');
+ }
+
+ return %InternalDateFormat(formatter.formatter, new $Date(dateMs));
+}
+
+
+/**
+ * Returns a Date object obtained by parsing ToString(value) according to
+ * the effective locale and the formatting options of this DateTimeFormat.
+ * Returns undefined if the date string cannot be parsed.
+ */
+function parseDate(formatter, value) {
+ return %InternalDateParse(formatter.formatter, $String(value));
+}
+
+
+// 0 because the date argument is optional.
+addBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
+addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
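+
+// Illustrative usage sketch (not part of the implementation; exact output
+// depends on the ICU data shipped with the build):
+//   var df = new Intl.DateTimeFormat('en-US', {timeZone: 'UTC'});
+//   df.format(new Date(0));   // e.g. '1/1/1970'
+//   df.v8Parse('1/1/1970');   // a Date, or undefined if unparseable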
+
+
+/**
+ * Returns the canonical Area/Location name, or throws an exception if the
+ * zone name is not a valid IANA name.
+ */
+function canonicalizeTimeZoneID(tzID) {
+ // Skip undefined zones.
+ if (tzID === undefined) {
+ return tzID;
+ }
+
+ // Special case handling (UTC, GMT).
+ var upperID = tzID.toUpperCase();
+ if (upperID === 'UTC' || upperID === 'GMT' ||
+ upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
+ return 'UTC';
+ }
+
+  // We expect only _ and / besides ASCII letters.
+ // All inputs should conform to Area/Location from now on.
+ var match = GetTimezoneNameCheckRE().exec(tzID);
+ if (match === null) {
+ throw new $RangeError('Expected Area/Location for time zone, got ' + tzID);
+ }
+
+ var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
+ var i = 3;
+  while (i < match.length && match[i] !== undefined) {
+ result = result + '_' + toTitleCaseWord(match[i]);
+ i++;
+ }
+
+ return result;
+}
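+
+// For example (illustrative; assumes the Area/Location regular expression
+// above accepts the input): canonicalizeTimeZoneID('america/los_angeles')
+// yields 'America/Los_Angeles', and canonicalizeTimeZoneID('gmt') yields
+// 'UTC'.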
+
+/**
+ * Initializes the given object so it's a valid BreakIterator instance.
+ * Useful for subclassing.
+ */
+function initializeBreakIterator(iterator, locales, options) {
+ if (iterator.hasOwnProperty('__initializedIntlObject')) {
+ throw new $TypeError('Trying to re-initialize v8BreakIterator object.');
+ }
+
+ if (options === undefined) {
+ options = {};
+ }
+
+ var getOption = getGetOption(options, 'breakiterator');
+
+ var internalOptions = {};
+
+ defineWEProperty(internalOptions, 'type', getOption(
+ 'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
+
+ var locale = resolveLocale('breakiterator', locales, options);
+ var resolved = $Object.defineProperties({}, {
+ requestedLocale: {value: locale.locale, writable: true},
+ type: {value: internalOptions.type, writable: true},
+ locale: {writable: true}
+ });
+
+ var internalIterator = %CreateBreakIterator(locale.locale,
+ internalOptions,
+ resolved);
+
+ $Object.defineProperty(iterator, 'iterator', {value: internalIterator});
+ $Object.defineProperty(iterator, 'resolved', {value: resolved});
+ $Object.defineProperty(iterator, '__initializedIntlObject',
+ {value: 'breakiterator'});
+
+ return iterator;
+}
+
+
+/**
+ * Constructs an Intl.v8BreakIterator object given optional locales and
+ * options parameters.
+ *
+ * @constructor
+ */
+%SetProperty(Intl, 'v8BreakIterator', function() {
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+
+ if (!this || this === Intl) {
+ // Constructor is called as a function.
+ return new Intl.v8BreakIterator(locales, options);
+ }
+
+ return initializeBreakIterator(toObject(this), locales, options);
+ },
+ DONT_ENUM
+);
+
+
+/**
+ * BreakIterator resolvedOptions method.
+ */
+%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (!this || typeof this !== 'object' ||
+ this.__initializedIntlObject !== 'breakiterator') {
+    throw new $TypeError('resolvedOptions method called on a non-object or ' +
+                         'on an object that is not Intl.v8BreakIterator.');
+ }
+
+ var segmenter = this;
+ var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
+ segmenter.resolved.locale);
+
+ return {
+ locale: locale,
+ type: segmenter.resolved.type
+ };
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
+ 'resolvedOptions');
+%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
+%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
+
+
+/**
+ * Returns the subset of the given locale list for which this locale list
+ * has a matching (possibly fallback) locale. Locales appear in the same
+ * order in the returned list as in the input list.
+ * The options argument is optional.
+ */
+%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ return supportedLocalesOf('breakiterator', locales, %_Arguments(1));
+ },
+ DONT_ENUM
+);
+%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
+%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
+
+
+/**
+ * Adopts the text to be segmented by the iterator. Previously adopted
+ * text, if any, is discarded.
+ */
+function adoptText(iterator, text) {
+ %BreakIteratorAdoptText(iterator.iterator, $String(text));
+}
+
+
+/**
+ * Returns the index of the first break in the string and moves the current
+ * pointer.
+ */
+function first(iterator) {
+ return %BreakIteratorFirst(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the next break and moves the pointer.
+ */
+function next(iterator) {
+ return %BreakIteratorNext(iterator.iterator);
+}
+
+
+/**
+ * Returns the index of the current break.
+ */
+function current(iterator) {
+ return %BreakIteratorCurrent(iterator.iterator);
+}
+
+
+/**
+ * Returns the type of the current break.
+ */
+function breakType(iterator) {
+ return %BreakIteratorBreakType(iterator.iterator);
+}
+
+
+addBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
+addBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
+addBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
+addBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
+addBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
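+
+// Illustrative sketch (not part of the implementation) of how the bound
+// methods above combine to segment a string; ICU reports -1 when iteration
+// is exhausted:
+//   var it = new Intl.v8BreakIterator('en', {type: 'word'});
+//   it.adoptText('Hello, world!');
+//   for (var pos = it.first(); pos !== -1; pos = it.next()) {
+//     // pos is a boundary index; it.breakType() classifies the last segment.
+//   }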
+
+// Save references to Intl objects and methods we use, for added security.
+var savedObjects = {
+ 'collator': Intl.Collator,
+ 'numberformat': Intl.NumberFormat,
+ 'dateformatall': Intl.DateTimeFormat,
+ 'dateformatdate': Intl.DateTimeFormat,
+ 'dateformattime': Intl.DateTimeFormat
+};
+
+
+// Default collator, number and date format instances (created with undefined
+// locales and options parameters). They'll be created lazily, as needed.
+var defaultObjects = {
+ 'collator': undefined,
+ 'numberformat': undefined,
+ 'dateformatall': undefined,
+ 'dateformatdate': undefined,
+  'dateformattime': undefined
+};
+
+
+/**
+ * Returns a cached or newly created instance of a given service.
+ * We cache only default instances (where no locales or options are provided).
+ */
+function cachedOrNewService(service, locales, options, defaults) {
+ var useOptions = (defaults === undefined) ? options : defaults;
+ if (locales === undefined && options === undefined) {
+ if (defaultObjects[service] === undefined) {
+ defaultObjects[service] = new savedObjects[service](locales, useOptions);
+ }
+ return defaultObjects[service];
+ }
+ return new savedObjects[service](locales, useOptions);
+}
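+
+// Illustrative only: both calls below hit the same cached default collator,
+// while passing a locale constructs a fresh instance:
+//   cachedOrNewService('collator', undefined, undefined);  // cached default
+//   cachedOrNewService('collator', ['de'], undefined);     // new instance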
+
+
+/**
+ * Compares this and that, and returns a value that is less than, equal to,
+ * or greater than 0. Overrides the built-in method.
+ */
+$Object.defineProperty($String.prototype, 'localeCompare', {
+ value: function(that) {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ if (this === undefined || this === null) {
+ throw new $TypeError('Method invoked on undefined or null value.');
+ }
+
+ var locales = %_Arguments(1);
+ var options = %_Arguments(2);
+ var collator = cachedOrNewService('collator', locales, options);
+ return compare(collator, this, that);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($String.prototype.localeCompare, 'localeCompare');
+%FunctionRemovePrototype($String.prototype.localeCompare);
+%SetNativeFlag($String.prototype.localeCompare);
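+
+// Example (illustrative; actual ordering depends on the ICU collation data):
+//   'ä'.localeCompare('z', 'de');  // < 0: 'ä' sorts before 'z' in German
+//   'ä'.localeCompare('z', 'sv');  // > 0: 'ä' sorts after 'z' in Swedish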
+
+
+/**
+ * Formats a Number object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used.
+ */
+$Object.defineProperty($Number.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+    if (!(this instanceof $Number) && typeof this !== 'number') {
+ throw new $TypeError('Method invoked on an object that is not Number.');
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ var numberFormat = cachedOrNewService('numberformat', locales, options);
+ return formatNumber(numberFormat, this);
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Number.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype($Number.prototype.toLocaleString);
+%SetNativeFlag($Number.prototype.toLocaleString);
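+
+// Example (illustrative; output depends on the ICU locale data):
+//   (123456.789).toLocaleString('de-DE');  // '123.456,789'
+//   (123456.789).toLocaleString('en-US');  // '123,456.789'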
+
+
+/**
+ * Returns the formatted date; throws if the date parameter is not a Date
+ * object, and returns 'Invalid Date' if its time value is NaN.
+ */
+function toLocaleDateTime(date, locales, options, required, defaults, service) {
+ if (!(date instanceof $Date)) {
+ throw new $TypeError('Method invoked on an object that is not Date.');
+ }
+
+ if ($isNaN(date)) {
+ return 'Invalid Date';
+ }
+
+ var internalOptions = toDateTimeOptions(options, required, defaults);
+
+ var dateFormat =
+ cachedOrNewService(service, locales, options, internalOptions);
+
+ return formatDate(dateFormat, date);
+}
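+
+// Illustrative only: the (required, defaults) pairs passed below decide
+// which components toDateTimeOptions fills in when the caller supplies no
+// options: ('any', 'all') yields date and time, ('date', 'date') the date
+// only, and ('time', 'time') the time only.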
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - both date and time are
+ * present in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ return toLocaleDateTime(
+ this, locales, options, 'any', 'all', 'dateformatall');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleString, 'toLocaleString');
+%FunctionRemovePrototype($Date.prototype.toLocaleString);
+%SetNativeFlag($Date.prototype.toLocaleString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only date is present
+ * in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleDateString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ return toLocaleDateTime(
+ this, locales, options, 'date', 'date', 'dateformatdate');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleDateString, 'toLocaleDateString');
+%FunctionRemovePrototype($Date.prototype.toLocaleDateString);
+%SetNativeFlag($Date.prototype.toLocaleDateString);
+
+
+/**
+ * Formats a Date object (this) using locale and options values.
+ * If locale or options are omitted, defaults are used - only time is present
+ * in the output.
+ */
+$Object.defineProperty($Date.prototype, 'toLocaleTimeString', {
+ value: function() {
+ if (%_IsConstructCall()) {
+ throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
+ }
+
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
+ return toLocaleDateTime(
+ this, locales, options, 'time', 'time', 'dateformattime');
+ },
+ writable: true,
+ configurable: true,
+ enumerable: false
+});
+%FunctionSetName($Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
+%FunctionRemovePrototype($Date.prototype.toLocaleTimeString);
+%SetNativeFlag($Date.prototype.toLocaleTimeString);
+
+return Intl;
+}())});
diff --git a/chromium/v8/src/ia32/assembler-ia32-inl.h b/chromium/v8/src/ia32/assembler-ia32-inl.h
index b6ef242a2c4..5a35b207f72 100644
--- a/chromium/v8/src/ia32/assembler-ia32-inl.h
+++ b/chromium/v8/src/ia32/assembler-ia32-inl.h
@@ -251,7 +251,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -266,12 +266,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (IsRuntimeEntry(mode)) {
@@ -329,14 +328,11 @@ Immediate::Immediate(Label* internal_offset) {
Immediate::Immediate(Handle<Object> handle) {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
-#endif
AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!isolate->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
diff --git a/chromium/v8/src/ia32/assembler-ia32.cc b/chromium/v8/src/ia32/assembler-ia32.cc
index e0ae0066552..e5456da4746 100644
--- a/chromium/v8/src/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/ia32/assembler-ia32.cc
@@ -101,80 +101,28 @@ void CpuFeatures::Probe() {
return; // No features if we might serialize.
}
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
+ uint64_t probed_features = 0;
+ CPU cpu;
+ if (cpu.has_sse41()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
+ if (cpu.has_sse3()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE3;
}
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old esp, since we are going to modify the stack.
- __ push(ebp);
- __ pushfd();
- __ push(ecx);
- __ push(ebx);
- __ mov(ebp, esp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfd();
- __ pop(eax);
- __ mov(edx, eax);
- __ xor_(eax, 0x200000); // Flip bit 21.
- __ push(eax);
- __ popfd();
- __ pushfd();
- __ pop(eax);
- __ xor_(eax, edx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, eax);
- __ xor_(edx, edx);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ mov(eax, 1);
- supported_ = (1 << CPUID);
- { CpuFeatureScope fscope(&assm, CPUID);
- __ cpuid();
+ if (cpu.has_sse2()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE2;
}
- supported_ = 0;
-
- // Move the result from ecx:edx to edx:eax and make sure to mark the
- // CPUID feature as supported.
- __ mov(eax, edx);
- __ or_(eax, 1 << CPUID);
- __ mov(edx, ecx);
-
- // Done.
- __ bind(&done);
- __ mov(esp, ebp);
- __ pop(ebx);
- __ pop(ecx);
- __ popfd();
- __ pop(ebp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- uint64_t probed_features = probe();
+ if (cpu.has_cmov()) {
+ probed_features |= static_cast<uint64_t>(1) << CMOV;
+ }
+
+ // SAHF must be available in compat/legacy mode.
+ ASSERT(cpu.has_sahf());
+ probed_features |= static_cast<uint64_t>(1) << SAHF;
+
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features;
found_by_runtime_probing_only_ = probed_features & ~platform_features;
-
- delete memory;
}
@@ -474,7 +422,6 @@ void Assembler::CodeTargetAlign() {
void Assembler::cpuid() {
- ASSERT(IsEnabled(CPUID));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA2);
@@ -1227,6 +1174,10 @@ void Assembler::test_b(Register reg, const Operand& op) {
void Assembler::test(const Operand& op, const Immediate& imm) {
+ if (op.is_reg_only()) {
+ test(op.reg(), imm);
+ return;
+ }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
@@ -1306,14 +1257,6 @@ void Assembler::nop() {
}
-void Assembler::rdtsc() {
- ASSERT(IsEnabled(RDTSC));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x31);
-}
-
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
ASSERT(is_uint16(imm16));
@@ -1637,6 +1580,13 @@ void Assembler::fstp_s(const Operand& adr) {
}
+void Assembler::fst_s(const Operand& adr) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ emit_operand(edx, adr);
+}
+
+
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDD);
@@ -1771,12 +1721,24 @@ void Assembler::fadd(int i) {
}
+void Assembler::fadd_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC0, i);
+}
+
+
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xE8, i);
}
+void Assembler::fsub_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xE0, i);
+}
+
+
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
EMIT(0xDA);
@@ -1802,6 +1764,12 @@ void Assembler::fdiv(int i) {
}
+void Assembler::fdiv_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xF0, i);
+}
+
+
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC0, i);
diff --git a/chromium/v8/src/ia32/assembler-ia32.h b/chromium/v8/src/ia32/assembler-ia32.h
index 8380897f6f1..55eff931907 100644
--- a/chromium/v8/src/ia32/assembler-ia32.h
+++ b/chromium/v8/src/ia32/assembler-ia32.h
@@ -183,6 +183,7 @@ const IntelDoubleRegister double_register_4 = { 4 };
const IntelDoubleRegister double_register_5 = { 5 };
const IntelDoubleRegister double_register_6 = { 6 };
const IntelDoubleRegister double_register_7 = { 7 };
+const IntelDoubleRegister no_double_reg = { -1 };
struct XMMRegister : IntelDoubleRegister {
@@ -227,6 +228,7 @@ struct XMMRegister : IntelDoubleRegister {
#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
+#define no_xmm_reg (static_cast<const XMMRegister&>(no_double_reg))
struct X87Register : IntelDoubleRegister {
@@ -537,7 +539,6 @@ class CpuFeatures : public AllStatic {
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
@@ -870,7 +871,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void rdtsc();
void ret(int imm16);
// Label operations & relative jumps (PPUM Appendix D)
@@ -929,6 +929,7 @@ class Assembler : public AssemblerBase {
void fld_d(const Operand& adr);
void fstp_s(const Operand& adr);
+ void fst_s(const Operand& adr);
void fstp_d(const Operand& adr);
void fst_d(const Operand& adr);
@@ -955,10 +956,13 @@ class Assembler : public AssemblerBase {
void fninit();
void fadd(int i);
+ void fadd_i(int i);
void fsub(int i);
+ void fsub_i(int i);
void fmul(int i);
void fmul_i(int i);
void fdiv(int i);
+ void fdiv_i(int i);
void fisub_s(const Operand& adr);
diff --git a/chromium/v8/src/ia32/builtins-ia32.cc b/chromium/v8/src/ia32/builtins-ia32.cc
index b90a17f6c38..a1597481aa6 100644
--- a/chromium/v8/src/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/ia32/builtins-ia32.cc
@@ -74,6 +74,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore receiver.
+ __ pop(edi);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
@@ -83,56 +101,29 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+  // would be quite expensive. A good compromise is to first check against
+  // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
-}
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
- // Tear down internal frame.
- }
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -241,7 +232,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ cmp(esi, edi);
__ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
@@ -272,7 +263,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sub(edx, ecx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
+ __ Assert(positive, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
@@ -519,25 +510,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
@@ -545,25 +518,7 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
@@ -654,7 +609,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ ret(2 * kPointerSize); // Remove state, eax.
__ bind(&not_tos_eax);
- __ Abort("no cases left");
+ __ Abort(kNoCasesLeft);
}
@@ -1033,9 +988,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for InternalArray function");
+ __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for InternalArray function");
+ __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -1062,9 +1017,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -1092,7 +1047,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
- __ Assert(equal, "Unexpected String function");
+ __ Assert(equal, kUnexpectedStringFunction);
}
// Load the first argument into eax and get rid of the rest
@@ -1137,9 +1092,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected string wrapper instance size");
+ __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
__ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, "Unexpected unused properties of string wrapper");
+ __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
@@ -1327,32 +1282,47 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // Look up the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
+    // Look up the code and calculate the pc offset.
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ __ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ SmiTag(edx);
+
+ // Pass both function and pc offset as arguments.
__ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(edx);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ cmp(eax, Immediate(Smi::FromInt(-1)));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmp(eax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(eax);
- __ push(eax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ __ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(ebx);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(eax, Operand(eax, ebx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ mov(Operand(esp, 0), eax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
}
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.cc b/chromium/v8/src/ia32/code-stubs-ia32.cc
index 5789f49216f..a83c1ae91d1 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.cc
+++ b/chromium/v8/src/ia32/code-stubs-ia32.cc
@@ -43,6 +43,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -250,17 +261,6 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
}
-void UnaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { eax };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(UnaryOpIC_Miss);
-}
-
-
void StoreGlobalStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -310,133 +310,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ Allocate(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- Factory* factory = masm->isolate()->factory();
- __ mov(ebx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(factory->the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ test(ebx, ebx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // ecx holds native context, ebx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // Map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ mov(edx, FieldOperand(ebx, SharedFunctionInfo::kFirstCodeSlot));
- __ cmp(ecx, FieldOperand(ebx, SharedFunctionInfo::kFirstContextSlot));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. edx holds an index as a Smi.
- Label loop;
- Label restore;
- __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
- __ j(equal, &restore);
- __ sub(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the optimized code
- // map and either unmangle them on marking or do nothing as the whole map is
- // discarded on major GC anyway.
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Now link a function into a list of optimized functions.
- __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(edx, eax);
- __ RecordWriteContextSlot(
- ecx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- edx,
- ebx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- // Restore SharedFunctionInfo into edx.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(Immediate(factory->false_value()));
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -511,9 +384,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
__ cmp(ecx, 0);
- __ Assert(equal, message);
+ __ Assert(equal, kExpected0AsASmiSentinel);
}
__ mov(ecx, GlobalObjectOperand());
__ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
@@ -625,11 +497,6 @@ class FloatingPointHelper : public AllStatic {
BinaryOpIC::TypeInfo right_type,
Label* operand_conversion_failure);
- // Assumes that operands are smis or heap numbers and loads them
- // into xmm0 and xmm1. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
@@ -791,18 +658,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
-// |conversion_failure| if the heap number did not contain an int32 value.
-// Result is in ecx. Trashes ebx, xmm0, and xmm1.
-static void ConvertHeapNumberToInt32(MacroAssembler* masm,
- Register source,
- Label* conversion_failure) {
- __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, conversion_failure, xmm0, ecx, ebx, xmm1);
-}
-
-
void BinaryOpStub::Initialize() {
platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
}
@@ -2403,16 +2258,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
__ cmp(ebx, factory->heap_number_map());
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the edx heap number in ecx.
- if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- ConvertHeapNumberToInt32(masm, edx, conversion_failure);
- } else {
- DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
- __ mov(edx, ecx);
+ __ TruncateHeapNumberToI(edx, edx);
// Here edx has the untagged integer, eax has a Smi or a heap number.
__ bind(&load_arg2);
@@ -2441,14 +2287,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx.
- if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- ConvertHeapNumberToInt32(masm, eax, conversion_failure);
- } else {
- DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
+ __ TruncateHeapNumberToI(ecx, eax);
__ bind(&done);
__ mov(eax, edx);
@@ -2473,33 +2312,6 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
@@ -2702,16 +2514,16 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
if (exponent_type_ != INTEGER) {
- Label fast_power;
- // Detect integer exponents stored as double.
- __ cvttsd2si(exponent, Operand(double_exponent));
+ Label fast_power, try_arithmetic_simplification;
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cvttsd2si(exponent, Operand(double_exponent));
__ cmp(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
- __ cvtsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -3469,9 +3281,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// ecx: RegExp data (FixedArray)
@@ -3831,7 +3643,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ test_b(ebx, kIsIndirectStringMask);
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
@@ -3968,11 +3780,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch = scratch2;
// Load the number string cache.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
@@ -4326,7 +4134,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
edi);
}
#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
+ __ Abort(kUnexpectedFallThroughFromStringComparison);
#endif
__ bind(&check_unequal_objects);
@@ -4393,16 +4201,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
@@ -4467,6 +4265,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
__ push(eax);
__ push(edi);
__ push(ebx);
@@ -4477,6 +4276,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ pop(ebx);
__ pop(edi);
__ pop(eax);
+ __ SmiUntag(eax);
}
__ jmp(&done);
@@ -4630,8 +4430,8 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
@@ -5022,9 +4822,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
-
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -5044,18 +4841,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (!HasCallSiteInlineCheck()) {
// Look up the function and the map in the instanceof cache.
Label miss;
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(function, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
+ __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
__ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(map, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
+ __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
__ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
+ __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&miss);
}
@@ -5070,12 +4860,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Update the global instanceof or call site inlined cache with the current
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- map);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- function);
+ __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
} else {
// The constants for the code patching are based on no push instructions
// at the call site.
@@ -5085,9 +4871,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
__ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
}
__ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
__ mov(Operand(scratch, 0), map);
@@ -5109,10 +4895,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(0));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(scratch,
- times_pointer_size, roots_array_start), eax);
+ __ mov(eax, Immediate(0));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->true_value());
@@ -5120,7 +4904,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
@@ -5131,10 +4915,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start), eax);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->false_value());
@@ -5142,7 +4924,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
@@ -5255,7 +5037,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5305,7 +5087,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -5340,7 +5122,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5352,7 +5134,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -5733,7 +5515,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ j(below, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -5741,22 +5522,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- &not_cached);
+ slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(zero, slow);
- __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ mov(Operand(esp, stack_offset), arg);
-
__ bind(&done);
}
@@ -5887,11 +5655,7 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
// Load the string table.
Register string_table = c2;
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kStringTableRootIndex));
- __ mov(string_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
// Calculate capacity mask from the string table capacity.
Register mask = scratch2;
@@ -5979,12 +5743,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register scratch) {
// hash = (seed + character) + ((seed + character) << 10);
if (Serializer::enabled()) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
- __ mov(scratch, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
+ __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
__ SmiUntag(scratch);
__ add(scratch, character);
__ mov(hash, scratch);
@@ -7039,8 +6798,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in CompileArrayPushCall.
{ REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
{ REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal and CallFunctionStub.
- { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
@@ -7074,7 +6831,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7468,96 +7225,128 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // ebx - type info cell
- // edx - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // ebx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // edx - kind (if mode != DISABLE_ALLOCATION_SITES)
// eax - number of arguments
// edi - constructor?
// esp[0] - return address
// esp[4] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
- // is the low bit set? If so, we are holey and that is good.
- __ test_b(edx, 1);
Label normal_sequence;
- __ j(not_zero, &normal_sequence);
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ test_b(edx, 1);
+ __ j(not_zero, &normal_sequence);
+ }
// look at the first argument
__ mov(ecx, Operand(esp, kPointerSize));
__ test(ecx, ecx);
__ j(zero, &normal_sequence);
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ inc(edx);
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &normal_sequence);
- __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
- __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
- __ j(not_equal, &normal_sequence);
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ SmiTag(edx);
- __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
- __ SmiUntag(edx);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmp(edx, kind);
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry.
+ __ inc(edx);
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ Assert(equal, kExpectedAllocationSiteInCell);
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(edx);
+ __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
+ __ SmiUntag(edx);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmp(edx, kind);
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
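
Both the `test_b(edx, 1)` probe and the `inc(edx)` packed-to-holey fix-up rely on the elements-kind numbering asserted above: the holey fast kinds are the odd values, one above their packed twins. A small self-check of that encoding:

#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS = 0, FAST_HOLEY_SMI_ELEMENTS = 1, FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3, FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

static bool IsHoley(ElementsKind k) { return (k & 1) != 0; }  // test_b(edx, 1)
static ElementsKind GetHoley(ElementsKind k) {
  return IsHoley(k) ? k : static_cast<ElementsKind>(k + 1);   // inc(edx)
}

int main() {
  assert(GetHoley(FAST_SMI_ELEMENTS) == FAST_HOLEY_SMI_ELEMENTS);
  assert(GetHoley(FAST_DOUBLE_ELEMENTS) == FAST_HOLEY_DOUBLE_ELEMENTS);
  assert(GetHoley(FAST_HOLEY_ELEMENTS) == FAST_HOLEY_ELEMENTS);
}
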
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7590,6 +7379,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
@@ -7610,9 +7427,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
  // Will catch both a NULL and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in ebx or a valid cell
Label okay_here;
@@ -7620,54 +7437,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &okay_here);
__ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
- __ Assert(equal, "Expected property cell in register ebx");
+ __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
__ bind(&okay_here);
}
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
+ Label no_info;
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
-
- // The type cell may have undefined in its value.
- __ cmp(edx, Immediate(undefined_sentinel));
- __ j(equal, &no_info);
-
- // The type cell has either an AllocationSite or a JSFunction
__ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
masm->isolate()->heap()->allocation_site_map())));
__ j(not_equal, &no_info);
__ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ mov(edx, Immediate(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
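
Generate() now emits both dispatch bodies back to back: the fall-through path runs when the type cell holds an AllocationSite (its transition info is untagged into edx and dispatched with DONT_OVERRIDE), and everything else lands on no_info with DISABLE_ALLOCATION_SITES. A C-level model of that mode selection, with illustrative stand-in types:

struct Map {};
static Map allocation_site_map;                // heap->allocation_site_map()
struct HeapObject { Map* map; };

enum AllocationSiteOverrideMode { DONT_OVERRIDE, DISABLE_ALLOCATION_SITES };

// nullptr stands in for the undefined sentinel in the type cell.
static AllocationSiteOverrideMode SelectMode(HeapObject* cell_value) {
  if (cell_value != nullptr && cell_value->map == &allocation_site_map) {
    return DONT_OVERRIDE;            // edx <- site->transition_info, untagged
  }
  return DISABLE_ALLOCATION_SITES;   // the no_info path
}

int main() {
  HeapObject site = { &allocation_site_map };
  return SelectMode(&site) == DONT_OVERRIDE ? 0 : 1;
}
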
@@ -7724,9 +7513,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
  // Will catch both a NULL and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
+ __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -7745,7 +7534,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ j(equal, &done);
__ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
- "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.h b/chromium/v8/src/ia32/code-stubs-ia32.h
index e80acc6ccfc..5c8eca37b5b 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.h
+++ b/chromium/v8/src/ia32/code-stubs-ia32.h
@@ -74,7 +74,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -327,7 +327,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
diff --git a/chromium/v8/src/ia32/codegen-ia32.cc b/chromium/v8/src/ia32/codegen-ia32.cc
index f488718dc6d..84a4d238bd4 100644
--- a/chromium/v8/src/ia32/codegen-ia32.cc
+++ b/chromium/v8/src/ia32/codegen-ia32.cc
@@ -177,11 +177,6 @@ UnaryMathFunction CreateSqrtFunction() {
#undef __
#define __ ACCESS_MASM(masm)
-// Keep around global pointers to these objects so that Valgrind won't complain.
-static size_t* medium_handlers = NULL;
-static size_t* small_handlers = NULL;
-
-
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
@@ -253,12 +248,24 @@ void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
#define __ masm.
+class LabelConverter {
+ public:
+ explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
+ int32_t address(Label* l) const {
+ return reinterpret_cast<int32_t>(buffer_) + l->pos();
+ }
+ private:
+ byte* buffer_;
+};
+
+
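
LabelConverter replaces the `new size_t[...]` handler arrays that were kept alive in globals to placate Valgrind: since the buffer is allocated once in executable memory and never moves, a bound label's absolute address is just base plus offset, so the handler tables can be emitted into the buffer itself with `__ dd(conv.address(&label))` and indexed at run time by the `mov`/`jmp` pairs below. A host-side sketch of the arithmetic and of the small-copy table (sizes 5 through 8 deliberately share one handler):

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t buffer[64];   // stands in for the fixed executable buffer
  int label_pos = 12;   // Label::pos(): offset where a handler was bound
  intptr_t handler = reinterpret_cast<intptr_t>(buffer) + label_pos;
  std::printf("handler address = %p\n", reinterpret_cast<void*>(handler));

  // Stand-in "addresses" for f0..f4 and the shared f5_8 entries.
  int32_t small_handlers[9] = {100, 110, 120, 130, 140, 150, 150, 150, 150};
  int copy_size = 6;                                   // any of 5..8 hits f5_8
  std::printf("jmp %d\n", small_handlers[copy_size]);  // mov eax, ...; jmp eax
}
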
OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -452,7 +459,7 @@ OS::MemMoveFunction CreateMemMoveFunction() {
// Special handlers for 9 <= copy_size < 64. No assumptions about
// alignment or move distance, so all reads must be unaligned and
// must happen before any writes.
- Label f9_16, f17_32, f33_48, f49_63;
+ Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
__ movdbl(xmm0, Operand(src, 0));
@@ -488,11 +495,11 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
MemMoveEmitPopAndReturn(&masm);
- medium_handlers = new size_t[4];
- medium_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f9_16.pos();
- medium_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f17_32.pos();
- medium_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f33_48.pos();
- medium_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f49_63.pos();
+ __ bind(&medium_handlers);
+ __ dd(conv.address(&f9_16));
+ __ dd(conv.address(&f17_32));
+ __ dd(conv.address(&f33_48));
+ __ dd(conv.address(&f49_63));
__ bind(&medium_size); // Entry point into this block.
__ mov(eax, count);
@@ -505,13 +512,12 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ int3();
__ bind(&ok);
}
- __ mov(eax, Operand(eax, times_4,
- reinterpret_cast<intptr_t>(medium_handlers)));
+ __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
__ jmp(eax);
}
{
// Specialized copiers for copy_size <= 8 bytes.
- Label f0, f1, f2, f3, f4, f5_8;
+ Label small_handlers, f0, f1, f2, f3, f4, f5_8;
__ bind(&f0);
MemMoveEmitPopAndReturn(&masm);
@@ -544,16 +550,16 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ mov(Operand(dst, count, times_1, -4), edx);
MemMoveEmitPopAndReturn(&masm);
- small_handlers = new size_t[9];
- small_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f0.pos();
- small_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f1.pos();
- small_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f2.pos();
- small_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f3.pos();
- small_handlers[4] = reinterpret_cast<intptr_t>(buffer) + f4.pos();
- small_handlers[5] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
- small_handlers[6] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
- small_handlers[7] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
- small_handlers[8] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ __ bind(&small_handlers);
+ __ dd(conv.address(&f0));
+ __ dd(conv.address(&f1));
+ __ dd(conv.address(&f2));
+ __ dd(conv.address(&f3));
+ __ dd(conv.address(&f4));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
+ __ dd(conv.address(&f5_8));
__ bind(&small_size); // Entry point into this block.
if (FLAG_debug_code) {
@@ -563,8 +569,7 @@ OS::MemMoveFunction CreateMemMoveFunction() {
__ int3();
__ bind(&ok);
}
- __ mov(eax, Operand(count, times_4,
- reinterpret_cast<intptr_t>(small_handlers)));
+ __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
__ jmp(eax);
}
} else {
@@ -779,7 +784,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (FLAG_debug_code) {
__ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ Assert(equal, "object found in smi-only array");
+ __ Assert(equal, kObjectFoundInSmiOnlyArray);
}
if (CpuFeatures::IsSupported(SSE2)) {
@@ -1011,7 +1016,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ test(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
diff --git a/chromium/v8/src/ia32/codegen-ia32.h b/chromium/v8/src/ia32/codegen-ia32.h
index 6db381e47e5..6a207ca9b51 100644
--- a/chromium/v8/src/ia32/codegen-ia32.h
+++ b/chromium/v8/src/ia32/codegen-ia32.h
@@ -53,7 +53,7 @@ class CodeGenerator {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static bool RecordPositions(MacroAssembler* masm,
int pos,
diff --git a/chromium/v8/src/ia32/cpu-ia32.cc b/chromium/v8/src/ia32/cpu-ia32.cc
index 77ff169b52b..5fb04fc7272 100644
--- a/chromium/v8/src/ia32/cpu-ia32.cc
+++ b/chromium/v8/src/ia32/cpu-ia32.cc
@@ -72,20 +72,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#elif defined(__native_client__)
- asm("hlt");
-#else
- asm("int $3");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/chromium/v8/src/ia32/debug-ia32.cc b/chromium/v8/src/ia32/debug-ia32.cc
index 68199f905b5..76a7003bfe0 100644
--- a/chromium/v8/src/ia32/debug-ia32.cc
+++ b/chromium/v8/src/ia32/debug-ia32.cc
@@ -49,8 +49,8 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
void BreakLocationIterator::SetDebugBreakAtReturn() {
ASSERT(Assembler::kJSReturnSequenceLength >=
Assembler::kCallInstructionLength);
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
+ rinfo()->PatchCodeWithCall(
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
@@ -79,7 +79,7 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = debug_info_->GetIsolate();
rinfo()->PatchCodeWithCall(
isolate->debug()->debug_break_slot()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
@@ -128,7 +128,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ test(reg, Immediate(0xc0000000));
- __ Assert(zero, "Unable to encode value as smi");
+ __ Assert(zero, kUnableToEncodeValueAsSmi);
}
__ SmiTag(reg);
__ push(reg);
diff --git a/chromium/v8/src/ia32/deoptimizer-ia32.cc b/chromium/v8/src/ia32/deoptimizer-ia32.cc
index 48968064aa3..13a70afe521 100644
--- a/chromium/v8/src/ia32/deoptimizer-ia32.cc
+++ b/chromium/v8/src/ia32/deoptimizer-ia32.cc
@@ -200,12 +200,7 @@ static const byte kNopByteTwo = 0x90;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -221,12 +216,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -241,214 +231,33 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
- ASSERT_EQ(interrupt_code->entry(),
+ // Get the interrupt builtin code object to match against.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
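
The patch-state probe replaces the old boolean check: a back edge starts life as `jns offset; call InterruptCheck` and is patched for OSR by overwriting the `jns` with a two-byte nop and retargeting the call at the OnStackReplacement builtin. A byte-level sketch of the same decision (0x79 is the `jns rel8` opcode; `66 90` is the two-byte nop):

#include <cstdint>

enum InterruptPatchState { NOT_PATCHED, PATCHED_FOR_OSR };

const uint8_t kJnsInstruction = 0x79;
const uint8_t kNopByteOne = 0x66;
const uint8_t kNopByteTwo = 0x90;

// call_target_address points at the 4-byte call operand; the two bytes
// before the call opcode carry either the jns or the nop pair.
static InterruptPatchState GetState(const uint8_t* call_target_address) {
  if (call_target_address[-3] == kNopByteOne &&
      call_target_address[-2] == kNopByteTwo) {
    return PATCHED_FOR_OSR;  // call now targets Builtins::kOnStackReplacement
  }
  return NOT_PATCHED;        // call still targets Builtins::kInterruptCheck
}

int main() {
  uint8_t patched_site[8] = {0, 0, 0, kNopByteOne, kNopByteTwo, 0xe8, 0, 0};
  return GetState(patched_site + 6) == PATCHED_FOR_OSR ? 0 : 1;
}
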
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Next(); // Drop JS frames count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
- ast_id,
- input_frame_size,
- output_frame_size,
- input_->GetRegister(ebp.code()),
- input_->GetRegister(esp.code()));
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & kPointerSize) != 0) {
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
-
- int32_t alignment_state = (has_alignment_padding_ == 1) ?
- kAlignmentPaddingPushed :
- kNoAlignmentPadding;
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
- output_offset,
- alignment_state);
- }
- output_[0]->SetFrameSlot(output_offset, alignment_state);
- output_offset -= kPointerSize;
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), frame_pointer);
- output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -616,27 +425,17 @@ void Deoptimizer::EntryGenerator::Generate() {
}
__ pop(eax);
- if (type() != OSR) {
- // If frame was dynamically aligned, pop padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ pop(ecx);
- if (FLAG_debug_code) {
- __ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, "alignment marker expected");
- }
- __ bind(&no_padding);
- } else {
- // If frame needs dynamic alignment push padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ push(Immediate(kAlignmentZapValue));
- __ bind(&no_padding);
+ // If frame was dynamically aligned, pop padding.
+ Label no_padding;
+ __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+ Immediate(0));
+ __ j(equal, &no_padding);
+ __ pop(ecx);
+ if (FLAG_debug_code) {
+ __ cmp(ecx, Immediate(kAlignmentZapValue));
+ __ Assert(equal, kAlignmentMarkerExpected);
}
+ __ bind(&no_padding);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -663,7 +462,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
- // In case of OSR or a failed STUB, we have to restore the XMM registers.
+ // In case of a failed STUB, we have to restore the XMM registers.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
@@ -674,9 +473,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(ebx, FrameDescription::state_offset()));
- }
+ __ push(Operand(ebx, FrameDescription::state_offset()));
__ push(Operand(ebx, FrameDescription::pc_offset()));
__ push(Operand(ebx, FrameDescription::continuation_offset()));
diff --git a/chromium/v8/src/ia32/disasm-ia32.cc b/chromium/v8/src/ia32/disasm-ia32.cc
index c43f11c00e9..01fa9996456 100644
--- a/chromium/v8/src/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/ia32/disasm-ia32.cc
@@ -606,7 +606,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
}
ASSERT_NE(NULL, mnem);
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
- if (imm8 > 0) {
+ if (imm8 >= 0) {
AppendToBuffer("%d", imm8);
} else {
AppendToBuffer("cl");
@@ -698,6 +698,7 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
switch (escape_opcode) {
case 0xD9: switch (regop) {
case 0: mnem = "fld_s"; break;
+ case 2: mnem = "fst_s"; break;
case 3: mnem = "fstp_s"; break;
case 7: mnem = "fstcw"; break;
default: UnimplementedInstruction();
@@ -743,7 +744,14 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
switch (escape_opcode) {
case 0xD8:
- UnimplementedInstruction();
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd_i"; break;
+ case 0xE0: mnem = "fsub_i"; break;
+ case 0xC8: mnem = "fmul_i"; break;
+ case 0xF0: mnem = "fdiv_i"; break;
+ default: UnimplementedInstruction();
+ }
break;
case 0xD9:
@@ -767,6 +775,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xEE: mnem = "fldz"; break;
case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
+ case 0xF4: mnem = "fxtract"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
@@ -815,6 +824,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
has_register = true;
switch (modrm_byte & 0xF8) {
case 0xC0: mnem = "ffree"; break;
+ case 0xD0: mnem = "fst"; break;
case 0xD8: mnem = "fstp"; break;
default: UnimplementedInstruction();
}
@@ -862,7 +872,6 @@ static const char* F0Mnem(byte f0byte) {
switch (f0byte) {
case 0x18: return "prefetch";
case 0xA2: return "cpuid";
- case 0x31: return "rdtsc";
case 0xBE: return "movsx_b";
case 0xBF: return "movsx_w";
case 0xB6: return "movzx_b";
@@ -1449,6 +1458,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += D1D3C1Instruction(data);
break;
+ case 0xD8: // fall through
case 0xD9: // fall through
case 0xDA: // fall through
case 0xDB: // fall through
diff --git a/chromium/v8/src/ia32/full-codegen-ia32.cc b/chromium/v8/src/ia32/full-codegen-ia32.cc
index 8f11acc1bec..6d39cc1e6e5 100644
--- a/chromium/v8/src/ia32/full-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/full-codegen-ia32.cc
@@ -288,8 +288,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -347,8 +346,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -395,8 +393,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(eax);
EmitProfilingCounterReset();
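
With StackCheckStub and InterruptStub gone, both paths become plain code-target calls to pre-built builtins, still guarded by the profiling-counter decrement. A toy model of that bookkeeping (the weight and starting value are made up):

#include <cstdio>

struct ProfilingCounter {
  int value = 1000;
  // Mirrors EmitProfilingCounterDecrement + j(positive, &ok): returns true
  // when the fall-through (the InterruptCheck call) would be taken.
  bool DecrementAndCheck(int weight) {
    value -= weight;
    return value <= 0;
  }
};

int main() {
  ProfilingCounter counter;
  int back_edges = 0;
  while (!counter.DecrementAndCheck(100)) ++back_edges;
  std::printf("interrupt check after %d back edges\n", back_edges + 1);
}
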
@@ -745,9 +743,9 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
- __ Check(not_equal, "Declaration in with context.");
+ __ Check(not_equal, kDeclarationInWithContext);
__ cmp(ebx, isolate()->factory()->catch_context_map());
- __ Check(not_equal, "Declaration in catch context.");
+ __ Check(not_equal, kDeclarationInCatchContext);
}
}
@@ -1268,7 +1266,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ push(Immediate(info));
+ __ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
__ push(esi);
@@ -2169,7 +2167,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
- __ Abort("Generator failed to resume.");
+ __ Abort(kGeneratorFailedToResume);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
@@ -2468,7 +2466,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, "Let binding re-initialization.");
+ __ Check(equal, kLetBindingReInitialization);
}
// Perform the assignment.
__ mov(location, eax);
@@ -2958,7 +2956,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2972,7 +2970,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(ebx, Map::kBitField2Offset),
1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, if_true);
+ __ j(not_zero, &skip_lookup);
// Check for fast case object. Return false for slow case objects.
__ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
@@ -3018,6 +3016,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Reload map as register ebx was used as temporary above.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
@@ -3029,14 +3033,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmp(ecx,
ContextOperand(edx,
Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
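
The reordering changes what the map bit caches: it now only records that the lookup for a local valueOf field came up empty, while the prototype comparison runs on every call and feeds `Split(equal, ...)` directly. Condensed, the logic is (a sketch; the lookup helper is a stand-in for the emitted descriptor scan):

struct Map {
  bool string_wrapper_safe_bit;   // Map::kStringWrapperSafeForDefaultValueOf
  bool prototype_is_unmodified;   // the STRING_FUNCTION_PROTOTYPE_MAP compare
};

static bool HasLocalValueOf(const Map&) { return false; }  // assumed lookup

static bool SafeForDefaultValueOf(Map& map) {
  if (!map.string_wrapper_safe_bit) {          // j(not_zero, &skip_lookup)
    if (HasLocalValueOf(map)) return false;    // slow-case paths omitted
    map.string_wrapper_safe_bit = true;        // cache the lookup result only
  }
  // skip_lookup: the prototype is re-checked even when the bit was set.
  return map.prototype_is_unmodified;          // Split(equal, ...)
}

int main() {
  Map m = { false, true };
  return SafeForDefaultValueOf(m) && m.string_wrapper_safe_bit ? 0 : 1;
}
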
@@ -3272,7 +3271,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -3430,15 +3429,15 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register value,
uint32_t encoding_mask) {
__ test(index, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi index");
+ __ Check(zero, kNonSmiIndex);
__ test(value, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi value");
+ __ Check(zero, kNonSmiValue);
__ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
+ __ Check(less, kIndexIsTooLarge);
__ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, "Index is negative");
+ __ Check(greater_equal, kIndexIsNegative);
__ push(value);
__ mov(value, FieldOperand(string, HeapObject::kMapOffset));
@@ -3446,7 +3445,7 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
__ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
__ cmp(value, Immediate(encoding_mask));
- __ Check(equal, "Unexpected string type");
+ __ Check(equal, kUnexpectedStringType);
__ pop(value);
}
@@ -3818,7 +3817,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
return;
@@ -4000,7 +3999,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length, elements.
if (generate_debug_code_) {
__ cmp(index, array_length);
- __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
+ __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
__ mov(string, FieldOperand(elements,
@@ -4347,34 +4346,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- Comment cmt(masm_, comment);
- UnaryOpStub stub(expr->op());
- // UnaryOpStub expects the argument to be in the
- // accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
diff --git a/chromium/v8/src/ia32/ic-ia32.cc b/chromium/v8/src/ia32/ic-ia32.cc
index bf0c80b2b46..327ac57623e 100644
--- a/chromium/v8/src/ia32/ic-ia32.cc
+++ b/chromium/v8/src/ia32/ic-ia32.cc
@@ -483,7 +483,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// based on 32 bits of the map pointer and the string hash.
if (FLAG_debug_code) {
__ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ Check(equal, "Map is no longer in eax.");
+ __ Check(equal, kMapIsNoLongerInEax);
}
__ mov(ebx, eax); // Keep the map around for later.
__ shr(eax, KeyedLookupCache::kMapHashShift);
@@ -1306,7 +1306,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
// Cache miss: Jump to runtime.
@@ -1425,8 +1425,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.cc b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
index 7a601cf39bc..025740d4575 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
@@ -35,6 +35,7 @@
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -48,7 +49,7 @@ static SaveFPRegsMode GetSaveFPRegsMode() {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -56,11 +57,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) {}
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const {}
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -113,7 +114,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -220,7 +221,7 @@ bool LCodeGen::GeneratePrologue() {
dynamic_frame_alignment_ &&
FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
- __ Assert(zero, "frame is expected to be aligned");
+ __ Assert(zero, kFrameIsExpectedToBeAligned);
}
// Reserve space for the stack slots needed by the code.
@@ -332,6 +333,28 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Save the first local, which is overwritten by the alignment state.
+ Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
+ __ push(alignment_loc);
+
+ // Set the dynamic frame alignment state to "not aligned".
+ __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 1);
+ __ sub(esp, Immediate((slots - 1) * kPointerSize));
+}
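
The frame arithmetic above subsumes the unoptimized frame rather than building a fresh one: only the slots the optimized frame needs beyond those already present are allocated, and the minus one accounts for the slot just consumed by pushing the saved first local. With illustrative numbers:

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  int stack_slots = 10;             // GetStackSlotCount()
  int unoptimized_frame_slots = 6;  // graph()->osr()->UnoptimizedFrameSlots()
  int slots = stack_slots - unoptimized_frame_slots;
  // One slot was already grown by the push(alignment_loc) above.
  std::printf("sub esp, %d\n", (slots - 1) * kPointerSize);  // sub esp, 12
}
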
+
+
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -355,11 +378,16 @@ bool LCodeGen::GenerateBody() {
if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
+ RecordAndUpdatePosition(instr->position());
+
instr->CompileToNative(this);
if (!CpuFeatures::IsSupported(SSE2)) {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(x87_stack_depth_);
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ __ VerifyX87StackDepth(x87_stack_.depth());
}
}
}
@@ -422,6 +450,12 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
+ X87Stack copy(code->x87_stack());
+ x87_stack_ = copy;
+
+ int pos = instructions_->at(code->instruction_index())->position();
+ RecordAndUpdatePosition(pos);
+
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@@ -442,6 +476,7 @@ bool LCodeGen::GenerateDeferredCode() {
}
code->Generate();
if (NeedsDeferredFrame()) {
+ __ bind(code->done());
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
@@ -491,21 +526,22 @@ XMMRegister LCodeGen::ToDoubleRegister(int index) const {
void LCodeGen::X87LoadForUsage(X87Register reg) {
- ASSERT(X87StackContains(reg));
- X87Fxch(reg);
- x87_stack_depth_--;
+ ASSERT(x87_stack_.Contains(reg));
+ x87_stack_.Fxch(reg);
+ x87_stack_.pop();
}
-void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
- ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot);
- int i = X87ArrayIndex(reg);
- int st = x87_st2idx(i);
+void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg) && stack_depth_ > other_slot);
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
if (st != other_slot) {
- int other_i = x87_st2idx(other_slot);
- X87Register other = x87_stack_[other_i];
- x87_stack_[other_i] = reg;
- x87_stack_[i] = other;
+ int other_i = st2idx(other_slot);
+ X87Register other = stack_[other_i];
+ stack_[other_i] = reg;
+ stack_[i] = other;
if (st == 0) {
__ fxch(other_slot);
} else if (other_slot == 0) {
@@ -519,88 +555,104 @@ void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
}
-int LCodeGen::x87_st2idx(int pos) {
- return x87_stack_depth_ - pos - 1;
+int LCodeGen::X87Stack::st2idx(int pos) {
+ return stack_depth_ - pos - 1;
}
-int LCodeGen::X87ArrayIndex(X87Register reg) {
- for (int i = 0; i < x87_stack_depth_; i++) {
- if (x87_stack_[i].is(reg)) return i;
+int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return i;
}
UNREACHABLE();
return -1;
}
-bool LCodeGen::X87StackContains(X87Register reg) {
- for (int i = 0; i < x87_stack_depth_; i++) {
- if (x87_stack_[i].is(reg)) return true;
+bool LCodeGen::X87Stack::Contains(X87Register reg) {
+ for (int i = 0; i < stack_depth_; i++) {
+ if (stack_[i].is(reg)) return true;
}
return false;
}
-void LCodeGen::X87Free(X87Register reg) {
- ASSERT(X87StackContains(reg));
- int i = X87ArrayIndex(reg);
- int st = x87_st2idx(i);
+void LCodeGen::X87Stack::Free(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(Contains(reg));
+ int i = ArrayIndex(reg);
+ int st = st2idx(i);
if (st > 0) {
// keep track of how fstp(i) changes the order of elements
- int tos_i = x87_st2idx(0);
- x87_stack_[i] = x87_stack_[tos_i];
+ int tos_i = st2idx(0);
+ stack_[i] = stack_[tos_i];
}
- x87_stack_depth_--;
+ pop();
__ fstp(st);
}
void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
- if (X87StackContains(dst)) {
- X87Fxch(dst);
+ if (x87_stack_.Contains(dst)) {
+ x87_stack_.Fxch(dst);
__ fstp(0);
} else {
- ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
- x87_stack_[x87_stack_depth_] = dst;
- x87_stack_depth_++;
+ x87_stack_.push(dst);
}
X87Fld(src, opts);
}
void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
- if (opts == kX87DoubleOperand) {
- __ fld_d(src);
- } else if (opts == kX87FloatOperand) {
- __ fld_s(src);
- } else if (opts == kX87IntOperand) {
- __ fild_s(src);
- } else {
- UNREACHABLE();
+ ASSERT(!src.is_reg_only());
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fld_d(src);
+ break;
+ case kX87FloatOperand:
+ __ fld_s(src);
+ break;
+ case kX87IntOperand:
+ __ fild_s(src);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void LCodeGen::X87Mov(Operand dst, X87Register src) {
- X87Fxch(src);
- __ fst_d(dst);
+void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
+ ASSERT(!dst.is_reg_only());
+ x87_stack_.Fxch(src);
+ switch (opts) {
+ case kX87DoubleOperand:
+ __ fst_d(dst);
+ break;
+ case kX87IntOperand:
+ __ fist_s(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
-void LCodeGen::X87PrepareToWrite(X87Register reg) {
- if (X87StackContains(reg)) {
- X87Free(reg);
+void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
+ ASSERT(is_mutable_);
+ if (Contains(reg)) {
+ Free(reg);
}
// Mark this register as the next register to write to
- x87_stack_[x87_stack_depth_] = reg;
+ stack_[stack_depth_] = reg;
}
-void LCodeGen::X87CommitWrite(X87Register reg) {
+void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
+ ASSERT(is_mutable_);
// Assert the reg is prepared to write, but not on the virtual stack yet
- ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) &&
- x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
- x87_stack_depth_++;
+ ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
+ stack_depth_ < X87Register::kNumAllocatableRegisters);
+ stack_depth_++;
}
@@ -608,38 +660,62 @@ void LCodeGen::X87PrepareBinaryOp(
X87Register left, X87Register right, X87Register result) {
// You need to use DefineSameAsFirst for x87 instructions
ASSERT(result.is(left));
- X87Fxch(right, 1);
- X87Fxch(left);
+ x87_stack_.Fxch(right, 1);
+ x87_stack_.Fxch(left);
}
-void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
- if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
+void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
+ if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
bool double_inputs = instr->HasDoubleRegisterInput();
    // Flush stack from tos down, since Free() will mess with tos
- for (int i = x87_stack_depth_-1; i >= 0; i--) {
- X87Register reg = x87_stack_[i];
+ for (int i = stack_depth_-1; i >= 0; i--) {
+ X87Register reg = stack_[i];
// Skip registers which contain the inputs for the next instruction
// when flushing the stack
- if (double_inputs && instr->IsDoubleInput(reg, this)) {
+ if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
continue;
}
- X87Free(reg);
- if (i < x87_stack_depth_-1) i++;
+ Free(reg);
+ if (i < stack_depth_-1) i++;
}
}
if (instr->IsReturn()) {
- while (x87_stack_depth_ > 0) {
+ while (stack_depth_ > 0) {
__ fstp(0);
- x87_stack_depth_--;
+ stack_depth_--;
}
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
+ }
+}
+
+
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
+ ASSERT(stack_depth_ <= 1);
+ // If ever used for new stubs producing two pairs of doubles joined into two
+ // phis this assert hits. That situation is not handled, since the two stacks
+ // might have st0 and st1 swapped.
+ if (current_block_id + 1 != goto_instr->block_id()) {
+ // If we have a value on the x87 stack on leaving a block, it must be a
+ // phi input. If the next block we compile is not the join block, we have
+ // to discard the stack state.
+ stack_depth_ = 0;
}
}
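
The scattered `x87_stack_depth_`/`x87_stack_[]` fields have become a copyable X87Stack value class, which is what lets deferred code snapshot the state (`X87Stack copy(code->x87_stack())`) and lets LeavingBlock discard it at non-join successors. A stripped-down model of the bookkeeping:

#include <cassert>

struct X87Register {
  int code;
  bool is(X87Register other) const { return code == other.code; }
};

class X87Stack {
 public:
  bool Contains(X87Register reg) const {
    for (int i = 0; i < depth_; i++) if (stack_[i].is(reg)) return true;
    return false;
  }
  void push(X87Register reg) { assert(depth_ < 8); stack_[depth_++] = reg; }
  void pop() { assert(depth_ > 0); depth_--; }
  int depth() const { return depth_; }
 private:
  X87Register stack_[8] = {};
  int depth_ = 0;
};

int main() {
  X87Stack live;
  live.push(X87Register{0});
  X87Stack snapshot = live;   // deferred code keeps its own copy
  live.pop();
  assert(snapshot.Contains(X87Register{0}) && !live.Contains(X87Register{0}));
}
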
void LCodeGen::EmitFlushX87ForDeopt() {
- for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
+ // The deoptimizer does not support X87 registers. But as long as we
+ // deopt from a stub, it's not a problem, since we will re-materialize the
+ // original stub inputs, which can't be double registers.
+ ASSERT(info()->IsStub());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ pushfd();
+ __ VerifyX87StackDepth(x87_stack_.depth());
+ __ popfd();
+ }
+ for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
}
@@ -679,7 +755,7 @@ int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -763,37 +839,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
UNREACHABLE();
}
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
-
- // TODO(mstarzinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- if (value == NULL) {
- int arguments_count = environment->values()->length() - translation_size;
- translation->BeginArgumentsObject(arguments_count);
- for (int i = 0; i < arguments_count; ++i) {
- LOperand* value = environment->values()->at(translation_size + i);
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(translation_size + i),
- environment->HasUint32ValueAt(translation_size + i));
- }
- continue;
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i));
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32) {
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -822,7 +918,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
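
The materialization markers generalize the old single arguments-object special case: one marker in the environment now stands for a whole captured object whose field values are appended after `translation_size`, consumed in order via the running dematerialized index (duplicates are encoded as back-references). The slot arithmetic, in isolation:

#include <cstdio>
#include <vector>

int main() {
  int translation_size = 3;                  // regular frame values
  std::vector<int> object_lengths = {2, 4};  // one entry per marker hit
  int dematerialized_index = 0;
  for (size_t object_index = 0; object_index < object_lengths.size();
       ++object_index) {
    int length = object_lengths[object_index];
    int env_offset = translation_size + dematerialized_index;
    std::printf("object %zu: env slots [%d, %d)\n",
                object_index, env_offset, env_offset + length);
    dematerialized_index += length;          // fields are consumed in order
  }
}
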
@@ -882,7 +978,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
+ __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
} else {
UNREACHABLE();
}
@@ -948,7 +1044,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
@@ -977,14 +1073,14 @@ void LCodeGen::DeoptimizeIf(Condition cc,
// we can have inputs or outputs of the current instruction on the stack,
// thus we need to flush them here from the physical stack to leave it in a
// consistent state.
- if (x87_stack_depth_ > 0) {
+ if (x87_stack_.depth() > 0) {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
EmitFlushX87ForDeopt();
__ bind(&done);
}
- if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
+ if (info()->ShouldTrapOnDeopt()) {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
@@ -1168,6 +1264,14 @@ void LCodeGen::RecordPosition(int position) {
}
+void LCodeGen::RecordAndUpdatePosition(int position) {
+ if (position >= 0 && position != old_position_) {
+ masm()->positions_recorder()->RecordPosition(position);
+ old_position_ = position;
+ }
+}
+
+
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -1255,8 +1359,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1407,6 +1510,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, 0);
__ j(less, &negative, Label::kNear);
__ sar(dividend, power);
+ if (divisor < 0) __ neg(dividend);
__ jmp(&done, Label::kNear);
__ bind(&negative);
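
The added `neg` fixes division by a negative power of two on the non-negative path: the `sar` computes the quotient for the divisor's absolute value, and the sign flip was previously dropped. A quick self-check:

#include <cassert>

int main() {
  int dividend = 16;
  int power = 2;                      // |divisor| == 4
  int quotient = dividend >> power;   // sar(dividend, power)
  bool divisor_is_negative = true;    // dividing by -4
  if (divisor_is_negative) quotient = -quotient;  // the newly added neg
  assert(quotient == 16 / -4);
}
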
@@ -1679,8 +1783,9 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int right_operand = ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->representation());
+ int32_t right_operand =
+ ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), right_operand);
@@ -1689,7 +1794,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ or_(ToRegister(left), right_operand);
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), right_operand);
+ if (right_operand == int32_t(~0)) {
+ __ not_(ToRegister(left));
+ } else {
+ __ xor_(ToRegister(left), right_operand);
+ }
break;
default:
UNREACHABLE();
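
The new BIT_XOR special case is a peephole: xor with all ones is just a bitwise not, so the shorter `not reg` replaces `xor reg, imm32`. The identity it relies on:

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x : {0, 1, -1, 0x1234, INT32_MIN}) {
    assert((x ^ ~int32_t(0)) == ~x);   // x ^ 0xffffffff == ~x
  }
}
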
@@ -1726,8 +1835,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
__ ror_cl(ToRegister(left));
if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
}
break;
case Token::SAR:
@@ -1736,8 +1845,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
}
break;
case Token::SHL:
@@ -1753,8 +1862,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
switch (instr->op()) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1766,8 +1875,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
case Token::SHR:
if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
+ __ test(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(sign, instr->environment());
} else {
__ shr(ToRegister(left), shift_count);
}
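// A minimal sketch of the predicate change made in the hunks above
// (illustrative, not from the patch): `test reg, reg` sets SF from bit 31,
// so branching on `sign` is equivalent to masking with 0x80000000 and
// branching on `not_zero`, with a shorter encoding.
#include <stdint.h>
static inline bool DeoptOnTopBitOld(uint32_t v) {
  return (v & 0x80000000u) != 0;       // test reg, 0x80000000 / j(not_zero)
}
static inline bool DeoptOnTopBitNew(uint32_t v) {
  return static_cast<int32_t>(v) < 0;  // test reg, reg / j(sign)
}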
@@ -1826,15 +1935,16 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+ ASSERT(instr->result()->IsDoubleRegister());
if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
__ push(Immediate(upper));
__ push(Immediate(lower));
- X87Mov(ToX87Register(instr->result()), Operand(esp, 0));
+ X87Register reg = ToX87Register(instr->result());
+ X87Mov(reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
CpuFeatureScope scope1(masm(), SSE2);
- ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
if (int_val == 0) {
__ xorps(res, res);
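// A minimal sketch of the BitCast split used above (illustrative, not from
// the patch): the double's bit pattern becomes two 32-bit halves, pushed
// upper-first so the lower word lands at the lower address on the ia32 stack.
#include <stdint.h>
#include <string.h>
static inline void SplitDouble(double v, int32_t* lower, int32_t* upper) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));            // BitCast<uint64_t, double>
  *lower = static_cast<int32_t>(bits);
  *upper = static_cast<int32_t>(bits >> 32);  // kBitsPerInt == 32
}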
@@ -1874,7 +1984,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
- Handle<Object> handle = instr->value();
+ Handle<Object> handle = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(reg, handle);
}
@@ -1976,7 +2086,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
+ __ Check(equal, kUnexpectedStringType);
__ pop(value);
}
@@ -1990,13 +2100,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->value()));
ASSERT(ToRegister(instr->context()).is(esi));
@@ -2148,11 +2251,36 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
X87Register left = ToX87Register(instr->left());
X87Register right = ToX87Register(instr->right());
X87Register result = ToX87Register(instr->result());
- X87PrepareBinaryOp(left, right, result);
+ if (instr->op() != Token::MOD) {
+ X87PrepareBinaryOp(left, right, result);
+ }
switch (instr->op()) {
+ case Token::ADD:
+ __ fadd_i(1);
+ break;
+ case Token::SUB:
+ __ fsub_i(1);
+ break;
case Token::MUL:
__ fmul_i(1);
break;
+ case Token::DIV:
+ __ fdiv_i(1);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ X87Mov(Operand(esp, 1 * kDoubleSize), right);
+ X87Mov(Operand(esp, 0), left);
+ X87PrepareToWrite(result);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -2201,6 +2329,17 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+ int false_block = instr->FalseDestination(chunk_);
+ if (cc == no_condition) {
+ __ jmp(chunk_->GetAssemblyLabel(false_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(false_block));
+ }
+}
+
+
void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32() || r.IsDouble()) {
@@ -2223,7 +2362,6 @@ void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ test(reg, Operand(reg));
EmitBranch(instr, not_zero);
@@ -2367,6 +2505,10 @@ void LCodeGen::EmitGoto(int block) {
}
+void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
+}
+
+
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
@@ -2451,6 +2593,51 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ cmp(input_reg, factory()->the_hole_value());
+ EmitBranch(instr, equal);
+ return;
+ }
+
+ bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ ucomisd(input_reg, input_reg);
+ EmitFalseBranch(instr, parity_odd);
+ } else {
+    // Put the value on top of the stack.
+ X87Register src = ToX87Register(instr->object());
+ X87LoadForUsage(src);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+ Label ok;
+ __ j(parity_even, &ok);
+ __ fstp(0);
+ EmitFalseBranch(instr, no_condition);
+ __ bind(&ok);
+ }
+
+
+ __ sub(esp, Immediate(kDoubleSize));
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ movdbl(MemOperand(esp, 0), input_reg);
+ } else {
+ __ fstp_d(MemOperand(esp, 0));
+ }
+
+ __ add(esp, Immediate(kDoubleSize));
+ int offset = sizeof(kHoleNanUpper32);
+ __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
+ EmitBranch(instr, equal);
+}
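// A minimal sketch of the hole check above (illustrative, not from the
// patch): the hole is encoded as a NaN with a specific upper word, so the
// code first rules out ordered values (ucomisd / FCmp) and then compares the
// upper 32 bits of the bit pattern against kHoleNanUpper32.
#include <stdint.h>
#include <string.h>
static inline bool IsHoleNan(double value, uint32_t hole_nan_upper32) {
  if (value == value) return false;  // ordered, i.e. not a NaN at all
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}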
+
+
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2728,15 +2915,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ LInstanceOfKnownGlobal* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2744,7 +2932,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+ deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
Label done, false_result;
Register object = ToRegister(instr->value());
@@ -2863,7 +3051,7 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
__ cmp(Operand(esp,
(parameter_count + extra_value_count) * kPointerSize),
Immediate(kAlignmentZapValue));
- __ Assert(equal, "expected alignment marker");
+ __ Assert(equal, kExpectedAlignmentMarker);
}
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
@@ -2876,7 +3064,7 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
__ cmp(Operand(esp, reg, times_pointer_size,
extra_value_count * kPointerSize),
Immediate(kAlignmentZapValue));
- __ Assert(equal, "expected alignment marker");
+ __ Assert(equal, kExpectedAlignmentMarker);
}
    // Emit code to restore the stack based on instr->parameter_count().
@@ -3090,47 +3278,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::EmitLoadFieldOrConstant(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ mov(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstant()) {
- Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
- __ LoadObject(result, constant);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ mov(result, factory()->undefined_value());
- }
-}
-
-
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
@@ -3149,68 +3296,6 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
}
-// Check for cases where EmitLoadFieldOrConstant needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstant();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ mov(ecx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -3447,7 +3532,7 @@ Operand LCodeGen::BuildFastArrayOperand(
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
+ Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
((constant_value + additional_index) << shift_size)
@@ -3799,14 +3884,16 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LMathAbs* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3821,11 +3908,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
- } else if (r.IsInteger32()) {
+ } else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+ new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
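// A minimal sketch of the SSE2 abs idiom in the double case above
// (illustrative, not from the patch): for non-NaN x, 0.0 - x differs from x
// only in the sign bit, so ANDing the two clears the sign and yields |x|.
#include <stdint.h>
#include <string.h>
static inline double AbsViaPand(double x) {
  double neg = 0.0 - x;                       // __ subsd(scratch, input_reg)
  uint64_t a, b;
  memcpy(&a, &x, sizeof(a));
  memcpy(&b, &neg, sizeof(b));
  uint64_t result_bits = a & b;               // __ pand(input_reg, scratch)
  double result;
  memcpy(&result, &result_bits, sizeof(result));
  return result;
}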
@@ -4039,82 +4126,66 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
CpuFeatureScope scope(masm(), SSE2);
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- ASSERT(ToRegister(instr->global_object()).is(eax));
+
// Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+ // Load native context
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ mov(native_context, FieldOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds)
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
- // ebx: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ mov(state, FieldOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ test(ecx, ecx);
- __ j(zero, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
- // ecx: state[0]
- // eax: state[1]
+ Register state1 = ToRegister(instr->scratch2());
+ __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ movzx_w(edx, ecx);
- __ imul(edx, edx, 18273);
- __ shr(ecx, 16);
- __ add(ecx, edx);
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ movzx_w(scratch3, state0);
+ __ imul(scratch3, scratch3, 18273);
+ __ shr(state0, 16);
+ __ add(state0, scratch3);
// Save state[0].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
+ __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(edx, eax);
- __ imul(edx, edx, 36969);
- __ shr(eax, 16);
- __ add(eax, edx);
+ __ movzx_w(scratch3, state1);
+ __ imul(scratch3, scratch3, 36969);
+ __ shr(state1, 16);
+ __ add(state1, scratch3);
// Save state[1].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
+ __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shl(ecx, 14);
- __ and_(eax, Immediate(0x3FFFF));
- __ add(eax, ecx);
+ Register random = state0;
+ __ shl(random, 14);
+ __ and_(state1, Immediate(0x3FFFF));
+ __ add(random, state1);
- __ bind(deferred->exit());
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
+ // Convert 32 random bits in random to 0.(32 random bits) in a double
// by computing:
  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm2, ebx);
- __ movd(xmm1, eax);
- __ cvtss2sd(xmm2, xmm2);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in eax.
+ XMMRegister result = ToDoubleRegister(instr->result());
+  // We use xmm0 as a fixed scratch register here.
+ XMMRegister scratch4 = xmm0;
+ __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(scratch4, scratch3);
+ __ movd(result, random);
+ __ cvtss2sd(scratch4, scratch4);
+ __ xorps(result, scratch4);
+ __ subsd(result, scratch4);
}
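// A minimal sketch of the inlined generator above (illustrative, not from
// the patch): two 16-bit multiply-with-carry streams produce 32 random bits,
// which are then dropped into the fraction of 1.0 x 2^20 and the bias
// subtracted, avoiding an integer-to-double division.
#include <stdint.h>
#include <string.h>
static inline double NextRandom(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273 * (*state0 & 0xFFFF) + (*state0 >> 16);
  *state1 = 36969 * (*state1 & 0xFFFF) + (*state1 >> 16);
  uint32_t random = (*state0 << 14) + (*state1 & 0x3FFFF);
  uint64_t bits = 0x4130000000000000ULL | random;  // 1.(20 0s)(random) x 2^20
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result - 1048576.0;                       // subtract 1.0 x 2^20
}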
@@ -4163,6 +4234,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -4171,6 +4245,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -4179,6 +4256,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ // Set the context register to a GC-safe fake value. Clobbering it is
+ // OK because this instruction is marked as a call.
+ __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -4333,6 +4413,14 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4765,18 +4853,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStringCharCodeAt(LCodeGen* codegen,
+ LStringCharCodeAt* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
+ new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
StringCharLoadGenerator::Generate(masm(),
factory(),
@@ -4820,18 +4912,22 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStringCharFromCode(LCodeGen* codegen,
+ LStringCharFromCode* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
+ new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@@ -4876,15 +4972,20 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ ASSERT(output->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
__ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ } else if (input->IsRegister()) {
+ Register input_reg = ToRegister(input);
+ __ push(input_reg);
+ X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
+ __ pop(input_reg);
} else {
- UNREACHABLE();
+ X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
}
}
@@ -4911,15 +5012,28 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ Register input = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
+}
+
+
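// A minimal sketch of the range check above (illustrative, not from the
// patch): an ia32 smi carries a 31-bit signed payload, so an unsigned value
// is taggable only if it is below 2^30, i.e. its top two bits are clear.
#include <stdint.h>
static inline bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;  // same predicate as the test above
}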
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredNumberTagI(LCodeGen* codegen,
+ LNumberTagI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4928,7 +5042,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+ DeferredNumberTagI* deferred =
+ new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
__ SmiTag(reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
@@ -4936,14 +5051,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ DeferredNumberTagU(LCodeGen* codegen,
+ LNumberTagU* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4952,7 +5069,8 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+ DeferredNumberTagU* deferred =
+ new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
__ cmp(reg, Immediate(Smi::kMaxValue));
__ j(above, deferred->entry());
__ SmiTag(reg);
@@ -5039,25 +5157,22 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredNumberTagD(LCodeGen* codegen,
+ LNumberTagD* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
Register reg = ToRegister(instr->result());
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (!use_sse2) {
    // Put the value on top of the stack.
@@ -5065,55 +5180,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
X87LoadForUsage(src);
}
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ ucomisd(input_reg, input_reg);
- } else {
- __ fld(0);
- __ fld(0);
- __ FCmp();
- }
-
- __ j(parity_odd, &no_special_nan_handling);
- __ sub(esp, Immediate(kDoubleSize));
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(MemOperand(esp, 0), input_reg);
- } else {
- __ fld(0);
- __ fstp_d(MemOperand(esp, 0));
- }
- __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ add(esp, Immediate(kDoubleSize));
- __ mov(reg, factory()->the_hole_value());
- if (!use_sse2) {
- __ fstp(0);
- }
- __ jmp(&done);
- __ bind(&canonicalize);
- __ add(esp, Immediate(kDoubleSize));
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(input_reg, Operand::StaticVariable(nan));
- } else {
- __ fstp(0);
- __ fld_d(Operand::StaticVariable(nan));
- }
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+ DeferredNumberTagD* deferred =
+ new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
@@ -5128,7 +5196,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
- __ bind(&done);
}
@@ -5178,23 +5245,21 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
Register temp_reg,
X87Register res_reg,
- bool allow_undefined_as_nan,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
X87PrepareToWrite(res_reg);
- STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
- NUMBER_CANDIDATE_IS_ANY_TAGGED);
- if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!allow_undefined_as_nan) {
+ if (!can_convert_undefined_to_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@@ -5202,10 +5267,6 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
      // Convert undefined to NaN.
__ cmp(input_reg, factory()->undefined_value());
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
- __ j(equal, &convert, Label::kNear);
- __ cmp(input_reg, factory()->the_hole_value());
- }
DeoptimizeIf(not_equal, env);
__ bind(&convert);
@@ -5239,11 +5300,13 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
}
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ push(input_reg);
+ // Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ push(temp_reg);
__ fild_s(Operand(esp, 0));
- __ pop(input_reg);
- __ SmiTag(input_reg); // Retag smi.
+ __ add(esp, Immediate(kPointerSize));
__ bind(&done);
X87CommitWrite(res_reg);
}
@@ -5252,22 +5315,20 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
- bool allow_undefined_as_nan,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
- STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
- NUMBER_CANDIDATE_IS_ANY_TAGGED);
- if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!allow_undefined_as_nan) {
+ if (!can_convert_undefined_to_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@@ -5275,10 +5336,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
      // Convert undefined to NaN.
__ cmp(input_reg, factory()->undefined_value());
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
- __ j(equal, &convert, Label::kNear);
- __ cmp(input_reg, factory()->the_hole_value());
- }
DeoptimizeIf(not_equal, env);
__ bind(&convert);
@@ -5305,113 +5362,63 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- // Smi to XMM conversion
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(input_reg));
- __ SmiTag(input_reg); // Retag smi.
+ // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
+ // input register since we avoid dependencies.
+ __ mov(temp_reg, input_reg);
+ __ SmiUntag(temp_reg); // Untag smi before converting to float.
+ __ cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
if (instr->truncating()) {
+ Label heap_number, slow_case;
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
__ j(equal, &heap_number, Label::kNear);
+
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(done);
__ bind(&heap_number);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatureScope scope(masm(), SSE3);
- Label convert;
- // Use more powerful conversion when sse3 is available.
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- // Get exponent alone and check for too-big exponent.
- __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(input_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- __ RecordComment("Deferred TaggedToI: exponent too big");
- DeoptimizeIf(no_condition, instr->environment());
-
- // Reserve space for 64 bit answer.
- __ bind(&convert);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- } else if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cmp(input_reg, 0x80000000u);
- __ j(not_equal, &done);
- // Check if the input was 0x8000000 (kMinInt).
- // If no, then we got an overflow and we deoptimize.
- ExternalReference min_int = ExternalReference::address_of_min_int();
- __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
- __ ucomisd(xmm_temp, xmm0);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- } else {
- UNREACHABLE();
- }
- } else if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- // Deoptimize if we don't have a heap number.
- __ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cvtsi2sd(xmm_temp, Operand(input_reg));
- __ ucomisd(xmm0, xmm_temp);
- __ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(not_equal, instr->environment());
- __ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(input_reg, Operand(input_reg));
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ and_(input_reg, 1);
- __ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(not_zero, instr->environment());
- }
+ __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
- UNREACHABLE();
+ Label bailout;
+ XMMRegister scratch = (instr->temp() != NULL)
+ ? ToDoubleRegister(instr->temp())
+ : no_xmm_reg;
+ __ TaggedToI(input_reg, input_reg, scratch,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout);
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
}
- __ bind(&done);
}
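// A minimal sketch of the truncating path above (illustrative, not from the
// patch; V8's TruncateHeapNumberToI uses faster machine-level paths): the
// semantics follow ECMAScript ToInt32, with undefined mapping to zero.
#include <stdint.h>
#include <cmath>
static inline int32_t TruncateDoubleToInt32(double value) {
  if (!std::isfinite(value)) return 0;         // NaN, +/-Infinity become 0
  double t = std::trunc(value);                // round toward zero
  double m = std::fmod(t, 4294967296.0);       // reduce modulo 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}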
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredTaggedToI(LCodeGen* codegen,
+ LTaggedToI* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -5421,7 +5428,8 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiUntag(input_reg);
@@ -5429,190 +5437,22 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
}
-void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
- Label done, heap_number;
- Register result_reg = ToRegister(instr->result());
- Register input_reg = ToRegister(instr->value());
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kFar);
- __ bind(&heap_number);
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Surprisingly, all of this crazy bit manipulation is considerably
- // faster than using the built-in x86 CPU conversion functions (about 6x).
- Label right_exponent, adjust_bias, zero_result;
- Register scratch = ToRegister(instr->scratch());
- Register scratch2 = ToRegister(instr->scratch2());
- // Get exponent word.
- __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- if (instr->truncating()) {
- __ j(zero, &zero_result);
- } else {
- __ j(not_zero, &adjust_bias);
- __ test(scratch, Immediate(HeapNumber::kMantissaMask));
- DeoptimizeIf(not_zero, instr->environment());
- __ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0));
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&adjust_bias);
- }
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
- if (!instr->truncating()) {
- DeoptimizeIf(negative, instr->environment());
- } else {
- __ j(negative, &zero_result);
- }
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- Register scratch3 = ToRegister(instr->scratch3());
- __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ xor_(result_reg, result_reg);
-
- const uint32_t non_int32_exponent = 31;
- __ cmp(scratch2, Immediate(non_int32_exponent));
- // If we have a match of the int32 exponent then skip some logic.
- __ j(equal, &right_exponent, Label::kNear);
-  // If the number doesn't fit in an int32, deopt.
- DeoptimizeIf(greater, instr->environment());
-
- // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
- // < 31.
- __ mov(result_reg, Immediate(31));
- __ sub(result_reg, scratch2);
-
- __ bind(&right_exponent);
-
- // Save off exponent for negative check later.
- __ mov(scratch2, scratch);
-
- // Here result_reg is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
-  // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch, shift_distance);
- if (!instr->truncating()) {
- // If not truncating, a non-zero value in the bottom 22 bits means a
- // non-integral value --> trigger a deopt.
- __ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1));
- DeoptimizeIf(not_equal, instr->environment());
- }
-  // Shift down 22 bits to get the most significant 10 bits of the low
-  // mantissa word.
- __ shr(scratch3, 32 - shift_distance);
- __ or_(scratch3, scratch);
- if (!instr->truncating()) {
-    // If not truncating, a non-zero value in the bits that will be shifted
-    // away when adjusting the exponent means rounding --> deopt.
- __ mov(scratch, 0x1);
- ASSERT(result_reg.is(ecx));
- __ shl_cl(scratch);
- __ dec(scratch);
- __ test(scratch3, scratch);
- DeoptimizeIf(not_equal, instr->environment());
- }
- // Move down according to the exponent.
- ASSERT(result_reg.is(ecx));
- __ shr_cl(scratch3);
- // Now the unsigned 32-bit answer is in scratch3. We need to move it to
- // result_reg and we may need to fix the sign.
- Label negative_result;
- __ xor_(result_reg, result_reg);
- __ cmp(scratch2, result_reg);
- __ j(less, &negative_result, Label::kNear);
- __ cmp(scratch3, result_reg);
- __ mov(result_reg, scratch3);
- // If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt.
- DeoptimizeIf(less, instr->environment());
- __ jmp(&done, Label::kNear);
- __ bind(&zero_result);
- __ xor_(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
- __ bind(&negative_result);
- __ sub(result_reg, scratch3);
- if (!instr->truncating()) {
- // -0.0 triggers a deopt.
- DeoptimizeIf(zero, instr->environment());
- }
- // If the negative subtraction overflows into a positive number, there was an
- // overflow --> deopt.
- DeoptimizeIf(positive, instr->environment());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
- class DeferredTaggedToINoSSE2: public LDeferredCode {
- public:
- DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToINoSSE2(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToINoSSE2* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register input_reg = ToRegister(input);
- ASSERT(input_reg.is(ToRegister(instr->result())));
-
- DeferredTaggedToINoSSE2* deferred =
- new(zone()) DeferredTaggedToINoSSE2(this, instr);
-
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiUntag(input_reg); // Untag smi.
- __ bind(deferred->exit());
-}
-
-
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* temp = instr->temp();
- ASSERT(temp == NULL || temp->IsRegister());
+ ASSERT(temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
bool deoptimize_on_minus_zero =
instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+ Register temp_reg = ToRegister(temp);
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
- if (value->representation().IsSmi()) {
- mode = NUMBER_CANDIDATE_IS_SMI;
- } else if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
- }
- }
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
@@ -5620,7 +5460,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
- instr->hydrogen()->allow_undefined_as_nan(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
@@ -5628,7 +5468,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagDNoSSE2(input_reg,
temp_reg,
ToX87Register(instr->result()),
- instr->hydrogen()->allow_undefined_as_nan(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
@@ -5641,43 +5481,34 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
- CpuFeatureScope scope(masm(), SSE2);
-
- XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- __ cvttsd2si(result_reg, Operand(input_reg));
-
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label fast_case_succeeded;
- __ cmp(result_reg, 0x80000000u);
- __ j(not_equal, &fast_case_succeeded);
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(MemOperand(esp, 0), input_reg);
- DoubleToIStub stub(esp, result_reg, 0, true);
- __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
- __ add(esp, Immediate(kDoubleSize));
- __ bind(&fast_case_succeeded);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ TruncateDoubleToI(result_reg, input_reg);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ TruncateX87TOSToI(result_reg);
+ }
} else {
- Label done;
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
+ Label bailout, done;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
}
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
__ bind(&done);
}
}
@@ -5688,31 +5519,25 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
- CpuFeatureScope scope(masm(), SSE2);
-
- XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- Label done;
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
+ Label bailout, done;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(input);
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+ } else {
+ X87Register input_reg = ToX87Register(input);
+ X87Fxch(input_reg);
+ __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+ &bailout, Label::kNear);
}
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
+
__ SmiTag(result_reg);
DeoptimizeIf(overflow, instr->environment());
}
@@ -5779,45 +5604,85 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (instr->hydrogen()->target_in_new_space()) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+ Handle<HeapObject> object = instr->hydrogen()->object();
+ if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ cmp(reg, Operand::ForCell(cell));
} else {
Operand operand = ToOperand(instr->value());
- __ cmp(operand, target);
+ __ cmp(operand, object);
}
DeoptimizeIf(not_equal, instr->environment());
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- LInstruction* instr) {
- Label success;
- __ CompareMap(reg, map, &success);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ xor_(esi, esi);
+ __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
+ __ test(eax, Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(zero, instr->environment());
}
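// A minimal sketch of the result check above (illustrative, not from the
// patch, and the failure convention is assumed from the deopt that follows):
// smis have tag bit 0 and heap objects tag bit 1, so a smi returned from the
// migration runtime call signals failure and triggers the deoptimization.
#include <stdint.h>
static inline bool MigrationLooksSuccessful(intptr_t runtime_result) {
  const intptr_t kTagMask = 1;              // stands in for kSmiTagMask
  return (runtime_result & kTagMask) != 0;  // heap object => proceed
}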
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen,
+ LCheckMaps* instr,
+ Register object,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
if (instr->hydrogen()->CanOmitMapChecks()) return;
+
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
+ __ bind(deferred->check_maps());
+ }
+
+ Label success;
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
+
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, instr);
+ __ CompareMap(reg, map, &success);
+ if (instr->hydrogen()->has_migration_target()) {
+ __ j(not_equal, deferred->entry());
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
__ bind(&success);
}
@@ -5994,35 +5859,23 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
- Register reg = ToRegister(instr->temp());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), instr);
- }
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredAllocate(LCodeGen* codegen,
+ LAllocate* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
+ new(zone()) DeferredAllocate(this, instr, x87_stack_);
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
@@ -6173,7 +6026,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ push(Immediate(instr->hydrogen()->shared_info()));
+ __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(esi);
@@ -6337,6 +6190,7 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(no_condition, instr->environment(), type);
}
@@ -6359,12 +6213,16 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ DeferredStackCheck(LCodeGen* codegen,
+ LStackCheck* instr,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -6383,8 +6241,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->context()->IsRegister());
ASSERT(ToRegister(instr->context()).is(esi));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6393,7 +6252,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
+ new(zone()) DeferredStackCheck(this, instr, x87_stack_);
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -6420,9 +6279,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.h b/chromium/v8/src/ia32/lithium-codegen-ia32.h
index 0beef85f0bc..769917f7e24 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.h
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.h
@@ -45,7 +45,7 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -68,10 +68,11 @@ class LCodeGen BASE_EMBEDDED {
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
- x87_stack_depth_(0),
+ x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
+ expected_safepoint_kind_(Safepoint::kSimple),
+ old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -121,14 +122,23 @@ class LCodeGen BASE_EMBEDDED {
void X87Mov(X87Register reg, Operand src,
X87OperandType operand = kX87DoubleOperand);
- void X87Mov(Operand src, X87Register reg);
+ void X87Mov(Operand src, X87Register reg,
+ X87OperandType operand = kX87DoubleOperand);
void X87PrepareBinaryOp(
X87Register left, X87Register right, X87Register result);
void X87LoadForUsage(X87Register reg);
- void X87PrepareToWrite(X87Register reg);
- void X87CommitWrite(X87Register reg);
+ void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
+ void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
+
+ void X87Fxch(X87Register reg, int other_slot = 0) {
+ x87_stack_.Fxch(reg, other_slot);
+ }
+
+ bool X87StackEmpty() {
+ return x87_stack_.depth() == 0;
+ }
Handle<Object> ToHandle(LConstantOperand* op) const;
@@ -153,18 +163,15 @@ class LCodeGen BASE_EMBEDDED {
LOperand* value,
IntegerSignedness signedness);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map, LInstruction* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -212,7 +219,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -225,6 +232,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -282,10 +292,13 @@ class LCodeGen BASE_EMBEDDED {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32);
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -295,7 +308,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
X87Register ToX87Register(int index) const;
- int ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
@@ -320,10 +333,14 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::DeoptMode mode);
void RecordPosition(int position);
+ void RecordAndUpdatePosition(int position);
+
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(
Register input,
Register temp,
@@ -370,12 +387,6 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- void EmitLoadFieldOrConstant(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -398,15 +409,13 @@ class LCodeGen BASE_EMBEDDED {
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
- void X87Fxch(X87Register reg, int other_slot = 0);
void X87Fld(Operand src, X87OperandType opts);
- void X87Free(X87Register reg);
- void FlushX87StackIfNecessary(LInstruction* instr);
void EmitFlushX87ForDeopt();
- bool X87StackContains(X87Register reg);
- int X87ArrayIndex(X87Register reg);
- int x87_st2idx(int pos);
+ void FlushX87StackIfNecessary(LInstruction* instr) {
+ x87_stack_.FlushIfNecessary(instr, this);
+ }
+ friend class LGapResolver;
#ifdef _MSC_VER
  // On Windows, you may not access the stack more than one page below
@@ -437,8 +446,55 @@ class LCodeGen BASE_EMBEDDED {
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
- X87Register x87_stack_[X87Register::kNumAllocatableRegisters];
- int x87_stack_depth_;
+
+ class X87Stack {
+ public:
+ explicit X87Stack(MacroAssembler* masm)
+ : stack_depth_(0), is_mutable_(true), masm_(masm) { }
+ explicit X87Stack(const X87Stack& other)
+        : stack_depth_(other.stack_depth_),
+          is_mutable_(false), masm_(other.masm_) {
+ for (int i = 0; i < stack_depth_; i++) {
+ stack_[i] = other.stack_[i];
+ }
+ }
+ bool operator==(const X87Stack& other) const {
+ if (stack_depth_ != other.stack_depth_) return false;
+ for (int i = 0; i < stack_depth_; i++) {
+ if (!stack_[i].is(other.stack_[i])) return false;
+ }
+ return true;
+ }
+ bool Contains(X87Register reg);
+ void Fxch(X87Register reg, int other_slot = 0);
+ void Free(X87Register reg);
+ void PrepareToWrite(X87Register reg);
+ void CommitWrite(X87Register reg);
+ void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
+ void LeavingBlock(int current_block_id, LGoto* goto_instr);
+ int depth() const { return stack_depth_; }
+ void pop() {
+ ASSERT(is_mutable_);
+ stack_depth_--;
+ }
+ void push(X87Register reg) {
+ ASSERT(is_mutable_);
+ ASSERT(stack_depth_ < X87Register::kNumAllocatableRegisters);
+ stack_[stack_depth_] = reg;
+ stack_depth_++;
+ }
+
+ MacroAssembler* masm() const { return masm_; }
+
+ private:
+ int ArrayIndex(X87Register reg);
+ int st2idx(int pos);
+
+ X87Register stack_[X87Register::kNumAllocatableRegisters];
+ int stack_depth_;
+ bool is_mutable_;
+ MacroAssembler* masm_;
+ };
+ X87Stack x87_stack_;
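
A standalone toy model of the bookkeeping that X87Stack centralizes (this is
not the V8 code): slot stack_[depth_ - 1] mirrors st(0), and Fxch brings a
register to the top slot the way an fxch instruction would.

    #include <cassert>

    class TrackedFpuStack {
     public:
      TrackedFpuStack() : depth_(0) {}
      void Push(int reg) { assert(depth_ < 8); stack_[depth_++] = reg; }
      bool Contains(int reg) const {
        for (int i = 0; i < depth_; i++) {
          if (stack_[i] == reg) return true;
        }
        return false;
      }
      // Swap 'reg' into the top slot, mirroring fxch st(i).
      void Fxch(int reg) {
        assert(Contains(reg));
        for (int i = 0; i < depth_; i++) {
          if (stack_[i] == reg) {
            int tmp = stack_[depth_ - 1];
            stack_[depth_ - 1] = stack_[i];
            stack_[i] = tmp;
            return;
          }
        }
      }
     private:
      int stack_[8];
      int depth_;
    };
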
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -449,7 +505,9 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ int old_position_;
+
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
@@ -476,23 +534,26 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
- explicit LDeferredCode(LCodeGen* codegen)
+ explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
: codegen_(codegen),
external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
+ instruction_index_(codegen->current_instruction_),
+ x87_stack_(x87_stack) {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
+ const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -503,7 +564,9 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ Label done_;
int instruction_index_;
+ LCodeGen::X87Stack x87_stack_;
};
} } // namespace v8::internal
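
For context on how these deferred objects are consumed: the constructor
self-registers through codegen->AddDeferredCode(this), and the codegen later
emits each body out of line. A sketch of that emission loop, reconstructed
from the pattern above (the real GenerateDeferredCode also builds a deferred
frame when NeedsDeferredFrame() holds):

    bool LCodeGen::GenerateDeferredCode() {
      ASSERT(is_generating());
      for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
        LDeferredCode* code = deferred_[i];
        // Restore the x87 tracking state captured at creation time, then
        // emit the out-of-line body between its entry and exit labels.
        X87Stack copy(code->x87_stack());
        x87_stack_ = copy;
        __ bind(code->entry());
        code->Generate();
        __ jmp(code->exit());
      }
      return !is_aborted();
    }
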
diff --git a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h
index 3a58f585c31..4aff241f431 100644
--- a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/chromium/v8/src/ia32/lithium-ia32.cc b/chromium/v8/src/ia32/lithium-ia32.cc
index ea07c5a1991..a5acb9fa9e4 100644
--- a/chromium/v8/src/ia32/lithium-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-ia32.cc
@@ -32,6 +32,7 @@
#include "lithium-allocator-inl.h"
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -290,6 +291,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -475,6 +484,14 @@ LPlatformChunk* LChunkBuilder::Build() {
USE(alignment_state_index);
}
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
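
Reserving those indices up front keeps the optimized frame's spill area
aligned with the unoptimized frame it subsumes. Illustratively, if
UnoptimizedFrameSlots() returns 3, spill indices 0..2 are consumed before the
register allocator creates any spills of its own, so OSR values keep their
unoptimized-frame offsets (see DoUnknownOSRValue further down):

    // Illustrative numbers only: three unoptimized frame slots.
    //   reserved in Build():  spill indices 0, 1, 2
    //   allocator spills:     start at spill index 3
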
@@ -487,7 +504,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -645,8 +662,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -698,7 +717,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
- Abort("Out of virtual registers while trying to allocate temp register.");
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
vreg = 0;
}
operand->set_virtual_register(vreg);
@@ -770,12 +789,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToSmi)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseFixed(right_value, ecx);
@@ -787,12 +801,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
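
Both hunks above fold an explicit use-iteration into
HValue::CheckUsesForFlag. From the deleted loops, the helper evidently
answers "do all uses carry this flag?"; its body is essentially the removed
code inverted:

    bool HValue::CheckUsesForFlag(Flag f) {
      for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
        if (!it.value()->CheckFlag(f)) return false;
      }
      return true;
    }
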
@@ -938,12 +947,23 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
+ instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
+ if (!CpuFeatures::IsSafeForSnapshot(SSE2) && instr->IsGoto() &&
+ LGoto::cast(instr)->jumps_to_join()) {
+ // TODO(olivf) Since phis of spilled values are joined as registers
+ // (not in the stack slot), we need to allow the goto gaps to keep one
+ // x87 register alive. To ensure all other values are still spilled, we
+    // insert an FPU register barrier right before.
+ LClobberDoubles* clobber = new(zone()) LClobberDoubles();
+ clobber->set_hydrogen_value(current);
+ chunk_->AddInstruction(clobber, current_block_);
+ }
instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
@@ -953,11 +973,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@@ -972,16 +994,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
- bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
+ int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- needs_arguments_object_materialization = true;
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -992,15 +1014,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
- if (needs_arguments_object_materialization) {
- HArgumentsObject* arguments = hydrogen_env->entry() == NULL
- ? graph()->GetArgumentsObject()
- : hydrogen_env->entry()->arguments_object();
- ASSERT(arguments->IsLinked());
- for (int i = 1; i < arguments->arguments_count(); ++i) {
- HValue* value = arguments->arguments_values()->at(i);
- ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
- LOperand* op = UseAny(value);
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
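
Note how objects_to_materialize doubles as a worklist here: values appended
inside the loop (nested captured objects) are flattened by the same loop on a
later iteration, and an object encountered twice is recorded with
AddDuplicateObject so the deoptimizer materializes it once and
back-references it. A standalone model of the traversal (illustrative types,
not V8's):

    #include <cstddef>
    #include <vector>

    struct Obj { std::vector<Obj*> captured_operands; };

    void Flatten(std::vector<Obj*>* worklist) {
      for (size_t i = 0; i < worklist->size(); ++i) {  // length grows below
        Obj* obj = (*worklist)[i];
        bool duplicate = false;
        for (size_t prev = 0; prev < i; ++prev) {
          if ((*worklist)[prev] == obj) { duplicate = true; break; }
        }
        if (duplicate) continue;  // AddDuplicateObject(prev) in the patch
        for (size_t j = 0; j < obj->captured_operands.size(); ++j) {
          worklist->push_back(obj->captured_operands[j]);
        }
      }
    }
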
@@ -1016,7 +1056,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1028,7 +1068,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
+ return new(zone()) LGoto(successor);
}
ToBooleanStub::Types expected = instr->expected_input_types();
@@ -1137,6 +1177,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1392,9 +1440,8 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->right()->representation().Equals(
- instr->left()->representation()));
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1414,16 +1461,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1512,8 +1549,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(left->representation().IsSmiOrInteger32());
- ASSERT(right->representation().Equals(left->representation()));
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
@@ -1589,9 +1626,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->right()->representation().Equals(
- instr->left()->representation()));
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1611,9 +1647,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->right()->representation().Equals(
- instr->left()->representation()));
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
// Check to see if it would be advantageous to use an lea instruction rather
// than an add. This is the case when no overflow check is needed and there
// are multiple uses of the add's inputs, so using a 3-register add will
@@ -1646,9 +1681,8 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->right()->representation().Equals(
- instr->left()->representation()));
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
@@ -1681,10 +1715,14 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsSmiOrTagged());
- LOperand* global_object = UseFixed(instr->global_object(), eax);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ ASSERT(instr->global_object()->representation().IsTagged());
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, xmm1);
}
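
LRandom is no longer marked as a call: the random value is now generated
inline, which is why the fixed eax/MarkAsCall convention gives way to a temp
register for the global object plus three scratch registers. V8's inline
generator at this time was a pair of 16-bit multiply-with-carry recurrences;
a standalone model (the constants are the ones V8 used, but treat this as an
illustration of the recurrence, not the emitted assembly):

    #include <stdint.h>

    static uint32_t state0 = 1;  // seeds normally come from the per-context
    static uint32_t state1 = 2;  // random seed, not fixed values like these

    double RandomModel() {
      state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
      state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
      uint32_t r = (state0 << 14) + (state1 & 0x3FFFF);
      return r / 4294967296.0;  // map the 32-bit value into [0, 1)
    }
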
@@ -1703,9 +1741,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(
- instr->right()->representation()));
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1735,6 +1772,13 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return new(zone()) LCmpHoleAndBranch(object);
+}
+
+
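
CmpHoleAndBranch tests whether a double is "the hole", which V8 encodes as a
NaN with a distinguished bit pattern. Since any NaN compares unequal under
ucomisd, the generated code has to compare raw bits rather than values; a
portable model of the check (the constant here is illustrative, the real one
is V8's kHoleNanUpper32):

    #include <stdint.h>
    #include <string.h>

    static const uint32_t kHoleUpper32 = 0x7FF7FFFF;  // illustrative value

    bool IsTheHoleModel(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));  // bit-exact view of the double
      return (uint32_t)(bits >> 32) == kHoleUpper32;
    }
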
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
@@ -1851,17 +1895,6 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
}
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@@ -1876,13 +1909,6 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
@@ -1921,9 +1947,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
- LOperand* temp = instr->deoptimize_on_minus_zero()
- ? TempRegister()
- : NULL;
+ LOperand* temp = TempRegister();
LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
@@ -1935,26 +1959,17 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- if (instr->value()->type().IsSmi()) {
- LOperand* value = UseRegister(instr->value());
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ LOperand* value = UseRegister(val);
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- LOperand* value = UseRegister(instr->value());
- LOperand* xmm_temp =
- (truncating && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- } else {
- LOperand* value = UseFixed(instr->value(), ecx);
- LTaggedToINoSSE2* res =
- new(zone()) LTaggedToINoSSE2(value, TempRegister(),
- TempRegister(), TempRegister());
- return AssignEnvironment(DefineFixed(res, ecx));
- }
+ LOperand* xmm_temp =
+ (CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating)
+ ? FixedTemp(xmm1) : NULL;
+ LTaggedToI* res = new(zone()) LTaggedToI(UseRegister(val), xmm_temp);
+ return AssignEnvironment(DefineSameAsFirst(res));
}
}
} else if (from.IsDouble()) {
@@ -1974,7 +1989,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
+ bool needs_temp = CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating;
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -2000,8 +2015,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2049,32 +2065,29 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = NULL;
- if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- if (instr->CanOmitPrototypeChecks()) return result;
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- // If the target is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the target gets promoted before we
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+ // If the object is in new space, we'll emit a global cell compare and so
+ // want the value in a register. If the object gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
- LOperand* value = instr->target_in_new_space()
+ LOperand* value = instr->object_in_new_space()
? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (instr->CanOmitMapChecks()) return result;
- return AssignEnvironment(result);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
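
When any of the checked maps has a migration target, the instruction gets
both an environment (it can still deoptimize) and a pointer map, because the
deferred path calls into the runtime to migrate the instance. A sketch of the
deferred half, matching the DoDeferredInstanceMigration declaration added to
the header earlier (the runtime function name is assumed):

    void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr,
                                               Register object) {
      {
        PushSafepointRegistersScope scope(this);
        __ push(object);
        __ xor_(esi, esi);  // no context for this runtime call
        __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
        RecordSafepointWithRegisters(
            instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
        __ test(eax, Immediate(kSmiTagMask));
      }
      // A smi result means migration failed; deoptimize and retry there.
      DeoptimizeIf(zero, instr->environment());
    }
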
@@ -2206,25 +2219,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* obj = UseFixed(instr->object(), edx);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return MarkAsCall(DefineFixed(result, eax), instr);
- } else {
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
@@ -2418,7 +2412,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_external_location = instr->access().IsExternalMemory() &&
instr->access().offset() == 0;
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
@@ -2565,10 +2559,23 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kNotEnoughSpillSlotsForOsr);
+ spill_index = 0;
+ }
+ if (spill_index == 0) {
+ // The dynamic frame alignment state overwrites the first local.
+ // The first local is saved at the end of the unoptimized frame.
+ spill_index = graph()->osr()->UnoptimizedFrameSlots();
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
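
A worked example of the new index mapping (illustrative numbers): assume two
parameters, three locals, and UnoptimizedFrameSlots() == 3.

    // env_index 0..1 -> parameter slots via GetParameterStackSlot(env_index)
    // env_index 2    -> spill_index 0, remapped to slot 3 because the
    //                   dynamic frame alignment state overwrites the first
    //                   local
    // env_index 3..4 -> spill_index 1 and 2
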
@@ -2591,6 +2598,14 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
@@ -2634,20 +2649,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
diff --git a/chromium/v8/src/ia32/lithium-ia32.h b/chromium/v8/src/ia32/lithium-ia32.h
index 6b0f9d0a74c..aa5c0bbeed7 100644
--- a/chromium/v8/src/ia32/lithium-ia32.h
+++ b/chromium/v8/src/ia32/lithium-ia32.h
@@ -50,7 +50,6 @@ class LCodeGen;
V(ArithmeticD) \
V(ArithmeticT) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -63,20 +62,21 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
+ V(ClobberDoubles) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@@ -129,7 +129,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@@ -163,6 +162,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -176,7 +176,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(SubI) \
V(TaggedToI) \
- V(TaggedToINoSSE2) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
@@ -185,18 +184,23 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
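
V8_FINAL and V8_OVERRIDE compile away on toolchains without C++11 support, so
annotating this header costs nothing on old compilers. A plausible shape of
the macros (the real definitions live in v8config.h and key off per-compiler
feature tests, with __final fallbacks on some compilers):

    #if V8_HAS_CXX11_FINAL_AND_OVERRIDE  // assumed feature-test macro
    # define V8_FINAL final
    # define V8_OVERRIDE override
    #else
    # define V8_FINAL     // expands to nothing pre-C++11
    # define V8_OVERRIDE
    #endif
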
@@ -206,13 +210,16 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
+ bit_field_(IsCallBits::encode(false)) {
+ set_position(RelocInfo::kNoPosition);
+ }
+
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -250,23 +257,31 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
+  // PositionBits uses 31 bits to store the position value, which may be
+  // RelocInfo::kNoPosition (-1). The accessors add or subtract 1 so that
+  // the encoded value in bit_field_ is always >= 0 and fits into the
+  // 31-bit field.
+ void set_position(int pos) {
+ bit_field_ = PositionBits::update(bit_field_, pos + 1);
+ }
+ int position() { return PositionBits::decode(bit_field_) - 1; }
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
virtual bool ClobbersDoubleRegisters() const {
- return is_call_ ||
- (!CpuFeatures::IsSupported(SSE2) &&
- // We only have rudimentary X87Stack tracking, thus in general
- // cannot handle deoptimization nor phi-nodes.
- (HasEnvironment() || IsControl()));
+ return IsCall() ||
+ // We only have rudimentary X87Stack tracking, thus in general
+ // cannot handle phi-nodes.
+ (!CpuFeatures::IsSafeForSnapshot(SSE2) && IsControl());
}
virtual bool HasResult() const = 0;
@@ -295,10 +310,13 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+ class PositionBits: public BitField<int, 1, 31> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
+ int bit_field_;
};
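
The single bit_field_ packs what used to be a bool plus an implicit position:
bit 0 is the is-call flag, bits 1..31 hold position + 1 (so kNoPosition, -1,
encodes as 0). A self-contained stand-in for V8's BitField template shows the
arithmetic:

    #include <cassert>
    #include <stdint.h>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static uint32_t update(uint32_t prev, T value) {
        return (prev & ~kMask) | encode(value);
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> shift);
      }
    };

    int main() {
      typedef BitField<bool, 0, 1> IsCallBits;
      typedef BitField<int, 1, 31> PositionBits;
      uint32_t field = IsCallBits::encode(false);
      field = PositionBits::update(field, -1 + 1);  // kNoPosition stored as 0
      field = IsCallBits::update(field, true);
      assert(IsCallBits::decode(field));
      assert(PositionBits::decode(field) - 1 == -1);
      return 0;
    }
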
@@ -306,11 +324,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -321,15 +341,15 @@ class LTemplateInstruction: public LInstruction {
private:
// Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block) : block_(block) {
parallel_moves_[BEFORE] = NULL;
@@ -339,8 +359,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -376,11 +396,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -388,29 +408,42 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+ LClobberDoubles() { ASSERT(!CpuFeatures::IsSafeForSnapshot(SSE2)); }
+
+ virtual bool ClobbersDoubleRegisters() const { return true; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
+};
+
+
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
+ explicit LGoto(HBasicBlock* block) : block_(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
- int block_id() const { return block_id_; }
+ int block_id() const { return block_->block_id(); }
+ virtual bool ClobbersDoubleRegisters() const { return false; }
+
+ bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
private:
- int block_id_;
+ HBasicBlock* block_;
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -419,22 +452,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -450,14 +485,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 1, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
@@ -474,9 +511,11 @@ class LCallStub: public LTemplateInstruction<1, 1, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
@@ -486,7 +525,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -525,7 +564,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LWrapReceiver(LOperand* receiver,
LOperand* function,
@@ -543,7 +582,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -564,7 +603,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -578,11 +617,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -594,20 +633,20 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LModI: public LTemplateInstruction<1, 2, 1> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -624,7 +663,7 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -642,7 +681,7 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -661,7 +700,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -678,7 +717,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -701,7 +740,7 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
};
-class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -714,7 +753,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound: public LTemplateInstruction<1, 2, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
@@ -731,7 +770,7 @@ class LMathRound: public LTemplateInstruction<1, 2, 1> {
};
-class LMathAbs: public LTemplateInstruction<1, 2, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
@@ -746,7 +785,7 @@ class LMathAbs: public LTemplateInstruction<1, 2, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -758,7 +797,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -770,7 +809,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -782,7 +821,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -794,7 +833,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value,
LOperand* temp1,
@@ -813,7 +852,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -825,7 +864,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
@@ -841,7 +880,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -851,12 +890,24 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -868,11 +919,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -885,7 +936,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -898,11 +949,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -913,11 +964,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -931,11 +982,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<3, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
public:
LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -950,13 +1001,13 @@ class LStringCompareAndBranch: public LControlInstruction<3, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -970,11 +1021,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -987,7 +1038,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -998,11 +1050,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -1015,7 +1067,7 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -1031,11 +1083,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 3, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LCmpT(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1050,7 +1102,7 @@ class LCmpT: public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -1064,7 +1116,7 @@ class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1083,7 +1135,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1092,7 +1145,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1105,7 +1158,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1120,7 +1173,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1137,7 +1190,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1159,7 +1212,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1174,7 +1227,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1183,7 +1236,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1192,7 +1245,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1207,7 +1260,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1218,16 +1271,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
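
The new value() signature reflects that handle access now requires an explicit
Isolate*. A minimal sketch of the call-site impact (the codegen object and
instruction pointer are assumed names, not taken from this patch):

    // Before: Handle<Object> v = instr->value();
    // After the signature change, the isolate is threaded through:
    Handle<Object> v = instr->value(codegen->isolate());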
-class LBranch: public LControlInstruction<1, 1> {
+class LBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1240,11 +1295,11 @@ class LBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LControlInstruction<1, 0> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1259,7 +1314,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 0> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1271,7 +1326,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1284,7 +1339,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1299,7 +1354,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
: index_(index) {
@@ -1320,7 +1375,7 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1344,7 +1399,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 2, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1358,19 +1413,7 @@ class LThrow: public LTemplateInstruction<0, 2, 0> {
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1390,7 +1433,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1405,7 +1448,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1420,20 +1463,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
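
LRandom now reserves three scratch registers alongside its input, and the
accessors became const. A plausible allocation in the chunk builder, sketched
under the assumption that fixed registers are used (the particular register
choices here are illustrative, not from this patch):

    // Hypothetical LChunkBuilder::DoRandom body:
    LOperand* global_object = UseFixed(instr->global_object(), eax);
    LOperand* scratch = FixedTemp(ebx);
    LOperand* scratch2 = FixedTemp(ecx);
    LOperand* scratch3 = FixedTemp(edx);
    LRandom* result = new(zone()) LRandom(global_object, scratch,
                                          scratch2, scratch3);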
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1446,16 +1498,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LArithmeticT(Token::Value op,
LOperand* context,
@@ -1471,9 +1525,11 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
Token::Value op() const { return op_; }
@@ -1482,7 +1538,7 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
};
-class LReturn: public LTemplateInstruction<0, 3, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LReturn(LOperand* value, LOperand* context,
LOperand* parameter_count) {
@@ -1505,17 +1561,12 @@ class LReturn: public LTemplateInstruction<0, 3, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
}
- virtual bool ClobbersDoubleRegisters() const {
- return !CpuFeatures::IsSupported(SSE2) &&
- !hydrogen()->representation().IsDouble();
- }
-
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
@@ -1523,22 +1574,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -1555,7 +1591,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
inputs_[0] = function;
@@ -1570,7 +1606,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1583,7 +1620,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1598,7 +1635,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
- virtual bool ClobbersDoubleRegisters() const {
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
return !CpuFeatures::IsSupported(SSE2) &&
!IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
}
@@ -1606,7 +1643,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool key_is_smi() {
return hydrogen()->key()->representation().IsTagged();
@@ -1627,7 +1664,7 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
inputs_[0] = context;
@@ -1643,14 +1680,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
inputs_[0] = context;
@@ -1668,7 +1705,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
@@ -1681,7 +1718,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreGlobalGeneric(LOperand* context,
LOperand* global_object,
@@ -1703,7 +1740,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1716,11 +1753,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1737,11 +1774,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1753,7 +1790,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1766,7 +1803,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1782,21 +1836,21 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1808,7 +1862,7 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
@@ -1821,7 +1875,7 @@ class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1833,7 +1887,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1845,19 +1899,19 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInvokeFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1870,13 +1924,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallKeyed(LOperand* context, LOperand* key) {
inputs_[0] = context;
@@ -1889,13 +1943,13 @@ class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 1, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNamed(LOperand* context) {
inputs_[0] = context;
@@ -1906,14 +1960,14 @@ class LCallNamed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 2, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
@@ -1930,7 +1984,7 @@ class LCallFunction: public LTemplateInstruction<1, 2, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallGlobal(LOperand* context) {
inputs_[0] = context;
@@ -1941,25 +1995,25 @@ class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const {return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 2, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1972,13 +2026,13 @@ class LCallNew: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
@@ -1991,13 +2045,13 @@ class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
inputs_[0] = context;
@@ -2013,7 +2067,7 @@ class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -2025,7 +2079,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2038,7 +2092,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2052,7 +2106,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -2064,7 +2131,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2078,7 +2145,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2094,7 +2161,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDoubleToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2111,7 +2178,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -2125,7 +2192,7 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2142,32 +2209,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToINoSSE2: public LTemplateInstruction<1, 1, 3> {
- public:
- LTaggedToINoSSE2(LOperand* value,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
- LOperand* scratch2() { return temps_[1]; }
- LOperand* scratch3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToINoSSE2, "tagged-to-i-nosse2")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2179,7 +2221,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2189,14 +2231,12 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- virtual bool ClobbersDoubleRegisters() const { return false; }
-
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2214,7 +2254,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LStoreNamedField(LOperand* obj,
LOperand* val,
@@ -2234,16 +2274,16 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
@@ -2258,13 +2298,13 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
@@ -2283,13 +2323,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* object,
@@ -2309,13 +2349,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* context,
@@ -2336,7 +2376,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2345,7 +2385,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2361,7 +2401,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 3, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
@@ -2378,7 +2418,7 @@ class LStringAdd: public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
inputs_[0] = context;
@@ -2395,7 +2435,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharFromCode(LOperand* context, LOperand* char_code) {
inputs_[0] = context;
@@ -2410,20 +2450,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
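
The rename from LCheckFunction to LCheckValue tracks the matching hydrogen
change: the instruction compares against an arbitrary known heap value rather
than specifically a JSFunction. Call sites update mechanically (illustrative):

    // old: LCheckFunction* chk = new(zone()) LCheckFunction(value);
    // new:
    LCheckValue* chk = new(zone()) LCheckValue(value);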
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LCheckInstanceType(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2438,7 +2478,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2451,25 +2491,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2481,7 +2503,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2493,7 +2515,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* value) {
inputs_[0] = value;
@@ -2505,7 +2527,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2519,7 +2541,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
// Truncating conversion from a tagged value to an int32.
-class LClampTToUint8NoSSE2: public LTemplateInstruction<1, 1, 3> {
+class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LClampTToUint8NoSSE2(LOperand* unclamped,
LOperand* temp1,
@@ -2542,7 +2564,7 @@ class LClampTToUint8NoSSE2: public LTemplateInstruction<1, 1, 3> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2555,7 +2577,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocate: public LTemplateInstruction<1, 2, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
inputs_[0] = context;
@@ -2572,7 +2594,7 @@ class LAllocate: public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LRegExpLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2585,7 +2607,7 @@ class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LFunctionLiteral(LOperand* context) {
inputs_[0] = context;
@@ -2598,7 +2620,7 @@ class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2611,7 +2633,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 2, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -2625,7 +2647,7 @@ class LTypeof: public LTemplateInstruction<1, 2, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2638,20 +2660,20 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LOsrEntry() {}
-
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 1, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LStackCheck(LOperand* context) {
inputs_[0] = context;
@@ -2669,7 +2691,7 @@ class LStackCheck: public LTemplateInstruction<0, 1, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
@@ -2683,7 +2705,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2699,7 +2721,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2713,7 +2735,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2728,7 +2750,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph),
@@ -2744,7 +2766,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2800,7 +2822,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
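
Abort() now takes the BailoutReason enum instead of a raw string, matching the
kOperandNotANumber-style constants used throughout this patch. Call sites
change shape as follows (sketch; the particular reason constant is assumed):

    // old: Abort("Unsupported phi use of const variable");
    // new:
    Abort(kUnsupportedPhiUseOfConstVariable);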
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2889,7 +2911,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.cc b/chromium/v8/src/ia32/macro-assembler-ia32.cc
index 2ab5a259321..b65d328435e 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.cc
@@ -48,12 +48,67 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
+ // TODO(titzer): should we just use a null handle here instead?
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
}
}
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+ if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ mov(destination, value);
+ return;
+ }
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(destination, Immediate(index));
+ mov(destination, Operand::StaticArray(destination,
+ times_pointer_size,
+ roots_array_start));
+}
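
LoadRoot now has two paths: roots that are immutable after initialization are
materialized as immediates, while mutable roots are loaded indirectly from the
roots array. The address computed by the Operand::StaticArray form is, in
plain terms (host-side arithmetic sketch only):

    #include <cstdint>
    // destination = *(roots_array_start + index * kPointerSize)
    uintptr_t RootSlotAddress(uintptr_t roots_array_start, int index) {
      return roots_array_start + index * sizeof(void*);  // times_pointer_size
    }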
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+ source);
+}
+
+
+void MacroAssembler::CompareRoot(Register with,
+ Register scratch,
+ Heap::RootListIndex index) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(index));
+ cmp(with, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
+}
+
+
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
void MacroAssembler::InNewSpace(
Register object,
Register scratch,
@@ -161,6 +216,236 @@ void MacroAssembler::ClampUint8(Register reg) {
}
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(input_reg, result_reg, offset, true);
+ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
+ Label done;
+ cvttsd2si(result_reg, Operand(input_reg));
+ cmp(result_reg, 0x80000000u);
+ j(not_equal, &done, Label::kNear);
+
+ sub(esp, Immediate(kDoubleSize));
+ movdbl(MemOperand(esp, 0), input_reg);
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ bind(&done);
+}
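
cvttsd2si produces 0x80000000 (kMinInt) when the input is NaN or out of int32
range, so that value doubles as a "take the slow path" sentinel; only then is
the double spilled to the stack and handed to DoubleToIStub. Host-side
illustration of the control flow (CvttsdToSi and SlowTruncate are assumed
stand-ins for the emitted instruction and the stub call):

    #include <cstdint>
    int32_t CvttsdToSi(double d);    // stand-in for cvttsd2si
    int32_t SlowTruncate(double d);  // stand-in for DoubleToIStub

    int32_t TruncateDouble(double input) {
      int32_t result = CvttsdToSi(input);             // hardware truncation
      if (result != static_cast<int32_t>(0x80000000u))
        return result;                                // fast path succeeded
      return SlowTruncate(input);                     // NaN/overflow handling
    }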
+
+
+void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
+ sub(esp, Immediate(kDoubleSize));
+ fst_d(MemOperand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+}
+
+
+void MacroAssembler::X87TOSToI(Register result_reg,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ Label done;
+ sub(esp, Immediate(kPointerSize));
+ fist_s(MemOperand(esp, 0));
+ fld(0);
+ fild_s(MemOperand(esp, 0));
+ pop(result_reg);
+ FCmp();
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fst_s(MemOperand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(not_zero, conversion_failed, dst);
+ }
+ bind(&done);
+}
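
The minus-zero check works because storing the x87 top-of-stack as a
single-precision float leaves 0.0 with an all-zero bit pattern and -0.0 with
only the sign bit set, so an integer test of the reloaded word distinguishes
them. Portable illustration:

    #include <cstdint>
    #include <cstring>
    bool IsMinusZero(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits == 0x80000000u;  // sign bit set, everything else zero
    }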
+
+
+void MacroAssembler::DoubleToI(Register result_reg,
+ XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ ASSERT(!input_reg.is(scratch));
+ cvttsd2si(result_reg, Operand(input_reg));
+ cvtsi2sd(scratch, Operand(result_reg));
+ ucomisd(scratch, input_reg);
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to conversion_failed.
+ and_(result_reg, 1);
+ j(not_zero, conversion_failed, dst);
+ bind(&done);
+ }
+}
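
The SSE2 path detects lost precision by a round trip: truncate, convert the
integer back, and compare with ucomisd; NaN falls out via the parity flag.
The same idea in host C++ (illustrative only; out-of-range and NaN inputs are
handled by the emitted code through the 0x80000000 sentinel instead):

    #include <cstdint>
    bool ConvertsExactly(double d, int32_t* out) {
      int32_t i = static_cast<int32_t>(d);            // cvttsd2si analogue
      if (static_cast<double>(i) != d) return false;  // inexact (or NaN)
      *out = i;
      return true;
    }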
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done, slow_case;
+
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(this, SSE3);
+ Label convert;
+ // Use more powerful conversion when sse3 is available.
+ // Load x87 register with heap number.
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ // Get exponent alone and check for too-big exponent.
+ mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ and_(result_reg, HeapNumber::kExponentMask);
+ const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ cmp(Operand(result_reg), Immediate(kTooBigExponent));
+ j(greater_equal, &slow_case, Label::kNear);
+
+ // Reserve space for 64 bit answer.
+ sub(Operand(esp), Immediate(kDoubleSize));
+ // Do conversion, which cannot fail because we checked the exponent.
+ fisttp_d(Operand(esp, 0));
+ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
+ add(Operand(esp), Immediate(kDoubleSize));
+ jmp(&done, Label::kNear);
+
+ // Slow case.
+ bind(&slow_case);
+ if (input_reg.is(result_reg)) {
+ // Input is clobbered. Restore number from fpu stack
+ sub(Operand(esp), Immediate(kDoubleSize));
+ fstp_d(Operand(esp, 0));
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ } else {
+ fstp(0);
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ } else if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(this, SSE2);
+ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, Operand(xmm0));
+ cmp(result_reg, 0x80000000u);
+ j(not_equal, &done, Label::kNear);
+  // Check if the input was 0x80000000 (kMinInt).
+ // If no, then we got an overflow and we deoptimize.
+ ExternalReference min_int = ExternalReference::address_of_min_int();
+ ucomisd(xmm0, Operand::StaticVariable(min_int));
+ j(not_equal, &slow_case, Label::kNear);
+ j(parity_even, &slow_case, Label::kNear); // NaN.
+ jmp(&done, Label::kNear);
+
+ // Slow case.
+ bind(&slow_case);
+ if (input_reg.is(result_reg)) {
+ // Input is clobbered. Restore number from double scratch.
+ sub(esp, Immediate(kDoubleSize));
+ movdbl(MemOperand(esp, 0), xmm0);
+ SlowTruncateToI(result_reg, esp, 0);
+ add(esp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+ bind(&done);
+}
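
The SSE3 path's exponent guard: fisttp_d writes an int64, so any input whose
unbiased exponent is >= 63 (magnitude >= 2^63) could overflow it and must take
the slow case; kTooBigExponent encodes exactly that bound in the raw exponent
field. Equivalently, as a conservative predicate (NaN also fails, since its
comparisons are false):

    bool FitsInt64AfterTruncation(double d) {
      return d > -9223372036854775808.0 &&   // -2^63
             d <  9223372036854775808.0;     //  2^63
    }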
+
+
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ XMMRegister temp,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision) {
+ Label done;
+ ASSERT(!temp.is(xmm0));
+
+ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, lost_precision, Label::kNear);
+
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ ASSERT(!temp.is(no_xmm_reg));
+ CpuFeatureScope scope(this, SSE2);
+
+ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, Operand(xmm0));
+ cvtsi2sd(temp, Operand(result_reg));
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, Label::kNear);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, Label::kNear);
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ and_(result_reg, 1);
+ RecordComment("Deferred TaggedToI: minus zero");
+ j(not_zero, lost_precision, Label::kNear);
+ }
+ } else {
+ // TODO(olivf) Converting a number on the fpu is actually quite slow. We
+ // should first try a fast conversion and then bailout to this slow case.
+ Label lost_precision_pop, zero_check;
+ Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
+ ? &lost_precision_pop : lost_precision;
+ sub(esp, Immediate(kPointerSize));
+ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
+ fist_s(MemOperand(esp, 0));
+ fild_s(MemOperand(esp, 0));
+ FCmp();
+ pop(result_reg);
+ j(not_equal, lost_precision_int, Label::kNear);
+ j(parity_even, lost_precision_int, Label::kNear); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ test(result_reg, Operand(result_reg));
+ j(zero, &zero_check, Label::kNear);
+ fstp(0);
+ jmp(&done, Label::kNear);
+ bind(&zero_check);
+ // To check for minus zero, we load the value again as float, and check
+ // if that is still 0.
+ sub(esp, Immediate(kPointerSize));
+ fstp_s(Operand(esp, 0));
+ pop(result_reg);
+ test(result_reg, Operand(result_reg));
+ j(zero, &done, Label::kNear);
+ jmp(lost_precision, Label::kNear);
+
+ bind(&lost_precision_pop);
+ fstp(0);
+ jmp(lost_precision, Label::kNear);
+ }
+ }
+ bind(&done);
+}
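
On the x87 path the duplicated value (the extra fld(0) pushed for the
minus-zero check) must be popped before bailing out, which is what the
lost_precision_pop label exists for; jumping straight to lost_precision would
leak an x87 stack slot. The FCmp-based exactness test mirrors the SSE2 round
trip: fist_s stores an int32, fild_s reloads it, and equality means the
conversion was exact.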
+
+
static double kUint32Bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
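
kUint32Bias is 2^32: ia32 has no unsigned-to-double conversion, so a uint32 is
first converted through a signed int32 view and, if the sign bit was set,
corrected by adding the bias. Host-side equivalent:

    #include <cstdint>
    double Uint32ToDouble(uint32_t u) {
      double d = static_cast<double>(static_cast<int32_t>(u));  // signed view
      if (static_cast<int32_t>(u) < 0) d += 4294967296.0;       // + 2^32 bias
      return d;
    }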
@@ -432,21 +717,6 @@ void MacroAssembler::SafePush(const Immediate& x) {
}
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -678,7 +948,7 @@ void MacroAssembler::AssertNumber(Register object) {
JumpIfSmi(object, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- Check(equal, "Operand not a number");
+ Check(equal, kOperandNotANumber);
bind(&ok);
}
}
@@ -687,7 +957,7 @@ void MacroAssembler::AssertNumber(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(equal, "Operand is not a smi");
+ Check(equal, kOperandIsNotASmi);
}
}
@@ -695,12 +965,12 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
+ Check(not_equal, kOperandIsASmiAndNotAString);
push(object);
mov(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
pop(object);
- Check(below, "Operand is not a string");
+ Check(below, kOperandIsNotAString);
}
}
@@ -708,12 +978,12 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a name");
+ Check(not_equal, kOperandIsASmiAndNotAName);
push(object);
mov(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
pop(object);
- Check(below_equal, "Operand is not a name");
+ Check(below_equal, kOperandIsNotAName);
}
}
@@ -721,7 +991,7 @@ void MacroAssembler::AssertName(Register object) {
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi");
+ Check(not_equal, kOperandIsASmi);
}
}
@@ -734,7 +1004,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(Immediate(CodeObject()));
if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, "code object not properly patched");
+ Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -743,7 +1013,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(type)));
- Check(equal, "stack frame types must match");
+ Check(equal, kStackFrameTypesMustMatch);
}
leave();
}
@@ -1024,7 +1294,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
cmp(scratch1, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
+ Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
int offset =
@@ -1037,7 +1307,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Read the first word and compare to native_context_map.
cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
}
// Check if both contexts are the same.
@@ -1056,12 +1326,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
cmp(scratch2, isolate()->factory()->null_value());
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
+ Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
// Read the first word and compare to native_context_map(),
cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
}
int token_offset = Context::kHeaderSize +
@@ -1206,7 +1476,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
cmp(result, Operand::StaticVariable(allocation_top));
- Check(equal, "Unexpected allocation top");
+ Check(equal, kUnexpectedAllocationTop);
#endif
return;
}
@@ -1226,7 +1496,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
AllocationFlags flags) {
if (emit_debug_code()) {
test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
+ Check(zero, kUnalignedAllocationInNewSpace);
}
ExternalReference allocation_top =
@@ -1268,26 +1538,29 @@ void MacroAssembler::Allocate(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
bind(&aligned);
}
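
The new guard matters for pretenured allocation: in old data space the
allocation limit is not guaranteed to be double-aligned, so writing the
one-pointer filler at an unchecked result could scribble past the limit. The
check's intent as a standalone predicate (names assumed):

    #include <cstdint>
    bool SafeToWriteFiller(uintptr_t result, uintptr_t limit,
                           bool old_data_space) {
      return !old_data_space || result < limit;  // else bail to gc_required
    }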
- Register top_reg = result_end.is_valid() ? result_end : result;
-
// Calculate new top and bail out if space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
+ Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
mov(top_reg, result);
}
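The hunk above also reorders the limit load so the DOUBLE_ALIGNMENT path can re-check the limit before writing the filler: in old data space the limit is not guaranteed to be aligned, so the filler store needs its own bounds check. A minimal C++ sketch of the fast path the emitted code implements (the helper name and the 4-byte filler size are assumptions for ia32, not V8 API):

    #include <stdint.h>
    #include <stddef.h>
    static char* AllocateRawSketch(char** top, char* limit, size_t size,
                                   bool double_align, bool old_data_space) {
      char* result = *top;
      if (double_align && (((uintptr_t)result) & 7) != 0) {
        // New space skips this check: its limit is aligned, so an unaligned
        // result is strictly below it and the filler store is safe.
        if (old_data_space && result >= limit) return NULL;  // bail to GC
        // ... store one_pointer_filler_map at result (elided) ...
        result += 4;  // kDoubleSize / 2 on ia32
      }
      if (result + size > limit) return NULL;  // space exhausted
      *top = result + size;
      return result;
    }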
@@ -1342,14 +1615,21 @@ void MacroAssembler::Allocate(int header_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
@@ -1357,9 +1637,6 @@ void MacroAssembler::Allocate(int header_size,
}
// Calculate new top and bail out if space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
// We assume that element_count*element_size + header_size does not
// overflow.
if (element_count_type == REGISTER_VALUE_IS_SMI) {
@@ -1413,14 +1690,21 @@ void MacroAssembler::Allocate(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ cmp(result, Operand::StaticVariable(allocation_limit));
+ j(above_equal, gc_required);
+ }
mov(Operand(result, 0),
Immediate(isolate()->factory()->one_pointer_filler_map()));
add(result, Immediate(kDoubleSize / 2));
@@ -1428,9 +1712,6 @@ void MacroAssembler::Allocate(Register object_size,
}
// Calculate new top and bail out if space is exhausted.
- ExternalReference allocation_limit =
- AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
@@ -1458,7 +1739,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, "Undo allocation of non allocated memory");
+ Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
mov(Operand::StaticVariable(new_space_allocation_top), object);
}
@@ -1927,50 +2208,15 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
}
-// If true, a Handle<T> returned by value from a function with cdecl calling
-// convention will be returned directly as the value of its location_ field
-// in register eax.
-// If false, it is returned as a pointer to a memory region preallocated by
-// the caller. A pointer to this region is passed to the function as an
-// implicit first argument.
-#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
-static const bool kReturnHandlesDirectly = true;
-#else
-static const bool kReturnHandlesDirectly = false;
-#endif
-
-
-Operand ApiParameterOperand(int index, bool returns_handle) {
- int offset = (index +(kReturnHandlesDirectly || !returns_handle ? 0 : 1));
- return Operand(esp, offset * kPointerSize);
+Operand ApiParameterOperand(int index) {
+ return Operand(esp, index * kPointerSize);
}
-void MacroAssembler::PrepareCallApiFunction(int argc, bool returns_handle) {
- if (kReturnHandlesDirectly || !returns_handle) {
- EnterApiExitFrame(argc);
- // When handles are returned directly we don't have to allocate extra
- // space for and pass an out parameter.
- if (emit_debug_code()) {
- mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
- }
- } else {
- // We allocate two additional slots: return value and pointer to it.
- EnterApiExitFrame(argc + 2);
-
- // The argument slots are filled as follows:
- //
- // n + 1: output slot
- // n: arg n
- // ...
- // 1: arg1
- // 0: pointer to the output slot
-
- lea(esi, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), esi);
- if (emit_debug_code()) {
- mov(Operand(esi, 0), Immediate(0));
- }
+void MacroAssembler::PrepareCallApiFunction(int argc) {
+ EnterApiExitFrame(argc);
+ if (emit_debug_code()) {
+ mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
}
}
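With kReturnHandlesDirectly gone, API callbacks always report results through the ReturnValue slot, so PrepareCallApiFunction no longer reserves an out-parameter slot and ApiParameterOperand maps an index straight to a stack word. Sketch of the simplified addressing on ia32 (kPointerSize == 4):

    // ApiParameterOperand(i) == [esp + i * 4]; the old "+1 when a return
    // handle out-slot is prepended" adjustment no longer exists.
    //   [esp + 0*4]  arg 0
    //   [esp + 1*4]  arg 1
    //   ...          filled by callers before CallApiFunctionAndReturn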
@@ -1979,7 +2225,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
@@ -2035,21 +2280,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
}
Label prologue;
- if (returns_handle) {
- if (!kReturnHandlesDirectly) {
- // PrepareCallApiFunction saved pointer to the output slot into
- // callee-save register esi.
- mov(eax, Operand(esi, 0));
- }
- Label empty_handle;
- // Check if the result handle holds 0.
- test(eax, eax);
- j(zero, &empty_handle);
- // It was non-zero. Dereference to get the result value.
- mov(eax, Operand(eax, 0));
- jmp(&prologue);
- bind(&empty_handle);
- }
// Load the value from ReturnValue
mov(eax, Operand(ebp, return_value_offset * kPointerSize));
@@ -2062,7 +2292,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
// previous handle scope.
mov(Operand::StaticVariable(next_address), ebx);
sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, "Invalid HandleScope level");
+ Assert(above_equal, kInvalidHandleScopeLevel);
cmp(edi, Operand::StaticVariable(limit_address));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -2104,7 +2334,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(return_value, isolate()->factory()->null_value());
j(equal, &ok, Label::kNear);
- Abort("API call returned invalid object");
+ Abort(kAPICallReturnedInvalidObject);
bind(&ok);
#endif
@@ -2390,7 +2620,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (emit_debug_code()) {
cmp(FieldOperand(dst, HeapObject::kMapOffset),
isolate()->factory()->with_context_map());
- Check(not_equal, "Variable resolved to with context.");
+ Check(not_equal, kVariableResolvedToWithContext);
}
}
@@ -2477,7 +2707,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
@@ -2568,6 +2798,8 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
// Make sure the floating point stack is either empty or has depth items.
ASSERT(depth <= 7);
+ // This is very expensive.
+ ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
// The top-of-stack (tos) is 7 if there is one item pushed.
int tos = (8 - depth) % 8;
@@ -2578,7 +2810,7 @@ void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
and_(eax, kTopMask);
shr(eax, 11);
cmp(eax, Immediate(tos));
- Check(equal, "Unexpected FPU stack depth after instruction");
+ Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
fnclex();
pop(eax);
}
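VerifyX87StackDepth reads the FPU status word and compares its TOP field with the expected depth. A self-contained sketch of the decode (bit positions per the IA-32 manuals; treating kTopMask as 0x3800 is an assumption):

    #include <stdint.h>
    // TOP occupies bits 11..13 of the status word captured by fnstsw.
    static int X87TopOfStack(uint16_t status_word) {
      return (status_word & 0x3800) >> 11;
    }
    // With `depth` items pushed the expected value is (8 - depth) % 8,
    // e.g. depth 1 gives TOP == 7, matching the comment in the hunk above.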
@@ -2661,8 +2893,8 @@ void MacroAssembler::DecrementCounter(Condition cc,
}
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
+void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+ if (emit_debug_code()) Check(cc, reason);
}
@@ -2679,16 +2911,16 @@ void MacroAssembler::AssertFastElements(Register elements) {
cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_cow_array_map()));
j(equal, &ok);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cc, BailoutReason reason) {
Label L;
j(cc, &L);
- Abort(msg);
+ Abort(reason);
// will not return here
bind(&L);
}
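These hunks belong to a tree-wide migration: Assert, Check, and Abort now take a BailoutReason enum instead of a raw string, shrinking call sites while Abort recovers the text through GetBailoutReason. A minimal sketch of the assumed shape (not the full V8 list):

    enum BailoutReason {
      kStackFrameTypesMustMatch,
      kInvalidHandleScopeLevel,
      kUnexpectedFPUStackDepthAfterInstruction
      // ... one constant per former message string ...
    };
    const char* GetBailoutReason(BailoutReason reason);  // id -> static text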
@@ -2709,12 +2941,13 @@ void MacroAssembler::CheckStackAlignment() {
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
// We want to pass the msg string like a smi to avoid GC
// problems; however, msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
@@ -2723,6 +2956,11 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
push(eax);
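The surrounding comment describes how Abort smuggles the message pointer past the GC: split it into a smi-aligned base plus a 0-or-1 delta, each of which looks like a smi. A self-contained sketch of the encode/decode pair (assumes 32-bit pointers, kSmiTagMask == 1, kSmiTag == 0; the real code re-tags delta with Smi::FromInt before pushing):

    #include <stdint.h>
    static void EncodeAbortMsg(const char* msg, intptr_t* p0, intptr_t* delta) {
      intptr_t p1 = (intptr_t)msg;
      *p0 = p1 & ~(intptr_t)1;  // even, hence a valid smi bit pattern
      *delta = p1 - *p0;        // 0 or 1
    }
    static const char* DecodeAbortMsg(intptr_t p0, intptr_t delta) {
      return (const char*)(p0 + delta);  // exact original pointer
    }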
@@ -3118,7 +3356,7 @@ void MacroAssembler::EnsureNotWhite(
if (emit_debug_code()) {
mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, "Live Bytes Count overflow chunk size");
+ Check(less_equal, kLiveBytesCountOverflowChunkSize);
}
bind(&done);
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.h b/chromium/v8/src/ia32/macro-assembler-ia32.h
index 3bca930d667..e4e4533bf5f 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.h
@@ -61,6 +61,15 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
+ // Operations on roots in the root-array.
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
+ void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
+ // These methods can only be used with constant roots (i.e. non-writable
+ // and not in new space).
+ void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
+
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction {
@@ -362,10 +371,6 @@ class MacroAssembler: public Assembler {
void SafeSet(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
- // Compare against a known root, e.g. undefined, null, true, ...
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
-
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -469,6 +474,21 @@ class MacroAssembler: public Assembler {
XMMRegister scratch_reg,
Register result_reg);
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
+ void TruncateX87TOSToI(Register result_reg);
+
+ void DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+ void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
+ MinusZeroMode minus_zero_mode, Label* lost_precision);
// Smi tagging support.
void SmiTag(Register reg) {
@@ -777,7 +797,7 @@ class MacroAssembler: public Assembler {
// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
// etc. Saves context (esi). If space was reserved for return value then
// stores the pointer to the reserved slot into esi.
- void PrepareCallApiFunction(int argc, bool returns_handle);
+ void PrepareCallApiFunction(int argc);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers ebx, edi and
@@ -787,7 +807,6 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_ebp);
// Jump to a runtime routine.
@@ -807,6 +826,8 @@ class MacroAssembler: public Assembler {
void Drop(int element_count);
void Call(Label* target) { call(target); }
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
// Emit call to the code we are currently generating.
void CallSelf() {
@@ -844,15 +865,15 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
+ void Assert(Condition cc, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
+ void Check(Condition cc, BailoutReason reason);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason reason);
// Check that the stack is aligned.
void CheckStackAlignment();
@@ -1032,7 +1053,7 @@ inline Operand GlobalObjectOperand() {
// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index, bool returns_handle);
+Operand ApiParameterOperand(int index);
#ifdef GENERATED_CODE_COVERAGE
diff --git a/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc b/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc
index dfcc8695675..d371c456c1a 100644
--- a/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -711,7 +711,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
-#ifdef WIN32
+#if V8_OS_WIN
// Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -721,7 +721,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
-#endif // WIN32
+#endif // V8_OS_WIN
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
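The WIN32 -> V8_OS_WIN hunk guards the classic Windows stack-commit idiom: write one word to every 4 KiB page, in order, so the guard-page mechanism can grow the stack without faulting. A self-contained analogue:

    #include <stddef.h>
    static void TouchStackPages(volatile char* base, size_t bytes) {
      const size_t kPageSize = 4096;  // assumed page size, as in the hunk
      for (size_t off = 0; off < bytes; off += kPageSize)
        base[off] = 0;  // one write per page, never skipping a page
    }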
@@ -1099,7 +1099,6 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
diff --git a/chromium/v8/src/ia32/stub-cache-ia32.cc b/chromium/v8/src/ia32/stub-cache-ia32.cc
index 123506fa623..bebd7bebc9a 100644
--- a/chromium/v8/src/ia32/stub-cache-ia32.cc
+++ b/chromium/v8/src/ia32/stub-cache-ia32.cc
@@ -137,38 +137,34 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be unique and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register r0,
- Register r1) {
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
+ __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
kInterceptorOrAccessCheckNeededMask);
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
- Register properties = r0;
+ Register properties = scratch0;
__ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
@@ -182,7 +178,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
&done,
properties,
name,
- r1);
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
@@ -392,6 +388,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -400,8 +401,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(scratch);
__ push(receiver);
__ push(holder);
- __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
}
@@ -415,7 +414,7 @@ static void CompileCallLoadPropertyWithInterceptor(
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
}
@@ -516,38 +515,65 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(),
- reinterpret_cast<void*>(function_address));
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
// v8::Arguments::implicit_args_.
- __ mov(ApiParameterOperand(2, returns_handle), eax);
+ __ mov(ApiParameterOperand(2), eax);
__ add(eax, Immediate(argc * kPointerSize));
// v8::Arguments::values_.
- __ mov(ApiParameterOperand(3, returns_handle), eax);
+ __ mov(ApiParameterOperand(3), eax);
// v8::Arguments::length_.
- __ Set(ApiParameterOperand(4, returns_handle), Immediate(argc));
+ __ Set(ApiParameterOperand(4), Immediate(argc));
// v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(5, returns_handle), Immediate(0));
+ __ Set(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(2, returns_handle));
- __ mov(ApiParameterOperand(0, returns_handle), eax);
+ __ lea(eax, ApiParameterOperand(2));
+ __ mov(ApiParameterOperand(0), eax);
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
- ApiParameterOperand(1, returns_handle),
+ ApiParameterOperand(1),
argc + kFastApiCallArguments + 1,
- returns_handle,
kFastApiCallArguments + 1);
}
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Copy return value.
+ __ mov(scratch, Operand(esp, 0));
+ // Assign stack space for the call arguments.
+ __ sub(esp, Immediate(stack_space * kPointerSize));
+ // Move the return address on top of the stack.
+ __ mov(Operand(esp, 0), scratch);
+ // Write holder to stack frame.
+ __ mov(Operand(esp, 1 * kPointerSize), receiver);
+ // Write receiver to stack frame.
+ int index = stack_space;
+ __ mov(Operand(esp, index-- * kPointerSize), receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ mov(Operand(esp, index-- * kPointerSize), values[i]);
+ }
+
+ GenerateFastApiCall(masm, optimization, argc);
+}
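The new overload builds the outgoing frame by hand before delegating to the common GenerateFastApiCall. Reading the writes above gives roughly this esp-relative layout (slot size 4 on ia32; a reconstruction, so treat it as an assumption):

    //   [esp + 0]                     return address, moved to the new top
    //   [esp + 1*4]                   holder (the receiver doubles as the
    //                                 holder in this simple-api-call case)
    //   ...                           remaining fast-api-call slots
    //   [esp + (stack_space-argc)*4]  values[argc-1] .. values[0], ascending
    //   [esp + stack_space*4]         receiver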
+
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -706,7 +732,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
@@ -759,13 +785,13 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<PropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
+ JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
if (Serializer::enabled()) {
@@ -787,7 +813,7 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
Label* miss) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
- masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss, holder_reg, name, scratch1(), scratch2());
@@ -1089,19 +1115,17 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -1267,7 +1291,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1321,26 +1345,6 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- HandlerFrontendFooter(name, success, &miss);
-}
-
-
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
@@ -1361,20 +1365,33 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
__ push(receiver()); // receiver
__ mov(scratch2(), esp);
ASSERT(!scratch2().is(reg));
- __ push(reg); // holder
// Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- __ mov(scratch1(), Immediate(callback));
- __ push(FieldOperand(scratch1(), ExecutableAccessorInfo::kDataOffset));
+ Register scratch = reg.is(scratch1()) ? receiver() : scratch1();
+ __ mov(scratch, Immediate(callback));
+ __ push(FieldOperand(scratch, ExecutableAccessorInfo::kDataOffset));
} else {
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
@@ -1382,6 +1399,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// ReturnValue default value
__ push(Immediate(isolate()->factory()->undefined_value()));
__ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(reg); // holder
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
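The STATIC_ASSERTs pin the push sequence to the PropertyCallbackArguments layout, and the functional change in these hunks is that the holder is now pushed last, after the isolate. The assumed slot map, with index 0 at the first push (the receiver) and later pushes at negative indices:

    //   args[ 0]  this (receiver)
    //   args[-1]  callback data
    //   args[-2]  return value
    //   args[-3]  return value default
    //   args[-4]  isolate
    //   args[-5]  holder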
@@ -1400,29 +1418,23 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
const int kApiArgc = 2 + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(),
- reinterpret_cast<void*>(getter_address));
- __ PrepareCallApiFunction(kApiArgc, returns_handle);
- __ mov(ApiParameterOperand(0, returns_handle), ebx); // name.
+ __ PrepareCallApiFunction(kApiArgc);
+ __ mov(ApiParameterOperand(0), ebx); // name.
__ add(ebx, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1, returns_handle), ebx); // arguments pointer.
+ __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
// Emitting a stub call may try to allocate (if the code is not
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
__ CallApiFunctionAndReturn(getter_address,
thunk_address,
- ApiParameterOperand(2, returns_handle),
+ ApiParameterOperand(2),
kStackSpace,
- returns_handle,
- 6);
+ 7);
}
@@ -1529,7 +1541,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -2479,6 +2491,8 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(eax, &not_smi);
+ // Branchless abs implementation; see:
+ // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ mov(ebx, eax);
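The cited bithacks sequence is the standard sign-mask trick; the mov above begins materializing the mask. A self-contained C++ equivalent (assumes arithmetic right shift on signed ints, as on ia32):

    #include <stdint.h>
    static int32_t BranchlessAbs(int32_t x) {
      int32_t mask = x >> 31;    // 0 when x >= 0, all ones when x < 0
      return (x + mask) ^ mask;  // identity, or ~(x - 1) == -x
    }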
@@ -2889,6 +2903,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch1(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2956,48 +2988,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ cmp(FieldOperand(receiver(), HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ mov(scratch1(), Immediate(cell));
- Operand cell_operand =
- FieldOperand(scratch1(), PropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, factory()->the_hole_value());
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ mov(cell_operand, value());
- // No write barrier here, because cells are always rescanned.
-
- // Return the value (register eax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
-}
-
-
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -3030,7 +3020,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
- Handle<GlobalObject> global) {
+ Handle<JSGlobalObject> global) {
Label success;
NonexistentHandlerFrontend(object, last, name, &success, global);
@@ -3153,7 +3143,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ j(equal, &miss);
} else if (FLAG_debug_code) {
__ cmp(eax, factory()->the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
HandlerFrontendFooter(name, &success, &miss);
@@ -3254,520 +3244,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi and if SSE2 is available a heap number
- // containing a smi and branch if the check fails.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ cmp(FieldOperand(key, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, fail);
- __ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, Operand(xmm_scratch0));
- __ cvtsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- // Check if the key fits in the smi range.
- __ cmp(scratch, 0xc0000000);
- __ j(sign, fail);
- __ SmiTag(scratch);
- __ mov(key, scratch);
- __ bind(&key_ok);
- } else {
- __ JumpIfNotSmi(key, fail);
- }
-}
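The removed GenerateSmiKeyCheck accepted a heap-number key only if it survived a cvttsd2si round trip and fit the 31-bit smi range (the 0xc0000000 compare). Its predicate, rendered as self-contained C++ (the cast relies on x86 truncation semantics, as the stub did):

    #include <stdint.h>
    static bool KeyToSmiSketch(double key, int32_t* out) {
      int32_t i = (int32_t)key;            // cvttsd2si: truncate toward zero
      if ((double)i != key) return false;  // fractional, NaN, or overflow
      if (i < -(1 << 30) || i >= (1 << 30)) return false;  // smi range
      *out = i;                            // the stub then re-tags: i << 1
      return true;
    }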
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow, check_heap_number;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- __ JumpIfNotSmi(eax, &slow);
- } else {
- __ JumpIfNotSmi(eax, &check_heap_number);
- }
-
- // smi case
- __ mov(ebx, eax); // Preserve the value in eax as the return value.
- __ SmiUntag(ebx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- } else { // elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // For the moment we make the slow call to the runtime on
- // processors that don't support SSE2. The code in IntegerConvert
- // (code-stubs-ia32.cc) is roughly what is needed here though the
- // conversion failure case does not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
- if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
- CpuFeatures::IsSupported(SSE3)) {
- CpuFeatureScope scope(masm, SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
-
- // If conversion failed (NaN, infinity, or a number outside
- // signed int64 range), the result is 0x8000000000000000, and
- // we must handle this case in the runtime.
- Label ok;
- __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
- __ j(not_equal, &ok);
- __ cmp(Operand(esp, 0), Immediate(0));
- __ j(not_equal, &ok);
- __ add(esp, Immediate(2 * kPointerSize)); // Restore the stack.
- __ jmp(&slow);
-
- __ bind(&ok);
- __ pop(ebx);
- __ add(esp, Immediate(kPointerSize));
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- } else {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatureScope scope(masm, SSE2);
- __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
- __ cmp(ebx, 0x80000000u);
- __ j(equal, &slow);
- // ebx: untagged integer value
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- // Fall through.
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- __ ret(0); // Return original value.
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, grow, slow, transition_elements_kind;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ mov(FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize), eax);
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- // Do the store and update the write barrier.
- // ecx is a smi, use times_half_pointer_size instead of
- // times_pointer_size
- __ lea(ecx, FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Make sure to preserve the value in register eax.
- __ mov(ebx, eax);
- __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
- // Restore the key, which is known to be the array length.
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
- }
-
- // Store the element at index zero.
- __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ ret(0);
-
- __ bind(&check_capacity);
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
- __ j(equal, &miss_force_generic);
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(edi);
-
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
- &transition_elements_kind, true);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime. Flags are already set by previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(eax, &value_is_smi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Initialize the new FixedDoubleArray.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-
- __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0,
- &transition_elements_kind, true);
-
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32));
- __ mov(FieldOperand(edi, offset + kPointerSize),
- Immediate(kHoleNanUpper32));
- }
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/ic-inl.h b/chromium/v8/src/ic-inl.h
index ca02183dbdb..e6ff2daa62b 100644
--- a/chromium/v8/src/ic-inl.h
+++ b/chromium/v8/src/ic-inl.h
@@ -43,7 +43,6 @@ Address IC::address() const {
Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
- ASSERT(Isolate::Current() == isolate());
Debug* debug = isolate()->debug();
// First check if any break points are active if not just return the address
// of the call.
diff --git a/chromium/v8/src/ic.cc b/chromium/v8/src/ic.cc
index a55160a394e..55187514f90 100644
--- a/chromium/v8/src/ic.cc
+++ b/chromium/v8/src/ic.cc
@@ -375,22 +375,23 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
-void IC::Clear(Address address) {
+void IC::Clear(Isolate* isolate, Address address) {
Code* target = GetTargetAtAddress(address);
// Don't clear debug break inline cache as it will remove the break point.
- if (target->is_debug_break()) return;
+ if (target->is_debug_stub()) return;
switch (target->kind()) {
- case Code::LOAD_IC: return LoadIC::Clear(address, target);
- case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target);
- case Code::STORE_IC: return StoreIC::Clear(address, target);
- case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
+ case Code::LOAD_IC: return LoadIC::Clear(isolate, address, target);
+ case Code::KEYED_LOAD_IC:
+ return KeyedLoadIC::Clear(isolate, address, target);
+ case Code::STORE_IC: return StoreIC::Clear(isolate, address, target);
+ case Code::KEYED_STORE_IC:
+ return KeyedStoreIC::Clear(isolate, address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
- case Code::COMPARE_IC: return CompareIC::Clear(address, target);
+ case Code::COMPARE_IC: return CompareIC::Clear(isolate, address, target);
case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target);
- case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
@@ -405,7 +406,7 @@ void CallICBase::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
Code* code =
- Isolate::Current()->stub_cache()->FindCallInitialize(
+ target->GetIsolate()->stub_cache()->FindCallInitialize(
target->arguments_count(),
contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
target->kind());
@@ -413,40 +414,40 @@ void CallICBase::Clear(Address address, Code* target) {
}
-void KeyedLoadIC::Clear(Address address, Code* target) {
+void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, *initialize_stub());
+ SetTargetAtAddress(address, *initialize_stub(isolate));
}
-void LoadIC::Clear(Address address, Code* target) {
+void LoadIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address, *initialize_stub());
+ SetTargetAtAddress(address, *initialize_stub(isolate));
}
-void StoreIC::Clear(Address address, Code* target) {
+void StoreIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
(Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict()
- : *initialize_stub());
+ ? *initialize_stub_strict(isolate)
+ : *initialize_stub(isolate));
}
-void KeyedStoreIC::Clear(Address address, Code* target) {
+void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
(Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? *initialize_stub_strict()
- : *initialize_stub());
+ ? *initialize_stub_strict(isolate)
+ : *initialize_stub(isolate));
}
-void CompareIC::Clear(Address address, Code* target) {
+void CompareIC::Clear(Isolate* isolate, Address address, Code* target) {
ASSERT(target->major_key() == CodeStub::CompareIC);
CompareIC::State handler_state;
Token::Value op;
@@ -454,7 +455,7 @@ void CompareIC::Clear(Address address, Code* target) {
&handler_state, &op);
// Only clear CompareICs that can retain objects.
if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(op));
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
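These Clear hunks show the commit's other recurring pattern: drop Isolate::Current() TLS lookups and thread the isolate through explicitly. In sketch form (paraphrasing the hunk, not compilable V8 code):

    // before: initialize_stub() / GetRawUninitialized() fetched
    //         Isolate::Current() internally, a TLS read per call
    // after:  Clear(Isolate* isolate, ...) forwards the isolate the
    //         caller already holds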
@@ -501,7 +502,7 @@ static void LookupForRead(Handle<Object> object,
Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
- Handle<Object> delegate = Execution::GetFunctionDelegate(object);
+ Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object);
if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
// Patch the receiver and use the delegate as the function to
@@ -565,7 +566,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
// the element if so.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<Object> result = Object::GetElement(object, index);
+ Handle<Object> result = Object::GetElement(isolate(), object, index);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
if (result->IsJSFunction()) return *result;
@@ -926,7 +927,7 @@ MaybeObject* LoadIC::Load(State state,
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
}
- return *Accessors::FunctionGetPrototype(object);
+ return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
}
}
@@ -995,7 +996,7 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
bool IC::UpdatePolymorphicIC(State state,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code,
StrictModeFlag strict_mode) {
@@ -1076,44 +1077,44 @@ Handle<Code> StoreIC::ComputePolymorphicIC(MapHandleList* receiver_maps,
}
-void LoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void LoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_load_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicLoadIC(
receiver, handler, name);
set_target(*ic);
}
-void KeyedLoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void KeyedLoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_keyed_load_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedLoadIC(
receiver, handler, name);
set_target(*ic);
}
-void StoreIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void StoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_store_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicStoreIC(
receiver, handler, name, strict_mode);
set_target(*ic);
}
-void KeyedStoreIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+void KeyedStoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
- if (handler->type() == Code::NORMAL) return set_target(*handler);
+ if (handler->is_keyed_store_stub()) return set_target(*handler);
Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedStoreIC(
receiver, handler, name, strict_mode);
set_target(*ic);
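
Note: all four UpdateMonomorphicIC hunks swap the generic type check (handler->type() == Code::NORMAL) for a kind-specific predicate: a complete IC stub is installed directly, while a bare handler is first wrapped in a monomorphic IC. A compilable miniature of that dispatch, with simplified stand-in types:

    #include <cstdio>

    // Toy stand-in for V8's Code object and its kind predicates.
    struct Code {
      enum Kind { LOAD_STUB, KEYED_LOAD_STUB, OTHER };
      Kind kind;
      bool is_load_stub() const { return kind == LOAD_STUB; }
    };

    void UpdateMonomorphic(const Code& handler) {
      if (handler.is_load_stub()) {
        std::printf("install the handler directly\n");  // already a full IC
        return;
      }
      std::printf("wrap the handler in a monomorphic IC first\n");
    }

    int main() {
      UpdateMonomorphic(Code{Code::LOAD_STUB});
      UpdateMonomorphic(Code{Code::OTHER});
    }
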
@@ -1154,7 +1155,7 @@ bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
// not necessarily equal to target()->state().
void IC::PatchCache(State state,
StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code) {
switch (state) {
@@ -1264,32 +1265,26 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
Handle<String> name) {
- // Bail out if the result is not cacheable.
- if (!lookup->IsCacheable()) {
- set_target(*generic_stub());
- return;
- }
+ if (!object->IsHeapObject()) return;
- // TODO(jkummerow): It would be nice to support non-JSObjects in
- // UpdateCaches, then we wouldn't need to go generic here.
- if (!object->IsJSObject()) {
- set_target(*generic_stub());
- return;
- }
+ Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<Code> code;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
code = pre_monomorphic_stub();
+ } else if (!lookup->IsCacheable()) {
+ // Bail out if the result is not cacheable.
+ code = slow_stub();
+ } else if (!object->IsJSObject()) {
+ // TODO(jkummerow): It would be nice to support non-JSObjects in
+ // ComputeLoadHandler, then we wouldn't need to go generic here.
+ code = slow_stub();
} else {
- code = ComputeLoadHandler(lookup, receiver, name);
- if (code.is_null()) {
- set_target(*generic_stub());
- return;
- }
+ code = ComputeLoadHandler(lookup, Handle<JSObject>::cast(receiver), name);
+ if (code.is_null()) code = slow_stub();
}
PatchCache(state, kNonStrictMode, receiver, name, code);
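
Note: the rewritten UpdateCaches no longer bails straight to the generic stub; it picks a stub through a small decision ladder and always ends in PatchCache. A condensed, hypothetical sketch of that ladder (the enum names are stand-ins, not V8's):

    #include <cstdio>

    enum State { UNINITIALIZED, MONOMORPHIC };
    enum Stub { PRE_MONOMORPHIC, SLOW, HANDLER };

    Stub ChooseLoadStub(State state, bool cacheable, bool is_js_object,
                        bool handler_found) {
      if (state == UNINITIALIZED) return PRE_MONOMORPHIC;  // delay monomorphism
      if (!cacheable) return SLOW;      // lookup result cannot be cached
      if (!is_js_object) return SLOW;   // handler computation needs a JSObject
      return handler_found ? HANDLER : SLOW;
    }

    int main() {
      std::printf("%d\n", ChooseLoadStub(UNINITIALIZED, true, true, true));
      std::printf("%d\n", ChooseLoadStub(MONOMORPHIC, false, true, true));
    }
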
@@ -1356,8 +1351,16 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
if (!getter->IsJSFunction()) break;
if (holder->IsGlobalObject()) break;
if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver) &&
+ FLAG_js_accessor_ics) {
+ return isolate()->stub_cache()->ComputeLoadCallback(
+ name, receiver, holder, call_optimization);
+ }
return isolate()->stub_cache()->ComputeLoadViaGetter(
- name, receiver, holder, Handle<JSFunction>::cast(getter));
+ name, receiver, holder, function);
} else if (receiver->IsJSArray() &&
name->Equals(isolate()->heap()->length_string())) {
PropertyIndex lengthIndex =
@@ -1543,13 +1546,30 @@ Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
case CALLBACKS: {
Handle<Object> callback_object(lookup->GetCallbackObject(), isolate());
// TODO(dcarney): Handle DeclaredAccessorInfo correctly.
- if (!callback_object->IsExecutableAccessorInfo()) break;
- Handle<ExecutableAccessorInfo> callback =
- Handle<ExecutableAccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) break;
- if (!callback->IsCompatibleReceiver(*receiver)) break;
- return isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, callback);
+ if (callback_object->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> callback =
+ Handle<ExecutableAccessorInfo>::cast(callback_object);
+ if (v8::ToCData<Address>(callback->getter()) == 0) break;
+ if (!callback->IsCompatibleReceiver(*receiver)) break;
+ return isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ name, receiver, holder, callback);
+ } else if (callback_object->IsAccessorPair()) {
+ Handle<Object> getter(
+ Handle<AccessorPair>::cast(callback_object)->getter(),
+ isolate());
+ if (!getter->IsJSFunction()) break;
+ if (holder->IsGlobalObject()) break;
+ if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver) &&
+ FLAG_js_accessor_ics) {
+ return isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ name, receiver, holder, call_optimization);
+ }
+ }
+ break;
}
case INTERCEPTOR:
ASSERT(HasInterceptorGetter(lookup->holder()));
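
Note: the new AccessorPair branch only compiles a keyed load as a direct API callback after a chain of guards. A self-contained sketch of that chain; every parameter name here is a hypothetical stand-in for the checks in the hunk above:

    #include <cstdio>

    bool CanUseApiCallback(bool getter_is_function, bool holder_is_global,
                           bool holder_has_fast_properties, bool simple_api_call,
                           bool compatible_receiver, bool js_accessor_ics_enabled) {
      if (!getter_is_function) return false;         // only JS function getters
      if (holder_is_global) return false;            // global holders bail out
      if (!holder_has_fast_properties) return false; // slow-mode holders bail out
      return simple_api_call && compatible_receiver && js_accessor_ics_enabled;
    }

    int main() {
      std::printf("%d\n", CanUseApiCallback(true, false, true, true, true, true));
      std::printf("%d\n", CanUseApiCallback(true, true, true, true, true, true));
    }
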
@@ -1616,7 +1636,8 @@ static bool LookupForWrite(Handle<JSObject> receiver,
if (!value->FitsRepresentation(target_details.representation())) {
Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
Map::GeneralizeRepresentation(
- target, target->LastAdded(), value->OptimalRepresentation());
+ target, target->LastAdded(),
+ value->OptimalRepresentation(), FORCE_FIELD);
// Look up the transition again since the transition tree may have changed
// entirely by the migration above.
receiver->map()->LookupTransition(*holder, *name, lookup);
@@ -1709,21 +1730,30 @@ MaybeObject* StoreIC::Store(State state,
}
LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, value, &lookup, &state)) {
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
- } else if (strict_mode == kStrictMode &&
- !(lookup.IsProperty() && lookup.IsReadOnly()) &&
- IsUndeclaredGlobal(object)) {
+ bool can_store = LookupForWrite(receiver, name, value, &lookup, &state);
+ if (!can_store &&
+ strict_mode == kStrictMode &&
+ !(lookup.IsProperty() && lookup.IsReadOnly()) &&
+ IsUndeclaredGlobal(object)) {
// Strict mode doesn't allow setting a non-existent global property.
return ReferenceError("not_defined", name);
- } else if (FLAG_use_ic &&
- (lookup.IsNormal() ||
- (lookup.IsField() && lookup.CanHoldValue(value)))) {
- Handle<Code> stub = strict_mode == kStrictMode
- ? generic_stub_strict() : generic_stub();
- set_target(*stub);
+ }
+ if (FLAG_use_ic) {
+ if (state == UNINITIALIZED) {
+ Handle<Code> stub = (strict_mode == kStrictMode)
+ ? pre_monomorphic_stub_strict()
+ : pre_monomorphic_stub();
+ set_target(*stub);
+ TRACE_IC("StoreIC", name, state, *stub);
+ } else if (can_store) {
+ UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ } else if (!name->IsCacheable(isolate()) ||
+ lookup.IsNormal() ||
+ (lookup.IsField() && lookup.CanHoldValue(value))) {
+ Handle<Code> stub = (strict_mode == kStrictMode) ? generic_stub_strict()
+ : generic_stub();
+ set_target(*stub);
+ }
}
// Set the property.
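
Note: the restructured Store computes the lookup result once, keeps the strict-mode early exit, and folds stub selection into a single FLAG_use_ic block. A condensed sketch of that control flow (it omits the read-only-property guard of the real code):

    #include <cstdio>

    enum State { UNINITIALIZED, OTHER_STATE };
    enum Outcome { THROW_REFERENCE_ERROR, PRE_MONOMORPHIC, UPDATE_CACHES, GENERIC };

    Outcome Store(State state, bool can_store, bool strict_mode,
                  bool undeclared_global) {
      if (!can_store && strict_mode && undeclared_global)
        return THROW_REFERENCE_ERROR;  // strict mode: no new globals via store
      if (state == UNINITIALIZED) return PRE_MONOMORPHIC;
      if (can_store) return UPDATE_CACHES;
      return GENERIC;
    }

    int main() {
      std::printf("%d\n", Store(UNINITIALIZED, true, false, false));
      std::printf("%d\n", Store(OTHER_STATE, false, true, true));
    }
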
@@ -1797,6 +1827,14 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
if (!setter->IsJSFunction()) break;
if (holder->IsGlobalObject()) break;
if (!holder->HasFastProperties()) break;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(*receiver) &&
+ FLAG_js_accessor_ics) {
+ return isolate()->stub_cache()->ComputeStoreCallback(
+ name, receiver, holder, call_optimization, strict_mode);
+ }
return isolate()->stub_cache()->ComputeStoreViaSetter(
name, receiver, holder, Handle<JSFunction>::cast(setter),
strict_mode);
@@ -1848,18 +1886,6 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
}
- if (!FLAG_compiled_keyed_stores &&
- (store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS)) {
- // TODO(danno): We'll soon handle MONOMORPHIC ICs that also support
- // copying COW arrays and silently ignoring some OOB stores into external
- // arrays, but for now use the generic.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "COW/OOB external array");
- return strict_mode == kStrictMode
- ? generic_stub_strict()
- : generic_stub();
- }
-
State ic_state = target()->ic_state();
Handle<Map> receiver_map(receiver->map(), isolate());
if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
@@ -2140,8 +2166,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (receiver->map()->is_deprecated()) {
JSObject::MigrateInstance(receiver);
}
- bool key_is_smi_like = key->IsSmi() ||
- (FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
+ bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure();
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
@@ -2589,33 +2614,12 @@ void BinaryOpIC::StubInfoToType(int minor_key,
}
-MaybeObject* UnaryOpIC::Transition(Handle<Object> object) {
- Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
- UnaryOpStub stub(extra_ic_state);
-
- stub.UpdateStatus(object);
-
- Handle<Code> code = stub.GetCode(isolate());
- set_target(*code);
-
- return stub.Result(object, isolate());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss) {
- HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- UnaryOpIC ic(isolate);
- return ic.Transition(object);
-}
-
-
static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
Token::Value op) {
v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
if (type.IsSmi()) return BinaryOpIC::SMI;
if (type.IsInteger32()) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
return BinaryOpIC::INT32;
}
if (type.IsNumber()) return BinaryOpIC::NUMBER;
@@ -2627,7 +2631,7 @@ static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
op == Token::SAR ||
op == Token::SHL ||
op == Token::SHR) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
return BinaryOpIC::INT32;
}
return BinaryOpIC::ODDBALL;
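
Note: SmiValuesAre32Bits() replaces raw comparisons against kSmiValueSize because the Smi payload width is platform-dependent, and a named predicate reads better than a magic constant. A small sketch of the distinction; the 31/32 split mirrors V8's 32-bit versus 64-bit layouts, but the constants here are illustrative:

    #include <cstdio>

    // On 64-bit targets a Smi can hold a full 32-bit payload, so int32
    // arithmetic stays in the SMI state; on 32-bit targets only 31 bits fit.
    constexpr int kSmiValueSize = sizeof(void*) == 8 ? 32 : 31;
    constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }

    int main() {
      std::printf("32-bit smis: %s\n", SmiValuesAre32Bits() ? "yes" : "no");
    }
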
@@ -2705,7 +2709,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
if (op == Token::DIV ||
op == Token::MUL ||
op == Token::SHR ||
- kSmiValueSize == 32) {
+ SmiValuesAre32Bits()) {
// Arithmetic on two Smi inputs has yielded a heap number.
// That is the only way to get here from the Smi stub.
// With 32-bit Smis, all overflows give heap numbers, but with
@@ -2792,7 +2796,8 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
bool caught_exception;
Handle<Object> builtin_args[] = { right };
- Handle<Object> result = Execution::Call(builtin_function,
+ Handle<Object> result = Execution::Call(isolate,
+ builtin_function,
left,
ARRAY_SIZE(builtin_args),
builtin_args,
@@ -2804,10 +2809,10 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
}
-Code* CompareIC::GetRawUninitialized(Token::Value op) {
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code, Isolate::Current()));
+ CHECK(stub.FindCodeInCache(&code, isolate));
return code;
}
diff --git a/chromium/v8/src/ic.h b/chromium/v8/src/ic.h
index 7820d407ec1..8f09e1d0a2c 100644
--- a/chromium/v8/src/ic.h
+++ b/chromium/v8/src/ic.h
@@ -102,7 +102,7 @@ class IC {
static State StateFrom(Code* target, Object* receiver, Object* name);
// Clear the inline cache to initial state.
- static void Clear(Address address);
+ static void Clear(Isolate* isolate, Address address);
// Computes the reloc info for this IC. This is a fairly expensive
// operation as it has to search through the heap to find the code
@@ -167,14 +167,14 @@ class IC {
static inline void SetTargetAtAddress(Address address, Code* target);
static void PostPatching(Address address, Code* target, Code* old_target);
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode) {
set_target(*handler);
}
bool UpdatePolymorphicIC(State state,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code,
StrictModeFlag strict_mode);
@@ -192,7 +192,7 @@ class IC {
bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
void PatchCache(State state,
StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<String> name,
Handle<Code> code);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
@@ -388,7 +388,7 @@ class LoadIC: public IC {
protected:
virtual Code::Kind kind() const { return Code::LOAD_IC; }
- virtual Handle<Code> generic_stub() const {
+ virtual Handle<Code> slow_stub() const {
return isolate()->builtins()->LoadIC_Slow();
}
@@ -403,7 +403,7 @@ class LoadIC: public IC {
Handle<Object> object,
Handle<String> name);
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -420,14 +420,14 @@ class LoadIC: public IC {
private:
// Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->LoadIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->LoadIC_Initialize();
}
virtual Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->LoadIC_PreMonomorphic();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
@@ -483,9 +483,12 @@ class KeyedLoadIC: public LoadIC {
virtual Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->KeyedLoadIC_Slow();
+ }
// Update the inline cache.
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -496,8 +499,8 @@ class KeyedLoadIC: public LoadIC {
private:
// Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedLoadIC_Initialize();
}
virtual Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
@@ -512,7 +515,7 @@ class KeyedLoadIC: public LoadIC {
return isolate()->builtins()->KeyedLoadIC_String();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
@@ -527,6 +530,9 @@ class StoreIC: public IC {
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode);
@@ -558,6 +564,12 @@ class StoreIC: public IC {
virtual Handle<Code> generic_stub_strict() const {
return isolate()->builtins()->StoreIC_Generic_Strict();
}
+ virtual Handle<Code> pre_monomorphic_stub() const {
+ return isolate()->builtins()->StoreIC_PreMonomorphic();
+ }
+ virtual Handle<Code> pre_monomorphic_stub_strict() const {
+ return isolate()->builtins()->StoreIC_PreMonomorphic_Strict();
+ }
virtual Handle<Code> global_proxy_stub() {
return isolate()->builtins()->StoreIC_GlobalProxy();
}
@@ -565,7 +577,7 @@ class StoreIC: public IC {
return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
}
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -601,13 +613,13 @@ class StoreIC: public IC {
IC::set_target(code);
}
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->StoreIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->StoreIC_Initialize();
}
- static Handle<Code> initialize_stub_strict() {
- return Isolate::Current()->builtins()->StoreIC_Initialize_Strict();
+ static Handle<Code> initialize_stub_strict(Isolate* isolate) {
+ return isolate->builtins()->StoreIC_Initialize_Strict();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
friend class IC;
};
@@ -643,6 +655,9 @@ class KeyedStoreIC: public StoreIC {
static void GenerateInitialize(MacroAssembler* masm) {
GenerateMiss(masm, MISS);
}
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm, MISS);
+ }
static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
@@ -660,6 +675,12 @@ class KeyedStoreIC: public StoreIC {
Handle<Object> value);
virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+ virtual Handle<Code> pre_monomorphic_stub() const {
+ return isolate()->builtins()->KeyedStoreIC_PreMonomorphic();
+ }
+ virtual Handle<Code> pre_monomorphic_stub_strict() const {
+ return isolate()->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+ }
virtual Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
@@ -671,7 +692,7 @@ class KeyedStoreIC: public StoreIC {
KeyedAccessStoreMode store_mode,
StrictModeFlag strict_mode);
- virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<String> name,
StrictModeFlag strict_mode);
@@ -685,11 +706,11 @@ class KeyedStoreIC: public StoreIC {
}
// Stub accessors.
- static Handle<Code> initialize_stub() {
- return Isolate::Current()->builtins()->KeyedStoreIC_Initialize();
+ static Handle<Code> initialize_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedStoreIC_Initialize();
}
- static Handle<Code> initialize_stub_strict() {
- return Isolate::Current()->builtins()->KeyedStoreIC_Initialize_Strict();
+ static Handle<Code> initialize_stub_strict(Isolate* isolate) {
+ return isolate->builtins()->KeyedStoreIC_Initialize_Strict();
}
Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedStoreIC_Generic();
@@ -701,7 +722,7 @@ class KeyedStoreIC: public StoreIC {
return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
}
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
Handle<Object> key,
@@ -714,14 +735,6 @@ class KeyedStoreIC: public StoreIC {
};
-class UnaryOpIC: public IC {
- public:
- explicit UnaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
-
- MUST_USE_RESULT MaybeObject* Transition(Handle<Object> object);
-};
-
-
// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
class BinaryOpIC: public IC {
public:
@@ -815,9 +828,9 @@ class CompareIC: public IC {
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- static Code* GetRawUninitialized(Token::Value op);
+ static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
- static void Clear(Address address, Code* target);
+ static void Clear(Isolate* isolate, Address address, Code* target);
Token::Value op_;
diff --git a/chromium/v8/src/isolate-inl.h b/chromium/v8/src/isolate-inl.h
index 9fb16fbe96d..45076f56578 100644
--- a/chromium/v8/src/isolate-inl.h
+++ b/chromium/v8/src/isolate-inl.h
@@ -28,20 +28,19 @@
#ifndef V8_ISOLATE_INL_H_
#define V8_ISOLATE_INL_H_
-#include "isolate.h"
-
#include "debug.h"
+#include "isolate.h"
+#include "utils/random-number-generator.h"
namespace v8 {
namespace internal {
-SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+SaveContext::SaveContext(Isolate* isolate)
+ : isolate_(isolate),
+ prev_(isolate->save_context()) {
if (isolate->context() != NULL) {
context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- dummy_ = Handle<Context>(isolate->context());
-#endif
}
isolate->set_save_context(this);
@@ -68,6 +67,13 @@ bool Isolate::DebuggerHasBreakPoints() {
}
+RandomNumberGenerator* Isolate::random_number_generator() {
+ if (random_number_generator_ == NULL) {
+ random_number_generator_ = new RandomNumberGenerator;
+ }
+ return random_number_generator_;
+}
+
} } // namespace v8::internal
#endif // V8_ISOLATE_INL_H_
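
Note: random_number_generator() is a classic lazy accessor: allocate on first use, cache the pointer, and pair it with the delete added to ~Isolate() in isolate.cc below. A standalone sketch of the shape, single-threaded as in the inline header (no locking shown):

    #include <cstdio>

    struct RandomNumberGenerator {
      int next() { return 4; }  // placeholder payload
    };

    struct Isolate {
      RandomNumberGenerator* random_number_generator() {
        if (rng_ == nullptr) rng_ = new RandomNumberGenerator;  // first use
        return rng_;                                            // cached after
      }
      ~Isolate() { delete rng_; }  // mirrors the delete in ~Isolate()
     private:
      RandomNumberGenerator* rng_ = nullptr;
    };

    int main() {
      Isolate isolate;
      std::printf("%d\n", isolate.random_number_generator()->next());
    }
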
diff --git a/chromium/v8/src/isolate.cc b/chromium/v8/src/isolate.cc
index 61f1e2dcfa5..6fa496a9026 100644
--- a/chromium/v8/src/isolate.cc
+++ b/chromium/v8/src/isolate.cc
@@ -39,7 +39,7 @@
#include "deoptimizer.h"
#include "heap-profiler.h"
#include "hydrogen.h"
-#include "isolate.h"
+#include "isolate-inl.h"
#include "lithium-allocator.h"
#include "log.h"
#include "marking-thread.h"
@@ -54,6 +54,7 @@
#include "spaces.h"
#include "stub-cache.h"
#include "sweeper-thread.h"
+#include "utils/random-number-generator.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -137,7 +138,7 @@ v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
int SystemThreadManager::NumberOfParallelSystemThreads(
ParallelSystemComponent type) {
- int number_of_threads = Min(OS::NumberOfCores(), kMaxThreads);
+ int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
ASSERT(number_of_threads > 0);
if (number_of_threads == 1) {
return 0;
@@ -226,8 +227,8 @@ class PreallocatedMemoryThread: public Thread {
PreallocatedMemoryThread()
: Thread("v8:PreallocMem"),
keep_running_(true),
- wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
- data_ready_semaphore_(OS::CreateSemaphore(0)),
+ wait_for_ever_semaphore_(new Semaphore(0)),
+ data_ready_semaphore_(new Semaphore(0)),
data_(NULL),
length_(0) {
}
@@ -343,35 +344,23 @@ Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
#ifdef DEBUG
Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+Mutex Isolate::process_wide_mutex_;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
-Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
- ThreadId thread_id) {
- ASSERT(!thread_id.Equals(ThreadId::Invalid()));
- PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
- {
- ScopedLock lock(process_wide_mutex_);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
- thread_data_table_->Insert(per_thread);
- ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
- }
- return per_thread;
-}
-
-
Isolate::PerIsolateThreadData*
Isolate::FindOrAllocatePerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
if (per_thread == NULL) {
- per_thread = AllocatePerIsolateThreadData(thread_id);
+ per_thread = new PerIsolateThreadData(this, thread_id);
+ thread_data_table_->Insert(per_thread);
}
}
+ ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
return per_thread;
}
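
Note: the locking hunks replace a heap-allocated Mutex* from OS::CreateMutex() with a statically constructed mutex taken through a scoped RAII guard, which removes both the allocation and any unlock-on-every-path bookkeeping. A sketch using std::mutex and std::lock_guard as stand-ins for V8's Mutex and LockGuard<Mutex>:

    #include <cstdio>
    #include <mutex>

    // Was: static Mutex* process_wide_mutex_ = OS::CreateMutex();
    static std::mutex process_wide_mutex;

    void WithTable() {
      std::lock_guard<std::mutex> lock(process_wide_mutex);  // unlocks on scope exit
      std::printf("thread data table access serialized\n");
    }

    int main() { WithTable(); }
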
@@ -386,7 +375,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
ThreadId thread_id) {
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
}
return per_thread;
@@ -394,7 +383,7 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
@@ -522,7 +511,7 @@ void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
#ifdef DEBUG
bool Isolate::IsDeferredHandle(Object** handle) {
// Each DeferredHandles instance keeps the handles to one job in the
- // parallel recompilation queue, containing a list of blocks. Each block
+ // concurrent recompilation queue, containing a list of blocks. Each block
// contains kHandleBlockSize handles except for the first block, which may
// not be fully filled.
// We iterate through all the blocks to see whether the argument handle
@@ -567,11 +556,11 @@ Handle<String> Isolate::StackTraceString() {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
HeapStringAllocator allocator;
- StringStream::ClearMentionedObjectCache();
+ StringStream::ClearMentionedObjectCache(this);
StringStream accumulator(&allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
- Handle<String> stack_trace = accumulator.ToString();
+ Handle<String> stack_trace = accumulator.ToString(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
return stack_trace;
@@ -734,7 +723,9 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
Handle<String> line_key =
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
- Handle<String> script_key =
+ Handle<String> script_id_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptId"));
+ Handle<String> script_name_key =
factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
Handle<String> script_name_or_source_url_key =
factory()->InternalizeOneByteString(
@@ -790,11 +781,20 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE));
}
+ if (options & StackTrace::kScriptId) {
+ Handle<Smi> script_id(script->id(), this);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, script_id_key, script_id,
+ NONE));
+ }
+
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name(), this);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
- stack_frame, script_key, script_name, NONE));
+ stack_frame, script_name_key, script_name,
+ NONE));
}
if (options & StackTrace::kScriptNameOrSourceURL) {
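
Note: each StackTrace option bit gates one property on the captured frame object, and this hunk adds a scriptId bit alongside scriptName. A toy sketch of that gating; the bit values are illustrative, not the ones in v8.h:

    #include <cstdio>

    enum StackTraceOptions {
      kLineNumber = 1 << 0,
      kScriptId   = 1 << 1,  // the newly added option
      kScriptName = 1 << 2,
    };

    void CaptureFrame(int options) {
      if (options & kLineNumber) std::printf("set lineNumber\n");
      if (options & kScriptId)   std::printf("set scriptId\n");   // new property
      if (options & kScriptName) std::printf("set scriptName\n");
    }

    int main() { CaptureFrame(kScriptId | kScriptName); }
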
@@ -860,13 +860,13 @@ void Isolate::PrintStack(FILE* out) {
allocator = preallocated_message_space_;
}
- StringStream::ClearMentionedObjectCache();
+ StringStream::ClearMentionedObjectCache(this);
StringStream accumulator(allocator);
incomplete_message_ = &accumulator;
PrintStack(&accumulator);
accumulator.OutputToFile(out);
InitializeLoggingAndCounters();
- accumulator.Log();
+ accumulator.Log(this);
incomplete_message_ = NULL;
stack_trace_nesting_level_ = 0;
if (preallocated_message_space_ == NULL) {
@@ -904,7 +904,7 @@ void Isolate::PrintStack(StringStream* accumulator) {
}
// The MentionedObjectCache is not GC-proof at the moment.
DisallowHeapAllocation no_gc;
- ASSERT(StringStream::IsMentionedObjectCacheClear());
+ ASSERT(StringStream::IsMentionedObjectCacheClear(this));
// Avoid printing anything if there are no frames.
if (c_entry_fp(thread_local_top()) == 0) return;
@@ -917,7 +917,7 @@ void Isolate::PrintStack(StringStream* accumulator) {
"\n==== Details ================================================\n\n");
PrintFrames(this, accumulator, StackFrame::DETAILS);
- accumulator->PrintMentionedObjectCache();
+ accumulator->PrintMentionedObjectCache(this);
accumulator->Add("=====================\n\n");
}
@@ -1358,7 +1358,8 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// exception object to be set later must not be turned into a string.
if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
bool failed = false;
- exception_arg = Execution::ToDetailString(exception_arg, &failed);
+ exception_arg =
+ Execution::ToDetailString(this, exception_arg, &failed);
if (failed) {
exception_arg = factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("exception"));
@@ -1400,17 +1401,19 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
// to the console for easier debugging.
int line_number = GetScriptLineNumberSafe(location->script(),
location->start_pos());
- if (exception->IsString()) {
+ if (exception->IsString() && location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error: %s in %s at line %d.\n",
*String::cast(exception)->ToCString(),
*String::cast(location->script()->name())->ToCString(),
line_number + 1);
- } else {
+ } else if (location->script()->name()->IsString()) {
OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
*String::cast(location->script()->name())->ToCString(),
line_number + 1);
+ } else {
+ OS::PrintError("Extension or internal compilation error.\n");
}
}
}
@@ -1703,15 +1706,6 @@ void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
}
-void Isolate::ThreadDataTable::Remove(Isolate* isolate,
- ThreadId thread_id) {
- PerIsolateThreadData* data = Lookup(isolate, thread_id);
- if (data != NULL) {
- Remove(data);
- }
-}
-
-
void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
PerIsolateThreadData* data = list_;
while (data != NULL) {
@@ -1748,11 +1742,7 @@ Isolate::Isolate()
compilation_cache_(NULL),
counters_(NULL),
code_range_(NULL),
- // Must be initialized early to allow v8::SetResourceConstraints calls.
- break_access_(OS::CreateMutex()),
debugger_initialized_(false),
- // Must be initialized early to allow v8::Debug calls.
- debugger_access_(OS::CreateMutex()),
logger_(NULL),
stats_table_(NULL),
stub_cache_(NULL),
@@ -1774,6 +1764,7 @@ Isolate::Isolate()
inner_pointer_to_code_cache_(NULL),
write_iterator_(NULL),
global_handles_(NULL),
+ eternal_handles_(NULL),
context_switcher_(NULL),
thread_manager_(NULL),
fp_stubs_generated_(false),
@@ -1782,7 +1773,12 @@ Isolate::Isolate()
regexp_stack_(NULL),
date_cache_(NULL),
code_stub_interface_descriptors_(NULL),
- context_exit_happened_(false),
+ // TODO(bmeurer): Initialized lazily because it depends on flags; can
+ // be fixed once the default isolate cleanup is done.
+ random_number_generator_(NULL),
+ is_memory_constrained_(false),
+ has_fatal_error_(false),
+ use_crankshaft_(true),
initialized_from_snapshot_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
@@ -1791,7 +1787,6 @@ Isolate::Isolate()
optimizing_compiler_thread_(this),
marking_thread_(NULL),
sweeper_thread_(NULL),
- callback_table_(NULL),
stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1853,7 +1848,7 @@ void Isolate::TearDown() {
Deinit();
- { ScopedLock lock(process_wide_mutex_);
+ { LockGuard<Mutex> lock_guard(&process_wide_mutex_);
thread_data_table_->RemoveAllThreads(this);
}
@@ -1884,7 +1879,7 @@ void Isolate::Deinit() {
debugger()->UnloadDebugger();
#endif
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
+ if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Stop();
if (FLAG_sweeper_threads > 0) {
for (int i = 0; i < FLAG_sweeper_threads; i++) {
@@ -2024,10 +2019,6 @@ Isolate::~Isolate() {
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
- delete break_access_;
- break_access_ = NULL;
- delete debugger_access_;
- debugger_access_ = NULL;
delete compilation_cache_;
compilation_cache_ = NULL;
@@ -2052,6 +2043,8 @@ Isolate::~Isolate() {
code_range_ = NULL;
delete global_handles_;
global_handles_ = NULL;
+ delete eternal_handles_;
+ eternal_handles_ = NULL;
delete string_stream_debug_object_cache_;
string_stream_debug_object_cache_ = NULL;
@@ -2059,8 +2052,8 @@ Isolate::~Isolate() {
delete external_reference_table_;
external_reference_table_ = NULL;
- delete callback_table_;
- callback_table_ = NULL;
+ delete random_number_generator_;
+ random_number_generator_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
delete debugger_;
@@ -2127,7 +2120,7 @@ void Isolate::InitializeLoggingAndCounters() {
void Isolate::InitializeDebugger() {
#ifdef ENABLE_DEBUGGER_SUPPORT
- ScopedLock lock(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access());
if (NoBarrier_Load(&debugger_initialized_)) return;
InitializeLoggingAndCounters();
debug_ = new Debug(this);
@@ -2139,11 +2132,16 @@ void Isolate::InitializeDebugger() {
bool Isolate::Init(Deserializer* des) {
ASSERT(state_ != INITIALIZED);
- ASSERT(Isolate::Current() == this);
TRACE_ISOLATE(init);
stress_deopt_count_ = FLAG_deopt_every_n_times;
+ has_fatal_error_ = false;
+
+ use_crankshaft_ = FLAG_crankshaft
+ && !Serializer::enabled()
+ && CPU::SupportsCrankshaft();
+
if (function_entry_hook() != NULL) {
// When function entry hooking is in effect, we have to create the code
// stubs from scratch to get entry hooks, rather than loading the previously
@@ -2162,8 +2160,7 @@ bool Isolate::Init(Deserializer* des) {
memory_allocator_ = new MemoryAllocator(this);
code_range_ = new CodeRange(this);
- // Safe after setting Heap::isolate_, initializing StackGuard and
- // ensuring that Isolate::Current() == this.
+ // Safe after setting Heap::isolate_ and initializing StackGuard.
heap_.SetStackLimits();
#define ASSIGN_ELEMENT(CamelName, hacker_name) \
@@ -2175,7 +2172,7 @@ bool Isolate::Init(Deserializer* des) {
string_tracker_ = new StringTracker();
string_tracker_->isolate_ = this;
compilation_cache_ = new CompilationCache(this);
- transcendental_cache_ = new TranscendentalCache();
+ transcendental_cache_ = new TranscendentalCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
@@ -2183,6 +2180,7 @@ bool Isolate::Init(Deserializer* des) {
inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
write_iterator_ = new ConsStringIteratorOp();
global_handles_ = new GlobalHandles(this);
+ eternal_handles_ = new EternalHandles();
bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
stub_cache_ = new StubCache(this);
@@ -2235,7 +2233,7 @@ bool Isolate::Init(Deserializer* des) {
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
- builtins_.SetUp(create_heap_objects);
+ builtins_.SetUp(this, create_heap_objects);
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
@@ -2259,7 +2257,7 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
- des->Deserialize();
+ des->Deserialize(this);
}
stub_cache_->Initialize();
@@ -2324,9 +2322,10 @@ bool Isolate::Init(Deserializer* des) {
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
+ FastNewClosureStub::InstallDescriptors(this);
}
- if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+ if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Start();
if (FLAG_marking_threads > 0) {
marking_thread_ = new MarkingThread*[FLAG_marking_threads];
diff --git a/chromium/v8/src/isolate.h b/chromium/v8/src/isolate.h
index c0083177373..b826ec596ab 100644
--- a/chromium/v8/src/isolate.h
+++ b/chromium/v8/src/isolate.h
@@ -52,7 +52,6 @@ namespace v8 {
namespace internal {
class Bootstrapper;
-class CallbackTable;
class CodeGenerator;
class CodeRange;
struct CodeStubInterfaceDescriptor;
@@ -78,6 +77,7 @@ class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
class MarkingThread;
class PreallocatedMemoryThread;
+class RandomNumberGenerator;
class RegExpStack;
class SaveContext;
class UnicodeCache;
@@ -321,7 +321,6 @@ class SystemThreadManager {
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
- V(v8::Debug::EventCallback, debug_event_callback, NULL) \
V(DebuggerAgent*, debugger_agent_instance, NULL)
#else
@@ -361,7 +360,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
- V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
@@ -544,10 +542,10 @@ class Isolate {
static void EnterDefaultIsolate();
// Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
+ RecursiveMutex* break_access() { return &break_access_; }
// Mutex for serializing access to debugger.
- Mutex* debugger_access() { return debugger_access_; }
+ RecursiveMutex* debugger_access() { return &debugger_access_; }
Address get_address_from_id(AddressId id);
@@ -661,9 +659,9 @@ class Isolate {
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
- // Bottom JS entry (see StackTracer::Trace in sampler.cc).
- static Address js_entry_sp(ThreadLocalTop* thread) {
- return thread->js_entry_sp_;
+ // Bottom JS entry.
+ Address js_entry_sp() {
+ return thread_local_top_.js_entry_sp_;
}
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
@@ -922,6 +920,8 @@ class Isolate {
GlobalHandles* global_handles() { return global_handles_; }
+ EternalHandles* eternal_handles() { return eternal_handles_; }
+
ThreadManager* thread_manager() { return thread_manager_; }
ContextSwitcher* context_switcher() { return context_switcher_; }
@@ -1060,12 +1060,10 @@ class Isolate {
thread_local_top_.top_lookup_result_ = top;
}
- bool context_exit_happened() {
- return context_exit_happened_;
- }
- void set_context_exit_happened(bool context_exit_happened) {
- context_exit_happened_ = context_exit_happened;
- }
+ bool IsDead() { return has_fatal_error_; }
+ void SignalFatalError() { has_fatal_error_ = true; }
+
+ bool use_crankshaft() const { return use_crankshaft_; }
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
@@ -1116,13 +1114,6 @@ class Isolate {
return sweeper_thread_;
}
- CallbackTable* callback_table() {
- return callback_table_;
- }
- void set_callback_table(CallbackTable* callback_table) {
- callback_table_ = callback_table;
- }
-
int id() const { return static_cast<int>(id_); }
HStatistics* GetHStatistics();
@@ -1135,9 +1126,18 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
+ inline RandomNumberGenerator* random_number_generator();
+
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
+ bool is_memory_constrained() const {
+ return is_memory_constrained_;
+ }
+ void set_is_memory_constrained(bool value) {
+ is_memory_constrained_ = value;
+ }
+
private:
Isolate();
@@ -1165,7 +1165,6 @@ class Isolate {
PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
void Insert(PerIsolateThreadData* data);
- void Remove(Isolate* isolate, ThreadId thread_id);
void Remove(PerIsolateThreadData* data);
void RemoveAllThreads(Isolate* isolate);
@@ -1200,7 +1199,7 @@ class Isolate {
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
- static Mutex* process_wide_mutex_;
+ static Mutex process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
@@ -1216,10 +1215,6 @@ class Isolate {
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
- // Allocate and insert PerIsolateThreadData into the ThreadDataTable
- // (regardless of whether such data already exists).
- PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
-
// Find the PerThread for this particular (isolate, thread) combination.
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
@@ -1268,9 +1263,9 @@ class Isolate {
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
- Mutex* break_access_;
+ RecursiveMutex break_access_;
Atomic32 debugger_initialized_;
- Mutex* debugger_access_;
+ RecursiveMutex debugger_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
@@ -1295,6 +1290,7 @@ class Isolate {
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
ConsStringIteratorOp* write_iterator_;
GlobalHandles* global_handles_;
+ EternalHandles* eternal_handles_;
ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
@@ -1313,10 +1309,14 @@ class Isolate {
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
+ RandomNumberGenerator* random_number_generator_;
+ bool is_memory_constrained_;
- // The garbage collector should be a little more aggressive when it knows
- // that a context was recently exited.
- bool context_exit_happened_;
+ // True if a fatal error has been signaled for this isolate.
+ bool has_fatal_error_;
+
+ // True if we are using the Crankshaft optimizing compiler.
+ bool use_crankshaft_;
// True if this isolate was initialized from a snapshot.
bool initialized_from_snapshot_;
@@ -1371,7 +1371,6 @@ class Isolate {
OptimizingCompilerThread optimizing_compiler_thread_;
MarkingThread** marking_thread_;
SweeperThread** sweeper_thread_;
- CallbackTable* callback_table_;
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
@@ -1404,15 +1403,8 @@ class SaveContext BASE_EMBEDDED {
inline explicit SaveContext(Isolate* isolate);
~SaveContext() {
- if (context_.is_null()) {
- Isolate* isolate = Isolate::Current();
- isolate->set_context(NULL);
- isolate->set_save_context(prev_);
- } else {
- Isolate* isolate = context_->GetIsolate();
- isolate->set_context(*context_);
- isolate->set_save_context(prev_);
- }
+ isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_save_context(prev_);
}
Handle<Context> context() { return context_; }
@@ -1424,10 +1416,8 @@ class SaveContext BASE_EMBEDDED {
}
private:
+ Isolate* isolate_;
Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
SaveContext* prev_;
Address c_entry_fp_;
};
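
Note: with isolate_ captured at construction, the destructor shrinks to one assignment and no longer branches on a null context or re-derives the isolate from it. A standalone sketch of the same RAII shape, with raw pointers standing in for handles:

    #include <cstdio>

    struct Context {};
    struct Isolate {
      Context* context = nullptr;
      void set_context(Context* c) { context = c; }
    };

    class SaveContext {
     public:
      explicit SaveContext(Isolate* isolate)
          : isolate_(isolate), saved_(isolate->context) {}
      ~SaveContext() { isolate_->set_context(saved_); }  // null-safe by construction
     private:
      Isolate* isolate_;
      Context* saved_;
    };

    int main() {
      Isolate isolate;
      Context outer;
      isolate.set_context(&outer);
      {
        SaveContext save(&isolate);
        isolate.set_context(nullptr);  // clobber inside the scope
      }
      std::printf("restored: %s\n", isolate.context == &outer ? "yes" : "no");
    }
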
@@ -1436,21 +1426,44 @@ class SaveContext BASE_EMBEDDED {
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- AssertNoContextChange() :
- scope_(Isolate::Current()),
- context_(Isolate::Current()->context(), Isolate::Current()) {
+ AssertNoContextChange()
+ : isolate_(Isolate::Current()),
+ context_(isolate_->context()) { }
+ ~AssertNoContextChange() {
+ ASSERT(isolate_->context() == *context_);
}
- ~AssertNoContextChange() {
- ASSERT(Isolate::Current()->context() == *context_);
+ private:
+ Isolate* isolate_;
+ Handle<Context> context_;
+#else
+ public:
+ AssertNoContextChange() { }
+#endif
+};
+
+
+// TODO(mstarzinger): Deprecate as soon as everything is handlified.
+class AssertNoContextChangeWithHandleScope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ AssertNoContextChangeWithHandleScope() :
+ isolate_(Isolate::Current()),
+ scope_(isolate_),
+ context_(isolate_->context(), isolate_) {
+ }
+
+ ~AssertNoContextChangeWithHandleScope() {
+ ASSERT(isolate_->context() == *context_);
}
private:
+ Isolate* isolate_;
HandleScope scope_;
Handle<Context> context_;
#else
public:
- AssertNoContextChange() { }
+ AssertNoContextChangeWithHandleScope() { }
#endif
};
@@ -1462,11 +1475,11 @@ class ExecutionAccess BASE_EMBEDDED {
}
~ExecutionAccess() { Unlock(isolate_); }
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+ static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
+ return isolate->break_access()->TryLock();
}
private:
@@ -1510,12 +1523,6 @@ class PostponeInterruptsScope BASE_EMBEDDED {
};
-// Temporary macros for accessing current isolate and its subobjects.
-// They provide better readability, especially when used a lot in the code.
-#define HEAP (v8::internal::Isolate::Current()->heap())
-#define ISOLATE (v8::internal::Isolate::Current())
-
-
// Tells whether the native context is marked with out of memory.
inline bool Context::has_out_of_memory() {
return native_context()->out_of_memory()->IsTrue();
@@ -1524,7 +1531,7 @@ inline bool Context::has_out_of_memory() {
// Mark the native context with out of memory.
inline void Context::mark_out_of_memory() {
- native_context()->set_out_of_memory(HEAP->true_value());
+ native_context()->set_out_of_memory(GetIsolate()->heap()->true_value());
}
diff --git a/chromium/v8/src/json-stringifier.h b/chromium/v8/src/json-stringifier.h
index 5ebdb40b519..0d17b356abb 100644
--- a/chromium/v8/src/json-stringifier.h
+++ b/chromium/v8/src/json-stringifier.h
@@ -367,7 +367,7 @@ Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
Handle<Object> argv[] = { key };
bool has_exception = false;
HandleScope scope(isolate_);
- object = Execution::Call(fun, object, 1, argv, &has_exception);
+ object = Execution::Call(isolate_, fun, object, 1, argv, &has_exception);
// Return empty handle to signal an exception.
if (has_exception) return Handle<Object>::null();
return scope.CloseAndEscape(object);
@@ -470,7 +470,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
Handle<Object> argv[] = { key, object };
bool has_exception = false;
Handle<Object> result =
- Execution::Call(builtin, object, 2, argv, &has_exception);
+ Execution::Call(isolate_, builtin, object, 2, argv, &has_exception);
if (has_exception) return EXCEPTION;
if (result->IsUndefined()) return UNCHANGED;
if (deferred_key) {
@@ -495,11 +495,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
bool has_exception = false;
String* class_name = object->class_name();
if (class_name == isolate_->heap()->String_string()) {
- Handle<Object> value = Execution::ToString(object, &has_exception);
+ Handle<Object> value =
+ Execution::ToString(isolate_, object, &has_exception);
if (has_exception) return EXCEPTION;
SerializeString(Handle<String>::cast(value));
} else if (class_name == isolate_->heap()->Number_string()) {
- Handle<Object> value = Execution::ToNumber(object, &has_exception);
+ Handle<Object> value =
+ Execution::ToNumber(isolate_, object, &has_exception);
if (has_exception) return EXCEPTION;
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
@@ -600,12 +602,12 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
Handle<JSArray> object, int length) {
for (int i = 0; i < length; i++) {
if (i > 0) Append(',');
- Handle<Object> element = Object::GetElement(object, i);
+ Handle<Object> element = Object::GetElement(isolate_, object, i);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate_, element, EXCEPTION);
if (element->IsUndefined()) {
AppendAscii("null");
} else {
- Result result = SerializeElement(object->GetIsolate(), element, i);
+ Result result = SerializeElement(isolate_, element, i);
if (result == SUCCESS) continue;
if (result == UNCHANGED) {
AppendAscii("null");
@@ -676,9 +678,10 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
uint32_t index;
if (key->IsSmi()) {
- property = Object::GetElement(object, Smi::cast(key)->value());
+ property = Object::GetElement(
+ isolate_, object, Smi::cast(key)->value());
} else if (key_handle->AsArrayIndex(&index)) {
- property = Object::GetElement(object, index);
+ property = Object::GetElement(isolate_, object, index);
} else {
property = GetProperty(isolate_, object, key_handle);
}
@@ -832,14 +835,14 @@ Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
void BasicJsonStringifier::SerializeString(Handle<String> object) {
object = FlattenGetString(object);
if (is_ascii_) {
- if (object->IsOneByteRepresentation()) {
+ if (object->IsOneByteRepresentationUnderneath()) {
SerializeString_<true, uint8_t>(object);
} else {
ChangeEncoding();
SerializeString(object);
}
} else {
- if (object->IsOneByteRepresentation()) {
+ if (object->IsOneByteRepresentationUnderneath()) {
SerializeString_<false, uint8_t>(object);
} else {
SerializeString_<false, uc16>(object);
diff --git a/chromium/v8/src/jsregexp.cc b/chromium/v8/src/jsregexp.cc
index 666866ed32e..3a3d91599c3 100644
--- a/chromium/v8/src/jsregexp.cc
+++ b/chromium/v8/src/jsregexp.cc
@@ -933,27 +933,25 @@ void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
TextElement TextElement::Atom(RegExpAtom* atom) {
- TextElement result = TextElement(ATOM);
- result.data.u_atom = atom;
- return result;
+ return TextElement(ATOM, atom);
}
-TextElement TextElement::CharClass(
- RegExpCharacterClass* char_class) {
- TextElement result = TextElement(CHAR_CLASS);
- result.data.u_char_class = char_class;
- return result;
+TextElement TextElement::CharClass(RegExpCharacterClass* char_class) {
+ return TextElement(CHAR_CLASS, char_class);
}
-int TextElement::length() {
- if (text_type == ATOM) {
- return data.u_atom->length();
- } else {
- ASSERT(text_type == CHAR_CLASS);
- return 1;
+int TextElement::length() const {
+ switch (text_type()) {
+ case ATOM:
+ return atom()->length();
+
+ case CHAR_CLASS:
+ return 1;
}
+ UNREACHABLE();
+ return 0;
}
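
Note: TextElement becomes a proper tagged type in these hunks: accessors (text_type(), atom(), char_class(), cp_offset()) replace direct field access, and length() turns into an exhaustive switch over the tag. A simplified, compilable miniature of that design:

    #include <cassert>

    struct RegExpAtom {
      int length() const { return 3; }  // stand-in payload
    };

    class TextElement {
     public:
      enum TextType { ATOM, CHAR_CLASS };

      static TextElement Atom(RegExpAtom* atom) { return TextElement(ATOM, atom); }
      static TextElement CharClass() { return TextElement(CHAR_CLASS, nullptr); }

      TextType text_type() const { return type_; }

      int length() const {
        switch (type_) {
          case ATOM:
            return atom_->length();
          case CHAR_CLASS:
            return 1;  // a character class always matches one character
        }
        return 0;  // unreachable; keeps compilers happy
      }

     private:
      TextElement(TextType type, RegExpAtom* atom) : type_(type), atom_(atom) {}
      TextType type_;
      RegExpAtom* atom_;
    };

    int main() {
      RegExpAtom atom;
      assert(TextElement::Atom(&atom).length() == 3);
      assert(TextElement::CharClass().length() == 1);
    }
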
@@ -1087,8 +1085,8 @@ class RecursionCheck {
};
-static RegExpEngine::CompilationResult IrregexpRegExpTooBig() {
- return RegExpEngine::CompilationResult("RegExp too big");
+static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
+ return RegExpEngine::CompilationResult(isolate, "RegExp too big");
}
@@ -1145,7 +1143,7 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
while (!work_list.is_empty()) {
work_list.RemoveLast()->Emit(this, &new_trace);
}
- if (reg_exp_too_big_) return IrregexpRegExpTooBig();
+ if (reg_exp_too_big_) return IrregexpRegExpTooBig(zone_->isolate());
Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
heap->IncreaseTotalRegexpCodeGenerated(code->Size());
@@ -1871,7 +1869,7 @@ static void EmitUseLookupTable(
for (int i = j; i < kSize; i++) {
templ[i] = bit;
}
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = masm->zone()->isolate()->factory();
// TODO(erikcorry): Cache these.
Handle<ByteArray> ba = factory->NewByteArray(kSize, TENURED);
for (int i = 0; i < kSize; i++) {
@@ -2550,7 +2548,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = compiler->macro_assembler()->zone()->isolate();
ASSERT(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
@@ -2561,8 +2559,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
for (int k = 0; k < elms_->length(); k++) {
TextElement elm = elms_->at(k);
- if (elm.text_type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int i = 0; i < characters && i < quarks.length(); i++) {
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
@@ -2624,7 +2622,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
} else {
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
- RegExpCharacterClass* tree = elm.data.u_char_class;
+ RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(zone());
if (tree->is_negated()) {
// A quick check uses multi-character mask and compare. There is no
@@ -2814,8 +2812,8 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.text_type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
uint16_t c = quarks[j];
if (c <= String::kMaxOneByteCharCode) continue;
@@ -2830,8 +2828,8 @@ RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
copy[j] = converted;
}
} else {
- ASSERT(elm.text_type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ ASSERT(elm.text_type() == TextElement::CHAR_CLASS);
+ RegExpCharacterClass* cc = elm.char_class();
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
@@ -3248,20 +3246,20 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
Trace* trace,
bool first_element_checked,
int* checked_up_to) {
- Isolate* isolate = Isolate::Current();
RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Isolate* isolate = assembler->zone()->isolate();
bool ascii = compiler->ascii();
Label* backtrack = trace->backtrack();
QuickCheckDetails* quick_check = trace->quick_check_performed();
int element_count = elms_->length();
for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
TextElement elm = elms_->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset;
- if (elm.text_type == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.data.u_atom->data();
+ int cp_offset = trace->cp_offset() + elm.cp_offset();
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset + j)) continue;
+ if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
EmitCharacterFunction* emit_function = NULL;
switch (pass) {
case NON_ASCII_MATCH:
@@ -3295,11 +3293,11 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
}
}
} else {
- ASSERT_EQ(elm.text_type, TextElement::CHAR_CLASS);
+ ASSERT_EQ(TextElement::CHAR_CLASS, elm.text_type());
if (pass == CHARACTER_CLASS_MATCH) {
if (first_element_checked && i == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset)) continue;
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
+ RegExpCharacterClass* cc = elm.char_class();
EmitCharClass(assembler,
cc,
ascii,
@@ -3317,12 +3315,8 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int TextNode::Length() {
TextElement elm = elms_->last();
- ASSERT(elm.cp_offset >= 0);
- if (elm.text_type == TextElement::ATOM) {
- return elm.cp_offset + elm.data.u_atom->data().length();
- } else {
- return elm.cp_offset + 1;
- }
+ ASSERT(elm.cp_offset() >= 0);
+ return elm.cp_offset() + elm.length();
}
@@ -3424,8 +3418,8 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
- if (elm.text_type == TextElement::CHAR_CLASS) {
- RegExpCharacterClass* cc = elm.data.u_char_class;
+ if (elm.text_type() == TextElement::CHAR_CLASS) {
+ RegExpCharacterClass* cc = elm.char_class();
// None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
if (cc->is_standard(zone())) continue;
@@ -3441,11 +3435,7 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
int TextNode::GreedyLoopTextLength() {
TextElement elm = elms_->at(elms_->length() - 1);
- if (elm.text_type == TextElement::CHAR_CLASS) {
- return elm.cp_offset + 1;
- } else {
- return elm.cp_offset + elm.data.u_atom->data().length();
- }
+ return elm.cp_offset() + elm.length();
}
@@ -3453,8 +3443,8 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
if (elms_->length() != 1) return NULL;
TextElement elm = elms_->at(0);
- if (elm.text_type != TextElement::CHAR_CLASS) return NULL;
- RegExpCharacterClass* node = elm.data.u_char_class;
+ if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
+ RegExpCharacterClass* node = elm.char_class();
ZoneList<CharacterRange>* ranges = node->ranges(zone());
if (!CharacterRange::IsCanonical(ranges)) {
CharacterRange::Canonicalize(ranges);
@@ -3830,7 +3820,7 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
return true;
}
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = masm->zone()->isolate()->factory();
Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED);
int skip_distance = GetSkipTable(
min_lookahead, max_lookahead, boolean_skip_table);
@@ -4528,13 +4518,13 @@ void DotPrinter::VisitText(TextNode* that) {
for (int i = 0; i < that->elements()->length(); i++) {
if (i > 0) stream()->Add(" ");
TextElement elm = that->elements()->at(i);
- switch (elm.text_type) {
+ switch (elm.text_type()) {
case TextElement::ATOM: {
- stream()->Add("'%w'", elm.data.u_atom->data());
+ stream()->Add("'%w'", elm.atom()->data());
break;
}
case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* node = elm.data.u_char_class;
+ RegExpCharacterClass* node = elm.char_class();
stream()->Add("[");
if (node->is_negated())
stream()->Add("^");
@@ -5302,7 +5292,7 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
bool is_ascii,
Zone* zone) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = zone->isolate();
uc16 bottom = from();
uc16 top = to();
if (is_ascii && !RangeContainsLatin1Equivalents(*this)) {
@@ -5690,7 +5680,7 @@ OutSet* DispatchTable::Get(uc16 value) {
void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(Isolate::Current());
+ StackLimitCheck check(that->zone()->isolate());
if (check.HasOverflowed()) {
fail("Stack overflow");
return;
@@ -5716,12 +5706,8 @@ void TextNode::CalculateOffsets() {
int cp_offset = 0;
for (int i = 0; i < element_count; i++) {
TextElement& elm = elements()->at(i);
- elm.cp_offset = cp_offset;
- if (elm.text_type == TextElement::ATOM) {
- cp_offset += elm.data.u_atom->data().length();
- } else {
- cp_offset++;
- }
+ elm.set_cp_offset(cp_offset);
+ cp_offset += elm.length();
}
}
@@ -5837,8 +5823,8 @@ void TextNode::FillInBMInfo(int initial_offset,
return;
}
TextElement text = elements()->at(i);
- if (text.text_type == TextElement::ATOM) {
- RegExpAtom* atom = text.data.u_atom;
+ if (text.text_type() == TextElement::ATOM) {
+ RegExpAtom* atom = text.atom();
for (int j = 0; j < atom->length(); j++, offset++) {
if (offset >= bm->length()) {
if (initial_offset == 0) set_bm_info(not_at_start, bm);
@@ -5848,7 +5834,7 @@ void TextNode::FillInBMInfo(int initial_offset,
if (bm->compiler()->ignore_case()) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
int length = GetCaseIndependentLetters(
- ISOLATE,
+ Isolate::Current(),
character,
bm->max_char() == String::kMaxOneByteCharCode,
chars);
@@ -5860,8 +5846,8 @@ void TextNode::FillInBMInfo(int initial_offset,
}
}
} else {
- ASSERT(text.text_type == TextElement::CHAR_CLASS);
- RegExpCharacterClass* char_class = text.data.u_char_class;
+ ASSERT_EQ(TextElement::CHAR_CLASS, text.text_type());
+ RegExpCharacterClass* char_class = text.char_class();
ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
if (char_class->is_negated()) {
bm->SetAll(offset);
@@ -5973,14 +5959,14 @@ void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
void DispatchTableConstructor::VisitText(TextNode* that) {
TextElement elm = that->elements()->at(0);
- switch (elm.text_type) {
+ switch (elm.text_type()) {
case TextElement::ATOM: {
- uc16 c = elm.data.u_atom->data()[0];
+ uc16 c = elm.atom()->data()[0];
AddRange(CharacterRange(c, c));
break;
}
case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* tree = elm.data.u_char_class;
+ RegExpCharacterClass* tree = elm.char_class();
ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
if (tree->is_negated()) {
AddInverse(ranges);
@@ -6013,7 +5999,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
bool is_ascii,
Zone* zone) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig();
+ return IrregexpRegExpTooBig(zone->isolate());
}
RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii, zone);
@@ -6077,7 +6063,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
- return CompilationResult(error_message);
+ return CompilationResult(zone->isolate(), error_message);
}
// Create the correct assembler for the architecture.
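
The hunks above all apply one pattern: the thread-local lookup Isolate::Current() is replaced by deriving the isolate from an object the caller already holds (a Zone, a macro assembler, a heap object). A minimal sketch of the shape, with illustrative names rather than V8's real declarations:

    class Isolate {};  // per-VM state: heap, factory, stack limit, ...

    // Assumption (relied on by zone()->isolate() in the patch): every Zone
    // is created for exactly one Isolate and keeps a back-pointer to it.
    class Zone {
     public:
      explicit Zone(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };

    // A compiler pass now receives its isolate through the zone it
    // allocates from, instead of consulting global state.
    void EnsureAnalyzed(Zone* zone) {
      Isolate* isolate = zone->isolate();  // was: Isolate::Current()
      (void)isolate;  // ... check stack limits, touch the heap, etc. ...
    }

Besides making the dependency explicit in each signature, this removes a thread-local read from hot compiler paths.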
diff --git a/chromium/v8/src/jsregexp.h b/chromium/v8/src/jsregexp.h
index 20c0ac416f0..dfd415d5af8 100644
--- a/chromium/v8/src/jsregexp.h
+++ b/chromium/v8/src/jsregexp.h
@@ -426,20 +426,41 @@ FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
#undef FORWARD_DECLARE
-class TextElement {
+class TextElement V8_FINAL BASE_EMBEDDED {
public:
- enum TextType {UNINITIALIZED, ATOM, CHAR_CLASS};
- TextElement() : text_type(UNINITIALIZED) { }
- explicit TextElement(TextType t) : text_type(t), cp_offset(-1) { }
+ enum TextType {
+ ATOM,
+ CHAR_CLASS
+ };
+
static TextElement Atom(RegExpAtom* atom);
static TextElement CharClass(RegExpCharacterClass* char_class);
- int length();
- TextType text_type;
- union {
- RegExpAtom* u_atom;
- RegExpCharacterClass* u_char_class;
- } data;
- int cp_offset;
+
+ int cp_offset() const { return cp_offset_; }
+ void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+ int length() const;
+
+ TextType text_type() const { return text_type_; }
+
+ RegExpTree* tree() const { return tree_; }
+
+ RegExpAtom* atom() const {
+ ASSERT(text_type() == ATOM);
+ return reinterpret_cast<RegExpAtom*>(tree());
+ }
+
+ RegExpCharacterClass* char_class() const {
+ ASSERT(text_type() == CHAR_CLASS);
+ return reinterpret_cast<RegExpCharacterClass*>(tree());
+ }
+
+ private:
+ TextElement(TextType text_type, RegExpTree* tree)
+ : cp_offset_(-1), text_type_(text_type), tree_(tree) {}
+
+ int cp_offset_;
+ TextType text_type_;
+ RegExpTree* tree_;
};
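
The refactoring above turns a public tag-plus-union struct into a small immutable value class: construction goes through factories, the UNINITIALIZED state disappears, and every downcast is asserted. The same shape, reduced to a self-contained sketch (stand-in names, not V8's):

    #include <cassert>

    struct Tree {};                // stands in for RegExpTree
    struct Atom : Tree {};         // stands in for RegExpAtom
    struct CharClass : Tree {};    // stands in for RegExpCharacterClass

    class Element {
     public:
      enum Type { ATOM, CHAR_CLASS };

      static Element MakeAtom(Atom* a) { return Element(ATOM, a); }
      static Element MakeCharClass(CharClass* c) { return Element(CHAR_CLASS, c); }

      Type type() const { return type_; }

      // Checked accessors replace direct union-member reads.
      Atom* atom() const {
        assert(type_ == ATOM);
        return static_cast<Atom*>(tree_);
      }
      CharClass* char_class() const {
        assert(type_ == CHAR_CLASS);
        return static_cast<CharClass*>(tree_);
      }

     private:
      Element(Type type, Tree* tree) : type_(type), tree_(tree) {}
      Type type_;
      Tree* tree_;
    };

Hoisting a length() method onto the element itself is what collapses the repeated if-ATOM/else arms in TextNode::Length(), GreedyLoopTextLength() and CalculateOffsets() above into a single elm.cp_offset() + elm.length().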
@@ -1594,9 +1615,9 @@ struct RegExpCompileData {
class RegExpEngine: public AllStatic {
public:
struct CompilationResult {
- explicit CompilationResult(const char* error_message)
+ CompilationResult(Isolate* isolate, const char* error_message)
: error_message(error_message),
- code(HEAP->the_hole_value()),
+ code(isolate->heap()->the_hole_value()),
num_registers(0) {}
CompilationResult(Object* code, int registers)
: error_message(NULL),
diff --git a/chromium/v8/src/lazy-instance.h b/chromium/v8/src/lazy-instance.h
index 9d68b8cacce..fc03f4d1260 100644
--- a/chromium/v8/src/lazy-instance.h
+++ b/chromium/v8/src/lazy-instance.h
@@ -91,12 +91,13 @@
#ifndef V8_LAZY_INSTANCE_H_
#define V8_LAZY_INSTANCE_H_
+#include "checks.h"
#include "once.h"
namespace v8 {
namespace internal {
-#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } }
#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
// Default to static mode.
@@ -111,17 +112,15 @@ struct LeakyInstanceTrait {
// Traits that define how an instance is allocated and accessed.
-// TODO(kalmard): __alignof__ is only defined for GCC > 4.2. Fix alignment issue
-// on MIPS with other compilers.
-#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
-#define LAZY_ALIGN(x) __attribute__((aligned(__alignof__(x))))
-#else
-#define LAZY_ALIGN(x)
-#endif
template <typename T>
struct StaticallyAllocatedInstanceTrait {
- typedef char StorageType[sizeof(T)] LAZY_ALIGN(T);
+ // 16-byte alignment fallback to be on the safe side here.
+ struct V8_ALIGNAS(T, 16) StorageType {
+ char x[sizeof(T)];
+ };
+
+ STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T));
static T* MutableInstance(StorageType* storage) {
return reinterpret_cast<T*>(storage);
@@ -133,8 +132,6 @@ struct StaticallyAllocatedInstanceTrait {
}
};
-#undef LAZY_ALIGN
-
template <typename T>
struct DynamicallyAllocatedInstanceTrait {
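
The alignment fix above replaces a GCC-only __attribute__((aligned)) with portable macros. With C++11 keywords standing in for V8_ALIGNAS/V8_ALIGNOF, the trait looks roughly like this sketch (not the shipped code):

    #include <new>

    template <typename T>
    struct StaticStorageTrait {
      // A raw byte buffer aligned at least as strictly as T itself.
      struct alignas(T) StorageType {
        char x[sizeof(T)];
      };
      static_assert(alignof(StorageType) >= alignof(T),
                    "storage under-aligned for T");

      static T* MutableInstance(StorageType* storage) {
        return reinterpret_cast<T*>(storage);
      }
      static void Construct(StorageType* storage) {
        new (storage) T();  // placement-new into the pre-aligned buffer
      }
    };

The literal 16 in the real V8_ALIGNAS(T, 16) is presumably the fallback for toolchains whose alignment attribute only accepts a numeric constant, not a type; the STATIC_ASSERT then verifies the fallback was strong enough.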
diff --git a/chromium/v8/src/lithium-allocator.cc b/chromium/v8/src/lithium-allocator.cc
index 2e2f802558c..3c5abd19846 100644
--- a/chromium/v8/src/lithium-allocator.cc
+++ b/chromium/v8/src/lithium-allocator.cc
@@ -2189,7 +2189,7 @@ LAllocatorPhase::~LAllocatorPhase() {
if (FLAG_hydrogen_stats) {
unsigned size = allocator_->zone()->allocation_size() -
allocator_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name(), 0, size);
+ isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
}
if (ShouldProduceTraceOutput()) {
diff --git a/chromium/v8/src/lithium.cc b/chromium/v8/src/lithium.cc
index e9c3531e385..95310bffd3f 100644
--- a/chromium/v8/src/lithium.cc
+++ b/chromium/v8/src/lithium.cc
@@ -233,36 +233,6 @@ void LPointerMap::PrintTo(StringStream* stream) {
}
-int ElementsKindToShiftSize(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return 0;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return 1;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- return 2;
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return 3;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- return kPointerSizeLog2;
- }
- UNREACHABLE();
- return 0;
-}
-
-
int StackSlotOffset(int index) {
if (index >= 0) {
// Local or spill slot. Skip the frame pointer, function, and
@@ -422,10 +392,11 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunk::NewChunk(HGraph* graph) {
DisallowHandleAllocation no_handles;
DisallowHeapAllocation no_gc;
+ graph->DisallowAddingNewValues();
int values = graph->GetMaximumValueID();
CompilationInfo* info = graph->info();
if (values > LUnallocated::kMaxVirtualRegisters) {
- info->set_bailout_reason("not enough virtual registers for values");
+ info->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
return NULL;
}
LAllocator allocator(values, graph);
@@ -434,7 +405,7 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
if (chunk == NULL) return NULL;
if (!allocator.Allocate(chunk)) {
- info->set_bailout_reason("not enough virtual registers (regalloc)");
+ info->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
return NULL;
}
@@ -461,12 +432,10 @@ Handle<Code> LChunk::Codegen() {
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
code->set_is_crankshafted(true);
- if (!code.is_null()) {
- void* jit_handler_data =
- assembler.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(info()->isolate(),
- CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
- }
+ void* jit_handler_data =
+ assembler.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(info()->isolate(),
+ CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
CodeGenerator::PrintCode(code, info());
return code;
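
Note that set_bailout_reason() above now takes an enum value (kNotEnoughVirtualRegistersForValues, ...) instead of a C string literal. The scheme, sketched independently of V8's actual tables:

    #include <cstdio>

    enum BailoutReason {
      kNotEnoughVirtualRegistersForValues,
      kNotEnoughVirtualRegistersRegalloc,
      kNoReason
    };

    // One central table maps each reason to its message, so call sites
    // compare and store small integers rather than string pointers.
    static const char* const kBailoutMessages[] = {
      "not enough virtual registers for values",
      "not enough virtual registers (regalloc)",
      "no reason"
    };

    const char* GetBailoutReason(BailoutReason reason) {
      return kBailoutMessages[reason];
    }

    int main() {
      std::printf("%s\n", GetBailoutReason(kNotEnoughVirtualRegistersRegalloc));
      return 0;
    }

An enum also makes the full set of bailout reasons greppable in one place and keeps identical messages from drifting apart across call sites.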
diff --git a/chromium/v8/src/lithium.h b/chromium/v8/src/lithium.h
index 1e0784eb98d..d6aa205640f 100644
--- a/chromium/v8/src/lithium.h
+++ b/chromium/v8/src/lithium.h
@@ -43,7 +43,7 @@ namespace internal {
V(DoubleRegister, DOUBLE_REGISTER)
-class LOperand: public ZoneObject {
+class LOperand : public ZoneObject {
public:
enum Kind {
INVALID,
@@ -90,7 +90,7 @@ class LOperand: public ZoneObject {
};
-class LUnallocated: public LOperand {
+class LUnallocated : public LOperand {
public:
enum BasicPolicy {
FIXED_SLOT,
@@ -271,7 +271,7 @@ class LUnallocated: public LOperand {
};
-class LMoveOperands BASE_EMBEDDED {
+class LMoveOperands V8_FINAL BASE_EMBEDDED {
public:
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
@@ -317,7 +317,7 @@ class LMoveOperands BASE_EMBEDDED {
};
-class LConstantOperand: public LOperand {
+class LConstantOperand V8_FINAL : public LOperand {
public:
static LConstantOperand* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -342,7 +342,7 @@ class LConstantOperand: public LOperand {
};
-class LArgument: public LOperand {
+class LArgument V8_FINAL : public LOperand {
public:
explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
@@ -353,7 +353,7 @@ class LArgument: public LOperand {
};
-class LStackSlot: public LOperand {
+class LStackSlot V8_FINAL : public LOperand {
public:
static LStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -378,7 +378,7 @@ class LStackSlot: public LOperand {
};
-class LDoubleStackSlot: public LOperand {
+class LDoubleStackSlot V8_FINAL : public LOperand {
public:
static LDoubleStackSlot* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -403,7 +403,7 @@ class LDoubleStackSlot: public LOperand {
};
-class LRegister: public LOperand {
+class LRegister V8_FINAL : public LOperand {
public:
static LRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -428,7 +428,7 @@ class LRegister: public LOperand {
};
-class LDoubleRegister: public LOperand {
+class LDoubleRegister V8_FINAL : public LOperand {
public:
static LDoubleRegister* Create(int index, Zone* zone) {
ASSERT(index >= 0);
@@ -453,7 +453,7 @@ class LDoubleRegister: public LOperand {
};
-class LParallelMove : public ZoneObject {
+class LParallelMove V8_FINAL : public ZoneObject {
public:
explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
@@ -474,7 +474,7 @@ class LParallelMove : public ZoneObject {
};
-class LPointerMap: public ZoneObject {
+class LPointerMap V8_FINAL : public ZoneObject {
public:
explicit LPointerMap(int position, Zone* zone)
: pointer_operands_(8, zone),
@@ -510,7 +510,7 @@ class LPointerMap: public ZoneObject {
};
-class LEnvironment: public ZoneObject {
+class LEnvironment V8_FINAL : public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
FrameType frame_type,
@@ -533,6 +533,7 @@ class LEnvironment: public ZoneObject {
values_(value_count, zone),
is_tagged_(value_count, zone),
is_uint32_(value_count, zone),
+ object_mapping_(0, zone),
outer_(outer),
entry_(entry),
zone_(zone) { }
@@ -573,6 +574,38 @@ class LEnvironment: public ZoneObject {
return is_uint32_.Contains(index);
}
+ void AddNewObject(int length, bool is_arguments) {
+ uint32_t encoded = LengthOrDupeField::encode(length) |
+ IsArgumentsField::encode(is_arguments) |
+ IsDuplicateField::encode(false);
+ object_mapping_.Add(encoded, zone());
+ }
+
+ void AddDuplicateObject(int dupe_of) {
+ uint32_t encoded = LengthOrDupeField::encode(dupe_of) |
+ IsDuplicateField::encode(true);
+ object_mapping_.Add(encoded, zone());
+ }
+
+ int ObjectDuplicateOfAt(int index) {
+ ASSERT(ObjectIsDuplicateAt(index));
+ return LengthOrDupeField::decode(object_mapping_[index]);
+ }
+
+ int ObjectLengthAt(int index) {
+ ASSERT(!ObjectIsDuplicateAt(index));
+ return LengthOrDupeField::decode(object_mapping_[index]);
+ }
+
+ bool ObjectIsArgumentsAt(int index) {
+ ASSERT(!ObjectIsDuplicateAt(index));
+ return IsArgumentsField::decode(object_mapping_[index]);
+ }
+
+ bool ObjectIsDuplicateAt(int index) {
+ return IsDuplicateField::decode(object_mapping_[index]);
+ }
+
void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
@@ -587,6 +620,14 @@ class LEnvironment: public ZoneObject {
void PrintTo(StringStream* stream);
+ // Marker value indicating a de-materialized object.
+ static LOperand* materialization_marker() { return NULL; }
+
+ // Encoding used for the object_mapping map below.
+ class LengthOrDupeField : public BitField<int, 0, 30> { };
+ class IsArgumentsField : public BitField<bool, 30, 1> { };
+ class IsDuplicateField : public BitField<bool, 31, 1> { };
+
private:
Handle<JSFunction> closure_;
FrameType frame_type_;
@@ -603,6 +644,10 @@ class LEnvironment: public ZoneObject {
ZoneList<LOperand*> values_;
GrowableBitVector is_tagged_;
GrowableBitVector is_uint32_;
+
+ // Map with encoded information about materialization_marker operands.
+ ZoneList<uint32_t> object_mapping_;
+
LEnvironment* outer_;
HEnterInlined* entry_;
Zone* zone_;
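
The three BitField typedefs above pack a 30-bit length-or-duplicate index and two flags into each uint32_t entry of object_mapping_. V8's BitField template (declared elsewhere, in src/utils.h) behaves roughly like this reduced version:

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    typedef BitField<int, 0, 30>  LengthOrDupeField;   // bits 0..29
    typedef BitField<bool, 30, 1> IsArgumentsField;    // bit 30
    typedef BitField<bool, 31, 1> IsDuplicateField;    // bit 31

    int main() {
      // Encode "new object of length 5, not arguments, not a duplicate".
      uint32_t encoded = LengthOrDupeField::encode(5) |
                         IsArgumentsField::encode(false) |
                         IsDuplicateField::encode(false);
      assert(LengthOrDupeField::decode(encoded) == 5);
      assert(!IsDuplicateField::decode(encoded));
      return 0;
    }

The same 30-bit field doubles as either a length or a duplicate index, which is why the accessors assert on ObjectIsDuplicateAt() before decoding.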
@@ -610,7 +655,7 @@ class LEnvironment: public ZoneObject {
// Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator BASE_EMBEDDED {
+class ShallowIterator V8_FINAL BASE_EMBEDDED {
public:
explicit ShallowIterator(LEnvironment* env)
: env_(env),
@@ -654,7 +699,7 @@ class ShallowIterator BASE_EMBEDDED {
// Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator BASE_EMBEDDED {
+class DeepIterator V8_FINAL BASE_EMBEDDED {
public:
explicit DeepIterator(LEnvironment* env)
: current_iterator_(env) {
@@ -691,7 +736,7 @@ class LLabel;
// Superclass providing data and behavior common to all the
// arch-specific LPlatformChunk classes.
-class LChunk: public ZoneObject {
+class LChunk : public ZoneObject {
public:
static LChunk* NewChunk(HGraph* graph);
@@ -749,13 +794,11 @@ class LChunk: public ZoneObject {
};
-int ElementsKindToShiftSize(ElementsKind elements_kind);
int StackSlotOffset(int index);
enum NumberUntagDMode {
NUMBER_CANDIDATE_IS_SMI,
- NUMBER_CANDIDATE_IS_ANY_TAGGED,
- NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE
+ NUMBER_CANDIDATE_IS_ANY_TAGGED
};
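
Most of the remaining churn in this file adds V8_FINAL (alongside BASE_EMBEDDED) to classes never meant to be subclassed. V8_FINAL expands to the C++11 final keyword where the compiler supports it; the effect, in a stand-alone sketch:

    // A class not designed for further derivation is sealed outright:
    class LMoveOperandsLike final {
     public:
      LMoveOperandsLike(int src, int dst) : src_(src), dst_(dst) {}
     private:
      int src_, dst_;
    };

    // class Oops : public LMoveOperandsLike {};  // error: base is 'final'

Accidental inheritance becomes a compile error, and the compiler may devirtualize calls through pointers whose static type is a sealed leaf class.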
diff --git a/chromium/v8/src/liveedit.cc b/chromium/v8/src/liveedit.cc
index 859cf2b94f3..feaafd471e1 100644
--- a/chromium/v8/src/liveedit.cc
+++ b/chromium/v8/src/liveedit.cc
@@ -631,8 +631,8 @@ static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
// Wraps any object into an OpaqueReference that will hide the object
// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
- Isolate* isolate = Isolate::Current();
+static Handle<JSValue> WrapInJSValue(Handle<HeapObject> object) {
+ Isolate* isolate = object->GetIsolate();
Handle<JSFunction> constructor = isolate->opaque_reference_function();
Handle<JSValue> result =
Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
@@ -662,8 +662,8 @@ static int GetArrayLength(Handle<JSArray> array) {
template<typename S>
class JSArrayBasedStruct {
public:
- static S Create() {
- Factory* factory = Isolate::Current()->factory();
+ static S Create(Isolate* isolate) {
+ Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(S::kSize_);
return S(array);
}
@@ -691,7 +691,7 @@ class JSArrayBasedStruct {
Handle<Smi>(Smi::FromInt(value), isolate()));
}
Object* GetField(int field_position) {
- return array_->GetElementNoExceptionThrown(field_position);
+ return array_->GetElementNoExceptionThrown(isolate(), field_position);
}
int GetSmiValueField(int field_position) {
Object* res = GetField(field_position);
@@ -724,7 +724,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
void SetFunctionCode(Handle<Code> function_code,
- Handle<Object> code_scope_info) {
+ Handle<HeapObject> code_scope_info) {
Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
this->SetField(kCodeOffset_, code_wrapper);
@@ -788,7 +788,8 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
public:
static bool IsInstance(Handle<JSArray> array) {
return array->length() == Smi::FromInt(kSize_) &&
- array->GetElementNoExceptionThrown(kSharedInfoOffset_)->IsJSValue();
+ array->GetElementNoExceptionThrown(
+ array->GetIsolate(), kSharedInfoOffset_)->IsJSValue();
}
explicit SharedInfoWrapper(Handle<JSArray> array)
@@ -832,7 +833,7 @@ class FunctionInfoListener {
void FunctionStarted(FunctionLiteral* fun) {
HandleScope scope(isolate());
- FunctionInfoWrapper info = FunctionInfoWrapper::Create();
+ FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate());
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
fun->materialized_literal_count(),
@@ -846,7 +847,8 @@ class FunctionInfoListener {
HandleScope scope(isolate());
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
current_parent_index_ = info.GetParentIndex();
}
@@ -855,10 +857,10 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
info.SetFunctionCode(function_code,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
+ Handle<HeapObject>(isolate()->heap()->null_value()));
}
// Saves full information about a function: its code, its scope info
@@ -870,9 +872,10 @@ class FunctionInfoListener {
}
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
- result_->GetElementNoExceptionThrown(current_parent_index_));
+ result_->GetElementNoExceptionThrown(
+ isolate(), current_parent_index_));
info.SetFunctionCode(Handle<Code>(shared->code()),
- Handle<Object>(shared->scope_info(), isolate()));
+ Handle<HeapObject>(shared->scope_info()));
info.SetSharedFunctionInfo(shared);
Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
@@ -935,7 +938,7 @@ class FunctionInfoListener {
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = script->GetIsolate();
FunctionInfoListener listener(isolate);
Handle<Object> original_source =
@@ -1001,12 +1004,14 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
- HandleScope scope(array->GetIsolate());
+ Isolate* isolate = array->GetIsolate();
+ HandleScope scope(isolate);
int len = GetArrayLength(array);
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
- SharedFunctionInfo::cast(array->GetElementNoExceptionThrown(i)));
- SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
+ SharedFunctionInfo::cast(
+ array->GetElementNoExceptionThrown(isolate, i)));
+ SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
@@ -1228,7 +1233,9 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(function->code()->deoptimization_data());
- if (data == HEAP->empty_fixed_array()) return false;
+ if (data == function->GetIsolate()->heap()->empty_fixed_array()) {
+ return false;
+ }
FixedArray* literals = data->LiteralArray();
@@ -1242,34 +1249,48 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
}
-class DependentFunctionFilter : public OptimizedFunctionFilter {
+// Marks code that shares the same shared function info or has inlined
+// code that shares the same function info.
+class DependentFunctionMarker: public OptimizedFunctionVisitor {
public:
- explicit DependentFunctionFilter(
- SharedFunctionInfo* function_info)
- : function_info_(function_info) {}
-
- virtual bool TakeFunction(JSFunction* function) {
- return (function->shared() == function_info_ ||
- IsInlined(function, function_info_));
+ SharedFunctionInfo* shared_info_;
+ bool found_;
+
+ explicit DependentFunctionMarker(SharedFunctionInfo* shared_info)
+ : shared_info_(shared_info), found_(false) { }
+
+ virtual void EnterContext(Context* context) { } // Don't care.
+ virtual void LeaveContext(Context* context) { } // Don't care.
+ virtual void VisitFunction(JSFunction* function) {
+ // It should be guaranteed by the iterator that everything is optimized.
+ ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+ if (shared_info_ == function->shared() ||
+ IsInlined(function, shared_info_)) {
+ // Mark the code for deoptimization.
+ function->code()->set_marked_for_deoptimization(true);
+ found_ = true;
+ }
}
-
- private:
- SharedFunctionInfo* function_info_;
};
static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
DisallowHeapAllocation no_allocation;
+ DependentFunctionMarker marker(function_info);
+ // TODO(titzer): need to traverse all optimized code to find OSR code here.
+ Deoptimizer::VisitAllOptimizedFunctions(function_info->GetIsolate(), &marker);
- DependentFunctionFilter filter(function_info);
- Deoptimizer::DeoptimizeAllFunctionsWith(function_info->GetIsolate(), &filter);
+ if (marker.found_) {
+ // Only go through with the deoptimization if something was found.
+ Deoptimizer::DeoptimizeMarkedCode(function_info->GetIsolate());
+ }
}
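
DeoptimizeDependentFunctions thus moves from a filter ("tell me which functions to deoptimize") to a marking visitor ("flag the code now, deoptimize everything flagged in one pass"). Schematically, under invented stand-in types:

    struct Code { bool marked_for_deoptimization; };
    struct Function { Code* code; int shared_id; };

    class OptimizedFunctionVisitorLike {
     public:
      virtual ~OptimizedFunctionVisitorLike() {}
      virtual void VisitFunction(Function* f) = 0;
    };

    class DependentMarker : public OptimizedFunctionVisitorLike {
     public:
      explicit DependentMarker(int shared_id)
          : shared_id_(shared_id), found_(false) {}
      bool found() const { return found_; }
      virtual void VisitFunction(Function* f) {
        if (f->shared_id == shared_id_) {
          f->code->marked_for_deoptimization = true;  // flag, don't act yet
          found_ = true;
        }
      }
     private:
      int shared_id_;
      bool found_;
    };
    // A later DeoptimizeMarkedCode()-style pass patches every flagged Code
    // object at once, and is skipped entirely when nothing was found.

Splitting "mark" from "act" lets the marking run under DisallowHeapAllocation and avoids the whole deoptimization pass when no dependent code exists.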
MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = new_compile_info_array->GetIsolate();
HandleScope scope(isolate);
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
@@ -1290,6 +1311,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
}
+ shared_info->DisableOptimization(kLiveEdit);
}
if (shared_info->debug_info()->IsDebugInfo()) {
@@ -1342,7 +1364,7 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
CHECK(script_handle->IsScript() || script_handle->IsUndefined());
shared_info->set_script(*script_handle);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
+ function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
}
@@ -1359,20 +1381,24 @@ static int TranslatePosition(int original_position,
Handle<JSArray> position_change_array) {
int position_diff = 0;
int array_len = GetArrayLength(position_change_array);
+ Isolate* isolate = position_change_array->GetIsolate();
// TODO(635): binary search may be used here
for (int i = 0; i < array_len; i += 3) {
- Object* element = position_change_array->GetElementNoExceptionThrown(i);
+ Object* element =
+ position_change_array->GetElementNoExceptionThrown(isolate, i);
CHECK(element->IsSmi());
int chunk_start = Smi::cast(element)->value();
if (original_position < chunk_start) {
break;
}
- element = position_change_array->GetElementNoExceptionThrown(i + 1);
+ element = position_change_array->GetElementNoExceptionThrown(isolate,
+ i + 1);
CHECK(element->IsSmi());
int chunk_end = Smi::cast(element)->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
- element = position_change_array->GetElementNoExceptionThrown(i + 2);
+ element = position_change_array->GetElementNoExceptionThrown(isolate,
+ i + 2);
CHECK(element->IsSmi());
int chunk_changed_end = Smi::cast(element)->value();
position_diff = chunk_changed_end - chunk_end;
@@ -1507,7 +1533,7 @@ static Handle<Code> PatchPositionsInCode(
MaybeObject* LiveEdit::PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return shared_info_array->GetIsolate()->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
@@ -1525,7 +1551,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
- HEAP->EnsureHeapIsIterable();
+ info->GetIsolate()->heap()->EnsureHeapIsIterable();
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
@@ -1541,7 +1567,7 @@ MaybeObject* LiveEdit::PatchFunctionPositions(
}
}
- return HEAP->undefined_value();
+ return info->GetIsolate()->heap()->undefined_value();
}
@@ -1587,7 +1613,7 @@ Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
original_script->set_source(*new_source);
// Drop line ends so that they will be recalculated.
- original_script->set_line_ends(HEAP->undefined_value());
+ original_script->set_line_ends(isolate->heap()->undefined_value());
return *old_script_object;
}
@@ -1629,7 +1655,8 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
- Object* element = shared_info_array->GetElementNoExceptionThrown(i);
+ Object* element =
+ shared_info_array->GetElementNoExceptionThrown(isolate, i);
CHECK(element->IsJSValue());
Handle<JSValue> jsvalue(JSValue::cast(element));
Handle<SharedFunctionInfo> shared =
@@ -1650,7 +1677,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
static bool FixTryCatchHandler(StackFrame* top_frame,
StackFrame* bottom_frame) {
Address* pointer_address =
- &Memory::Address_at(Isolate::Current()->get_address_from_id(
+ &Memory::Address_at(top_frame->isolate()->get_address_from_id(
Isolate::kHandlerAddress));
while (*pointer_address < top_frame->sp()) {
@@ -1686,11 +1713,11 @@ static const char* DropFrames(Vector<StackFrame*> frames,
ASSERT(bottom_js_frame->is_java_script());
// Check the nature of the top frame.
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = bottom_js_frame->isolate();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
bool frame_has_padding;
if (pre_top_frame_code->is_inline_cache_stub() &&
- pre_top_frame_code->is_debug_break()) {
+ pre_top_frame_code->is_debug_stub()) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
@@ -1789,7 +1816,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Make sure FixTryCatchHandler is idempotent.
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
- Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
+ Handle<Code> code = isolate->builtins()->FrameDropper_LiveEdit();
*top_frame_pc_address = code->entry();
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
@@ -1837,8 +1864,7 @@ class MultipleFunctionTarget {
// Drops all call frame matched by target and all frames above them.
template<typename TARGET>
static const char* DropActivationsInActiveThreadImpl(
- TARGET& target, bool do_drop) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate, TARGET& target, bool do_drop) {
Debug* debug = isolate->debug();
Zone zone(isolate);
Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
@@ -1937,8 +1963,8 @@ static const char* DropActivationsInActiveThread(
Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
MultipleFunctionTarget target(shared_info_array, result);
- const char* message =
- DropActivationsInActiveThreadImpl(target, do_drop);
+ const char* message = DropActivationsInActiveThreadImpl(
+ shared_info_array->GetIsolate(), target, do_drop);
if (message) {
return message;
}
@@ -1948,7 +1974,7 @@ static const char* DropActivationsInActiveThread(
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
- if (result->GetElement(i) ==
+ if (result->GetElement(result->GetIsolate(), i) ==
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
@@ -2003,7 +2029,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
- Isolate::Current()->thread_manager()->IterateArchivedThreads(
+ isolate->thread_manager()->IterateArchivedThreads(
&inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
@@ -2055,7 +2081,8 @@ class SingleFrameTarget {
const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
SingleFrameTarget target(frame);
- const char* result = DropActivationsInActiveThreadImpl(target, true);
+ const char* result = DropActivationsInActiveThreadImpl(
+ frame->isolate(), target, true);
if (result != NULL) {
return result;
}
diff --git a/chromium/v8/src/log-utils.cc b/chromium/v8/src/log-utils.cc
index 6bba8823e30..909d4a51396 100644
--- a/chromium/v8/src/log-utils.cc
+++ b/chromium/v8/src/log-utils.cc
@@ -41,14 +41,12 @@ const char* const Log::kLogToConsole = "-";
Log::Log(Logger* logger)
: is_stopped_(false),
output_handle_(NULL),
- mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
}
void Log::Initialize(const char* log_file_name) {
- mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
// --log-all enables all the log flags.
@@ -66,11 +64,6 @@ void Log::Initialize(const char* log_file_name) {
// --prof implies --log-code.
if (FLAG_prof) FLAG_log_code = true;
- // --prof_lazy controls --log-code.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- }
-
// If we're logging anything, we need to open the log file.
if (Log::InitLogAtStart()) {
if (strcmp(log_file_name, kLogToConsole) == 0) {
@@ -116,9 +109,6 @@ FILE* Log::Close() {
DeleteArray(message_buffer_);
message_buffer_ = NULL;
- delete mutex_;
- mutex_ = NULL;
-
is_stopped_ = false;
return result;
}
@@ -126,7 +116,7 @@ FILE* Log::Close() {
Log::MessageBuilder::MessageBuilder(Log* log)
: log_(log),
- sl(log_->mutex_),
+ lock_guard_(&log_->mutex_),
pos_(0) {
ASSERT(log_->message_buffer_ != NULL);
}
diff --git a/chromium/v8/src/log-utils.h b/chromium/v8/src/log-utils.h
index 861a8263b84..ec8415e4b6e 100644
--- a/chromium/v8/src/log-utils.h
+++ b/chromium/v8/src/log-utils.h
@@ -107,7 +107,7 @@ class Log {
private:
Log* log_;
- ScopedLock sl;
+ LockGuard<Mutex> lock_guard_;
int pos_;
};
@@ -142,7 +142,7 @@ class Log {
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- Mutex* mutex_;
+ Mutex mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
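
log-utils switches from a heap-allocated Mutex* plus a hand-rolled ScopedLock to a by-value Mutex guarded by LockGuard<Mutex>. The same RAII shape with standard-library types (V8's own Mutex/LockGuard live in its platform layer):

    #include <mutex>

    class LogLike {
     public:
      void Append(const char* message) {
        // Guard construction acquires the mutex; destruction releases it
        // on every path out of the scope. Was: ScopedLock sl(mutex_).
        std::lock_guard<std::mutex> guard(mutex_);
        // ... format `message` into the shared buffer, flush to the file ...
        (void)message;
      }

     private:
      std::mutex mutex_;  // by value: no OS::CreateMutex()/delete pairing
    };

Holding the mutex by value removes the NULL handling and the create/delete lifecycle that Initialize() and Close() previously had to manage.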
diff --git a/chromium/v8/src/log.cc b/chromium/v8/src/log.cc
index b89c2bfba01..0f0ad40398f 100644
--- a/chromium/v8/src/log.cc
+++ b/chromium/v8/src/log.cc
@@ -556,13 +556,20 @@ class Profiler: public Thread {
} else {
buffer_[head_] = *sample;
head_ = Succ(head_);
- buffer_semaphore_->Signal(); // Tell we have an element.
+ buffer_semaphore_.Signal(); // Signal that we have an element.
}
}
+ virtual void Run();
+
+ // Pause and Resume TickSample data collection.
+ void pause() { paused_ = true; }
+ void resume() { paused_ = false; }
+
+ private:
// Waits for a signal and removes profiling data.
bool Remove(TickSample* sample) {
- buffer_semaphore_->Wait(); // Wait for an element.
+ buffer_semaphore_.Wait(); // Wait for an element.
*sample = buffer_[tail_];
bool result = overflow_;
tail_ = Succ(tail_);
@@ -570,14 +577,6 @@ class Profiler: public Thread {
return result;
}
- void Run();
-
- // Pause and Resume TickSample data collection.
- bool paused() const { return paused_; }
- void pause() { paused_ = true; }
- void resume() { paused_ = false; }
-
- private:
// Returns the next index in the cyclic buffer.
int Succ(int index) { return (index + 1) % kBufferSize; }
@@ -589,7 +588,8 @@ class Profiler: public Thread {
int head_; // Index to the buffer head.
int tail_; // Index to the buffer tail.
bool overflow_; // Tells whether a buffer overflow has occurred.
- Semaphore* buffer_semaphore_; // Sempahore used for buffer synchronization.
+ // Semaphore used for buffer synchronization.
+ Semaphore buffer_semaphore_;
// Tells whether the profiler is engaged, that is, the processing thread is started.
bool engaged_;
@@ -622,13 +622,13 @@ class Ticker: public Sampler {
ASSERT(profiler_ == NULL);
profiler_ = profiler;
IncreaseProfilingDepth();
- if (!FLAG_prof_lazy && !IsActive()) Start();
+ if (!IsActive()) Start();
}
void ClearProfiler() {
- DecreaseProfilingDepth();
profiler_ = NULL;
if (IsActive()) Stop();
+ DecreaseProfilingDepth();
}
private:
@@ -645,7 +645,7 @@ Profiler::Profiler(Isolate* isolate)
head_(0),
tail_(0),
overflow_(false),
- buffer_semaphore_(OS::CreateSemaphore(0)),
+ buffer_semaphore_(0),
engaged_(false),
running_(false),
paused_(false) {
@@ -656,7 +656,7 @@ void Profiler::Engage() {
if (engaged_) return;
engaged_ = true;
- OS::LogSharedLibraryAddresses();
+ OS::LogSharedLibraryAddresses(isolate_);
// Start thread processing the profiler buffer.
running_ = true;
@@ -686,7 +686,7 @@ void Profiler::Disengage() {
Insert(&sample);
Join();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
+ LOG(isolate_, UncheckedStringEvent("profiler", "end"));
}
@@ -709,14 +709,12 @@ Logger::Logger(Isolate* isolate)
ticker_(NULL),
profiler_(NULL),
log_events_(NULL),
- logging_nesting_(0),
- cpu_profiler_nesting_(0),
+ is_logging_(false),
log_(new Log(this)),
ll_logger_(NULL),
jit_logger_(NULL),
listeners_(5),
- is_initialized_(false),
- epoch_(0) {
+ is_initialized_(false) {
}
@@ -867,7 +865,7 @@ void Logger::CodeDeoptEvent(Code* code) {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
msg.WriteToLogFile();
}
@@ -877,7 +875,7 @@ void Logger::TimerEvent(StartEnd se, const char* name) {
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
: "timer-event-end,\"%s\",%ld\n";
msg.Append(format, name, since_epoch);
@@ -906,8 +904,8 @@ void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
const char* Logger::TimerEventScope::v8_recompile_synchronous =
"V8.RecompileSynchronous";
-const char* Logger::TimerEventScope::v8_recompile_parallel =
- "V8.RecompileParallel";
+const char* Logger::TimerEventScope::v8_recompile_concurrent =
+ "V8.RecompileConcurrent";
const char* Logger::TimerEventScope::v8_compile_full_code =
"V8.CompileFullCode";
const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
@@ -976,7 +974,7 @@ void Logger::LogRuntime(Vector<const char> format,
if (c == '%' && i <= format.length() - 2) {
i++;
ASSERT('0' <= format[i] && format[i] <= '9');
- MaybeObject* maybe = args->GetElement(format[i] - '0');
+ MaybeObject* maybe = args->GetElement(isolate_, format[i] - '0');
Object* obj;
if (!maybe->ToObject(&obj)) {
msg.Append("<exception>");
@@ -1233,7 +1231,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line) {
+ Name* source, int line, int column) {
PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line));
if (!is_logging_code_events()) return;
@@ -1252,7 +1250,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
} else {
msg.AppendSymbolName(Symbol::cast(source));
}
- msg.Append(":%d\",", line);
+ msg.Append(":%d:%d\",", line, column);
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
@@ -1500,7 +1498,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
- msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
+ msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
@@ -1521,43 +1519,11 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
}
-bool Logger::IsProfilerPaused() {
- return profiler_ == NULL || profiler_->paused();
-}
-
-
-void Logger::PauseProfiler() {
+void Logger::StopProfiler() {
if (!log_->IsEnabled()) return;
if (profiler_ != NULL) {
- // It is OK to have negative nesting.
- if (--cpu_profiler_nesting_ == 0) {
- profiler_->pause();
- if (FLAG_prof_lazy) {
- ticker_->Stop();
- FLAG_log_code = false;
- LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
- }
- --logging_nesting_;
- }
- }
-}
-
-
-void Logger::ResumeProfiler() {
- if (!log_->IsEnabled()) return;
- if (profiler_ != NULL) {
- if (cpu_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- if (FLAG_prof_lazy) {
- profiler_->Engage();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- LogAccessorCallbacks();
- if (!ticker_->IsActive()) ticker_->Start();
- }
- profiler_->resume();
- }
+ profiler_->pause();
+ is_logging_ = false;
}
}
@@ -1565,7 +1531,7 @@ void Logger::ResumeProfiler() {
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
- PauseProfiler();
+ StopProfiler();
}
@@ -1644,7 +1610,6 @@ void Logger::LogCodeObject(Object* object) {
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::UNARY_OP_IC: // fall through
case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::COMPARE_NIL_IC: // fall through
@@ -1713,6 +1678,8 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
+ int column_num =
+ GetScriptColumnNumber(script, shared->start_position()) + 1;
if (script->name()->IsString()) {
Handle<String> script_name(String::cast(script->name()));
if (line_num > 0) {
@@ -1720,7 +1687,7 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
*code, *shared, NULL,
- *script_name, line_num));
+ *script_name, line_num, column_num));
} else {
// Can't distinguish eval and script here, so always use Script.
PROFILE(isolate_,
@@ -1733,7 +1700,7 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
*code, *shared, NULL,
- isolate_->heap()->empty_string(), line_num));
+ isolate_->heap()->empty_string(), line_num, column_num));
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -1797,21 +1764,21 @@ void Logger::LogAccessorCallbacks() {
}
-static void AddIsolateIdIfNeeded(StringStream* stream) {
- Isolate* isolate = Isolate::Current();
+static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
if (isolate->IsDefaultIsolate()) return;
stream->Add("isolate-%p-", isolate);
}
-static SmartArrayPointer<const char> PrepareLogFileName(const char* file_name) {
+static SmartArrayPointer<const char> PrepareLogFileName(
+ Isolate* isolate, const char* file_name) {
if (strchr(file_name, '%') != NULL ||
- !Isolate::Current()->IsDefaultIsolate()) {
+ !isolate->IsDefaultIsolate()) {
// If there's a '%' in the log file name we have to expand
// placeholders.
HeapStringAllocator allocator;
StringStream stream(&allocator);
- AddIsolateIdIfNeeded(&stream);
+ AddIsolateIdIfNeeded(isolate, &stream);
for (const char* p = file_name; *p; p++) {
if (*p == '%') {
p++;
@@ -1864,13 +1831,8 @@ bool Logger::SetUp(Isolate* isolate) {
FLAG_log_snapshot_positions = true;
}
- // --prof_lazy controls --log-code.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- }
-
SmartArrayPointer<const char> log_file_name =
- PrepareLogFileName(FLAG_logfile);
+ PrepareLogFileName(isolate, FLAG_logfile);
log_->Initialize(*log_file_name);
if (FLAG_ll_prof) {
@@ -1881,20 +1843,16 @@ bool Logger::SetUp(Isolate* isolate) {
ticker_ = new Ticker(isolate, kSamplingIntervalMs);
if (Log::InitLogAtStart()) {
- logging_nesting_ = 1;
+ is_logging_ = true;
}
if (FLAG_prof) {
profiler_ = new Profiler(isolate);
- if (FLAG_prof_lazy) {
- profiler_->pause();
- } else {
- logging_nesting_ = 1;
- profiler_->Engage();
- }
+ is_logging_ = true;
+ profiler_->Engage();
}
- if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks();
+ if (FLAG_log_internal_timer_events || FLAG_prof) timer_.Start();
return true;
}
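
Timing likewise moves from raw tick arithmetic (OS::Ticks() - epoch_) to an ElapsedTimer value. A sketch of an equivalent object using std::chrono in place of V8's platform timer:

    #include <chrono>
    #include <cstdint>

    class ElapsedTimerLike {
     public:
      void Start() { start_ = std::chrono::steady_clock::now(); }
      int64_t ElapsedMicroseconds() const {
        return std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - start_).count();
      }
     private:
      std::chrono::steady_clock::time_point start_;
    };

    // Usage mirrors the patch: timer_.Start() once in SetUp(), then each
    // log line records timer_.ElapsedMicroseconds() instead of subtracting
    // a stored epoch_ from the current tick count.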
diff --git a/chromium/v8/src/log.h b/chromium/v8/src/log.h
index 24d83ef1277..81d45e507b4 100644
--- a/chromium/v8/src/log.h
+++ b/chromium/v8/src/log.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "objects.h"
#include "platform.h"
+#include "platform/elapsed-timer.h"
namespace v8 {
namespace internal {
@@ -248,7 +249,7 @@ class Logger {
Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info,
- Name* source, int line);
+ Name* source, int line, int column);
void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
@@ -321,7 +322,7 @@ class Logger {
void LogTimerEvent(StartEnd se);
static const char* v8_recompile_synchronous;
- static const char* v8_recompile_parallel;
+ static const char* v8_recompile_concurrent;
static const char* v8_compile_full_code;
static const char* v8_execute;
static const char* v8_external;
@@ -340,19 +341,16 @@ class Logger {
void LogRuntime(Vector<const char> format, JSArray* args);
bool is_logging() {
- return logging_nesting_ > 0;
+ return is_logging_;
}
bool is_logging_code_events() {
return is_logging() || jit_logger_ != NULL;
}
- // Pause/Resume collection of profiling data.
- // When data collection is paused, CPU Tick events are discarded until
- // data collection is Resumed.
- void PauseProfiler();
- void ResumeProfiler();
- bool IsProfilerPaused();
+ // Stop collection of profiling data.
+ // When data collection is stopped, CPU Tick events are discarded.
+ void StopProfiler();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
@@ -434,13 +432,9 @@ class Logger {
friend class TimeLog;
friend class Profiler;
template <StateTag Tag> friend class VMState;
-
friend class LoggerTestHelper;
-
- int logging_nesting_;
- int cpu_profiler_nesting_;
-
+ bool is_logging_;
Log* log_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
@@ -450,7 +444,7 @@ class Logger {
// 'true' between SetUp() and TearDown().
bool is_initialized_;
- int64_t epoch_;
+ ElapsedTimer timer_;
friend class CpuProfiler;
};
diff --git a/chromium/v8/src/macros.py b/chromium/v8/src/macros.py
index d50231dcefc..d699c146211 100644
--- a/chromium/v8/src/macros.py
+++ b/chromium/v8/src/macros.py
@@ -42,8 +42,8 @@ const SETTER = 1;
# These definitions must match the index of the properties in objects.h.
const kApiTagOffset = 0;
const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 2;
-const kApiConstructorOffset = 2;
+const kApiSerialNumberOffset = 3;
+const kApiConstructorOffset = 3;
const kApiPrototypeTemplateOffset = 5;
const kApiParentTemplateOffset = 6;
const kApiFlagOffset = 14;
@@ -67,7 +67,9 @@ const msPerMonth = 2592000000;
# For apinatives.js
const kUninitialized = -1;
-const kReadOnlyPrototypeBit = 3; # For FunctionTemplateInfo, matches objects.h
+const kReadOnlyPrototypeBit = 3;
+const kRemovePrototypeBit = 4; # For FunctionTemplateInfo, matches objects.h
+const kDoNotCacheBit = 5; # For FunctionTemplateInfo, matches objects.h
# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
const kInvalidDate = 'Invalid Date';
diff --git a/chromium/v8/src/mark-compact-inl.h b/chromium/v8/src/mark-compact-inl.h
index 10773e7202a..321309c60e2 100644
--- a/chromium/v8/src/mark-compact-inl.h
+++ b/chromium/v8/src/mark-compact-inl.h
@@ -58,7 +58,7 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
ASSERT(IsMarked(obj));
- ASSERT(HEAP->Contains(obj));
+ ASSERT(obj->GetIsolate()->heap()->Contains(obj));
marking_deque_.PushBlack(obj);
}
}
diff --git a/chromium/v8/src/mark-compact.cc b/chromium/v8/src/mark-compact.cc
index 91da8a0117a..263de4878fc 100644
--- a/chromium/v8/src/mark-compact.cc
+++ b/chromium/v8/src/mark-compact.cc
@@ -74,16 +74,18 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
heap_(NULL),
code_flusher_(NULL),
encountered_weak_collections_(NULL),
- code_to_deoptimize_(NULL) { }
+ have_code_to_deoptimize_(false) { }
#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
public:
+ explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
+
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
- CHECK(HEAP->mark_compact_collector()->IsMarked(object));
+ CHECK(heap_->mark_compact_collector()->IsMarked(object));
}
}
}
@@ -97,11 +99,14 @@ class VerifyMarkingVisitor: public ObjectVisitor {
VisitPointer(rinfo->target_object_address());
}
}
+
+ private:
+ Heap* heap_;
};
-static void VerifyMarking(Address bottom, Address top) {
- VerifyMarkingVisitor visitor;
+static void VerifyMarking(Heap* heap, Address bottom, Address top) {
+ VerifyMarkingVisitor visitor(heap);
HeapObject* object;
Address next_object_must_be_here_or_later = bottom;
@@ -129,7 +134,7 @@ static void VerifyMarking(NewSpace* space) {
NewSpacePage* page = it.next();
Address limit = it.has_next() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
- VerifyMarking(page->area_start(), limit);
+ VerifyMarking(space->heap(), page->area_start(), limit);
}
}
@@ -139,7 +144,7 @@ static void VerifyMarking(PagedSpace* space) {
while (it.has_next()) {
Page* p = it.next();
- VerifyMarking(p->area_start(), p->area_end());
+ VerifyMarking(space->heap(), p->area_start(), p->area_end());
}
}
@@ -153,7 +158,7 @@ static void VerifyMarking(Heap* heap) {
VerifyMarking(heap->map_space());
VerifyMarking(heap->new_space());
- VerifyMarkingVisitor visitor;
+ VerifyMarkingVisitor visitor(heap);
LargeObjectIterator it(heap->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
@@ -431,8 +436,8 @@ void MarkCompactCollector::CollectGarbage() {
heap()->weak_embedded_maps_verification_enabled()) {
VerifyWeakEmbeddedMapsInOptimizedCode();
}
- if (FLAG_collect_maps && FLAG_omit_prototype_checks_for_leaf_maps) {
- VerifyOmittedPrototypeChecks();
+ if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
+ VerifyOmittedMapChecks();
}
#endif
@@ -503,13 +508,13 @@ void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
}
-void MarkCompactCollector::VerifyOmittedPrototypeChecks() {
+void MarkCompactCollector::VerifyOmittedMapChecks() {
HeapObjectIterator iterator(heap()->map_space());
for (HeapObject* obj = iterator.Next();
obj != NULL;
obj = iterator.Next()) {
Map* map = Map::cast(obj);
- map->VerifyOmittedPrototypeChecks();
+ map->VerifyOmittedMapChecks();
}
}
#endif // VERIFY_HEAP
@@ -961,22 +966,10 @@ void MarkCompactCollector::Finish() {
// objects (empty string, illegal builtin).
isolate()->stub_cache()->Clear();
- if (code_to_deoptimize_ != Smi::FromInt(0)) {
- // Convert the linked list of Code objects into a ZoneList.
- Zone zone(isolate());
- ZoneList<Code*> codes(4, &zone);
-
- Object *list = code_to_deoptimize_;
- while (list->IsCode()) {
- Code *code = Code::cast(list);
- list = code->code_to_deoptimize_link();
- codes.Add(code, &zone);
- // Destroy the link and don't ever try to deoptimize this code again.
- code->set_code_to_deoptimize_link(Smi::FromInt(0));
- }
- code_to_deoptimize_ = Smi::FromInt(0);
-
- Deoptimizer::DeoptimizeCodeList(isolate(), &codes);
+ if (have_code_to_deoptimize_) {
+ // Some code objects were marked for deoptimization during the GC.
+ Deoptimizer::DeoptimizeMarkedCode(isolate());
+ have_code_to_deoptimize_ = false;
}
}
@@ -1420,8 +1413,8 @@ class MarkCompactMarkingVisitor
INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
HeapObject* obj)) {
#ifdef DEBUG
- ASSERT(Isolate::Current()->heap()->Contains(obj));
- ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
+ ASSERT(collector->heap()->Contains(obj));
+ ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
Map* map = obj->map();
Heap* heap = obj->GetHeap();
@@ -1795,8 +1788,6 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
void MarkCompactCollector::PrepareForCodeFlushing() {
- ASSERT(heap() == Isolate::Current()->heap());
-
// Enable code flushing for non-incremental cycles.
if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
EnableCodeFlushing(!was_marked_incrementally_);
@@ -2590,7 +2581,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
for (int i = new_number_of_transitions * step;
i < number_of_transitions * step;
i++) {
- prototype_transitions->set_undefined(heap_, header + i);
+ prototype_transitions->set_undefined(header + i);
}
}
@@ -2623,16 +2614,9 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
ASSERT(entries->is_code_at(i));
Code* code = entries->code_at(i);
- if (IsMarked(code) && !WillBeDeoptimized(code)) {
- // Insert the code into the code_to_deoptimize linked list.
- Object* next = code_to_deoptimize_;
- if (next != Smi::FromInt(0)) {
- // Record the slot so that it is updated.
- Object** slot = code->code_to_deoptimize_link_slot();
- RecordSlot(slot, slot, next);
- }
- code->set_code_to_deoptimize_link(next);
- code_to_deoptimize_ = code;
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ have_code_to_deoptimize_ = true;
}
entries->clear_at(i);
}
@@ -2743,12 +2727,10 @@ void MarkCompactCollector::MigrateObject(Address dst,
int size,
AllocationSpace dest) {
HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- // TODO(hpayer): Replace that check with an assert.
+ // TODO(hpayer): Replace these checks with asserts.
+ CHECK(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
- // TODO(hpayer): Replace this check with an assert.
- HeapObject* heap_object = HeapObject::FromAddress(src);
- CHECK(heap_->TargetSpace(heap_object) == heap_->old_pointer_space());
Address src_slot = src;
Address dst_slot = dst;
ASSERT(IsAligned(size, kPointerSize));
@@ -2794,13 +2776,6 @@ void MarkCompactCollector::MigrateObject(Address dst,
Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
} else {
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- // Objects in old data space can just be moved by compaction to a different
- // page in old data space.
- // TODO(hpayer): Replace the following check with an assert.
- CHECK(!heap_->old_data_space()->Contains(src) ||
- (heap_->old_data_space()->Contains(dst) &&
- heap_->TargetSpace(HeapObject::FromAddress(src)) ==
- heap_->old_data_space()));
heap()->MoveBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
@@ -3074,13 +3049,14 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
};
-static inline void UpdateSlot(ObjectVisitor* v,
+static inline void UpdateSlot(Isolate* isolate,
+ ObjectVisitor* v,
SlotsBuffer::SlotType slot_type,
Address addr) {
switch (slot_type) {
case SlotsBuffer::CODE_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(v);
+ rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::CODE_ENTRY_SLOT: {
@@ -3094,17 +3070,17 @@ static inline void UpdateSlot(ObjectVisitor* v,
}
case SlotsBuffer::DEBUG_TARGET_SLOT: {
RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::JS_RETURN_SLOT: {
RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
- if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
+ if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(v);
+ rinfo.Visit(isolate, v);
break;
}
default:
@@ -3292,11 +3268,7 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
- // We assume the code_to_deoptimize_link is initialized to undefined.
- // If it is 0, or refers to another Code object, then this code
- // is already linked, or was already linked into the list.
- return code->code_to_deoptimize_link() != heap()->undefined_value()
- || code->marked_for_deoptimization();
+ return code->marked_for_deoptimization();
}
@@ -3483,9 +3455,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- // Update the heads of the native contexts list the code to deoptimize list.
+ // Update the head of the native contexts list in the heap.
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
- updating_visitor.VisitPointer(&code_to_deoptimize_);
heap_->string_table()->Iterate(&updating_visitor);
@@ -4296,7 +4267,8 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
} else {
++slot_idx;
ASSERT(slot_idx < idx_);
- UpdateSlot(&v,
+ UpdateSlot(heap->isolate(),
+ &v,
DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
@@ -4318,7 +4290,8 @@ void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
ASSERT(slot_idx < idx_);
Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(&v,
+ UpdateSlot(heap->isolate(),
+ &v,
DecodeSlotType(slot),
reinterpret_cast<Address>(slots_[slot_idx]));
}
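
Both UpdateSlots variants above now receive heap->isolate() explicitly instead of letting RelocInfo::Visit fall back to Isolate::Current(), a per-call thread-local lookup. A hedged sketch of the pattern with stand-in types, not the real v8::internal signatures:

    struct Isolate {
      bool has_break_points() const { return true; }
      // Stand-in for the old per-thread lookup.
      static Isolate* Current() {
        static thread_local Isolate current;
        return &current;
      }
    };

    // Before: the callee hides a TLS lookup on every invocation.
    void VisitOld() {
      Isolate* isolate = Isolate::Current();
      if (isolate->has_break_points()) { /* visit debug target */ }
    }

    // After: the caller, which already knows its isolate, passes it down.
    void VisitNew(Isolate* isolate) {
      if (isolate->has_break_points()) { /* visit debug target */ }
    }

The matching receiver-side change appears further down in assembler-mips-inl.h, where RelocInfo::Visit gains the Isolate* parameter and drops the TODO about caching the isolate.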
diff --git a/chromium/v8/src/mark-compact.h b/chromium/v8/src/mark-compact.h
index 16e49e10295..df2f7821139 100644
--- a/chromium/v8/src/mark-compact.h
+++ b/chromium/v8/src/mark-compact.h
@@ -638,7 +638,7 @@ class MarkCompactCollector {
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
void VerifyWeakEmbeddedMapsInOptimizedCode();
- void VerifyOmittedPrototypeChecks();
+ void VerifyOmittedMapChecks();
#endif
// Sweep a single page from the given space conservatively.
@@ -949,7 +949,7 @@ class MarkCompactCollector {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_collections_;
- Object* code_to_deoptimize_;
+ bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
diff --git a/chromium/v8/src/marking-thread.cc b/chromium/v8/src/marking-thread.cc
index ac9f944fe7e..58bca3662dd 100644
--- a/chromium/v8/src/marking-thread.cc
+++ b/chromium/v8/src/marking-thread.cc
@@ -39,9 +39,9 @@ MarkingThread::MarkingThread(Isolate* isolate)
: Thread("MarkingThread"),
isolate_(isolate),
heap_(isolate->heap()),
- start_marking_semaphore_(OS::CreateSemaphore(0)),
- end_marking_semaphore_(OS::CreateSemaphore(0)),
- stop_semaphore_(OS::CreateSemaphore(0)) {
+ start_marking_semaphore_(0),
+ end_marking_semaphore_(0),
+ stop_semaphore_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
}
@@ -57,33 +57,33 @@ void MarkingThread::Run() {
DisallowHandleDereference no_deref;
while (true) {
- start_marking_semaphore_->Wait();
+ start_marking_semaphore_.Wait();
if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
+ stop_semaphore_.Signal();
return;
}
- end_marking_semaphore_->Signal();
+ end_marking_semaphore_.Signal();
}
}
void MarkingThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_marking_semaphore_->Signal();
- stop_semaphore_->Wait();
+ start_marking_semaphore_.Signal();
+ stop_semaphore_.Wait();
Join();
}
void MarkingThread::StartMarking() {
- start_marking_semaphore_->Signal();
+ start_marking_semaphore_.Signal();
}
void MarkingThread::WaitForMarkingThread() {
- end_marking_semaphore_->Wait();
+ end_marking_semaphore_.Wait();
}
} } // namespace v8::internal
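
The Run/Stop/StartMarking changes above, together with the header change just below, swap heap-allocated OS::CreateSemaphore() objects for plain value members constructed with an initial count of zero, so the explicit deletes in the destructor can go away. A self-contained sketch of the same ownership pattern, assuming a minimal mutex-and-condvar semaphore because std::counting_semaphore only arrived in C++20:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class Semaphore {
     public:
      explicit Semaphore(int count) : count_(count) {}
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };

    class Worker {
     public:
      // Members are constructed in place: no allocation, no manual delete.
      Worker() : start_(0), stop_(0), thread_([this] { Run(); }) {}
      ~Worker() {
        stop_requested_ = true;
        start_.Signal();  // Wake the loop so it observes the stop flag.
        stop_.Wait();     // Wait for the acknowledgement.
        thread_.join();
      }
      void StartWork() { start_.Signal(); }

     private:
      void Run() {
        for (;;) {
          start_.Wait();
          if (stop_requested_) {
            stop_.Signal();
            return;
          }
          // ... perform one unit of marking work ...
        }
      }
      Semaphore start_;
      Semaphore stop_;
      std::atomic<bool> stop_requested_{false};
      std::thread thread_;  // Declared last so Run() sees live members.
    };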
diff --git a/chromium/v8/src/marking-thread.h b/chromium/v8/src/marking-thread.h
index 9efa3af1326..021cd5b48c7 100644
--- a/chromium/v8/src/marking-thread.h
+++ b/chromium/v8/src/marking-thread.h
@@ -43,24 +43,19 @@ namespace internal {
class MarkingThread : public Thread {
public:
explicit MarkingThread(Isolate* isolate);
+ ~MarkingThread() {}
void Run();
void Stop();
void StartMarking();
void WaitForMarkingThread();
- ~MarkingThread() {
- delete start_marking_semaphore_;
- delete end_marking_semaphore_;
- delete stop_semaphore_;
- }
-
private:
Isolate* isolate_;
Heap* heap_;
- Semaphore* start_marking_semaphore_;
- Semaphore* end_marking_semaphore_;
- Semaphore* stop_semaphore_;
+ Semaphore start_marking_semaphore_;
+ Semaphore end_marking_semaphore_;
+ Semaphore stop_semaphore_;
volatile AtomicWord stop_thread_;
int id_;
static Atomic32 id_counter_;
diff --git a/chromium/v8/src/messages.js b/chromium/v8/src/messages.js
index b586d24882b..2debbf86540 100644
--- a/chromium/v8/src/messages.js
+++ b/chromium/v8/src/messages.js
@@ -228,16 +228,18 @@ function NoSideEffectToString(obj) {
}
}
}
- if (IsNativeErrorObject(obj)) return %_CallFunction(obj, ErrorToString);
+ if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
+ return %_CallFunction(obj, ErrorToString);
+ }
return %_CallFunction(obj, ObjectToString);
}
-
-// To check if something is a native error we need to check the
-// concrete native error types. It is not sufficient to use instanceof
-// since it possible to create an object that has Error.prototype on
-// its prototype chain. This is the case for DOMException for example.
-function IsNativeErrorObject(obj) {
+// To determine whether we can safely stringify an object using ErrorToString
+// without the risk of side-effects, we need to check whether the object is
+// either an instance of a native error type (via '%_ClassOf'), or has $Error
+// in its prototype chain and hasn't overwritten 'toString' with something
+// strange and unusual.
+function CanBeSafelyTreatedAsAnErrorObject(obj) {
switch (%_ClassOf(obj)) {
case 'Error':
case 'EvalError':
@@ -248,7 +250,9 @@ function IsNativeErrorObject(obj) {
case 'URIError':
return true;
}
- return false;
+
+ var objToString = %GetDataProperty(obj, "toString");
+ return obj instanceof $Error && objToString === ErrorToString;
}
@@ -257,7 +261,7 @@ function IsNativeErrorObject(obj) {
// the error to string method. This is to avoid leaking error
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
- if (IsNativeErrorObject(obj)) {
+ if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
return %_CallFunction(obj, ErrorToString);
} else {
return ToString(obj);
diff --git a/chromium/v8/src/mips/assembler-mips-inl.h b/chromium/v8/src/mips/assembler-mips-inl.h
index 2ca00831cfb..2fa6804d198 100644
--- a/chromium/v8/src/mips/assembler-mips-inl.h
+++ b/chromium/v8/src/mips/assembler-mips-inl.h
@@ -338,7 +338,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -351,12 +351,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
diff --git a/chromium/v8/src/mips/assembler-mips.cc b/chromium/v8/src/mips/assembler-mips.cc
index fcf49f110d1..345b642454b 100644
--- a/chromium/v8/src/mips/assembler-mips.cc
+++ b/chromium/v8/src/mips/assembler-mips.cc
@@ -126,7 +126,8 @@ void CpuFeatures::Probe() {
supported_ |= static_cast<uint64_t>(1) << FPU;
#else
// Probe for additional features not already known to be available.
- if (OS::MipsCpuHasFeature(FPU)) {
+ CPU cpu;
+ if (cpu.has_fpu()) {
// This implementation also sets the FPU flags if
// runtime detection of FPU returns true.
supported_ |= static_cast<uint64_t>(1) << FPU;
@@ -237,15 +238,12 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// See assembler-mips-inl.h for inlined constructors.
Operand::Operand(Handle<Object> handle) {
-#ifdef DEBUG
- Isolate* isolate = Isolate::Current();
-#endif
AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!isolate->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
+ ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -2203,8 +2201,7 @@ void Assembler::set_target_address_at(Address pc, Address target) {
Instr instr3 = instr_at(pc + 2 * kInstrSize);
uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >>
- (kImm26Bits + kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
uint32_t target_field =
static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
bool patched_jump = false;
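
The in_range rewrite above also fixes an operator-precedence bug: >> binds tighter than ^, so the old expression compared ipc against itarget >> 28 instead of checking that both addresses share one 256 MB segment (kImm26Bits + kImmFieldShift = 28 bits of J-type reach). A standalone sketch of the corrected check, assuming those two constants:

    #include <cassert>
    #include <cstdint>

    const int kImm26Bits = 26;     // J-type immediate width.
    const int kImmFieldShift = 2;  // Instructions are 4-byte aligned.

    // A MIPS J/JAL can only reach targets whose upper 4 address bits match
    // the pc, i.e. both must lie in the same 2^28-byte (256 MB) segment.
    bool InJumpRange(uint32_t pc, uint32_t target) {
      return ((pc ^ target) >> (kImm26Bits + kImmFieldShift)) == 0;
    }

    int main() {
      assert(InJumpRange(0x10000000, 0x1FFFFFFC));   // Same segment.
      assert(!InJumpRange(0x10000000, 0x20000000));  // Boundary crossed.
    }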
diff --git a/chromium/v8/src/mips/assembler-mips.h b/chromium/v8/src/mips/assembler-mips.h
index 8d533b36f40..cb0896a8ded 100644
--- a/chromium/v8/src/mips/assembler-mips.h
+++ b/chromium/v8/src/mips/assembler-mips.h
@@ -358,6 +358,11 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
+ inline int32_t immediate() const {
+ ASSERT(!is_reg());
+ return imm32_;
+ }
+
Register rm() const { return rm_; }
private:
diff --git a/chromium/v8/src/mips/builtins-mips.cc b/chromium/v8/src/mips/builtins-mips.cc
index 3f5dca00096..3aabd97b972 100644
--- a/chromium/v8/src/mips/builtins-mips.cc
+++ b/chromium/v8/src/mips/builtins-mips.cc
@@ -123,10 +123,10 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for InternalArray function",
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, "Unexpected initial map for InternalArray function",
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
t0, Operand(MAP_TYPE));
}
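
This and the following hunks replace string literals in __ Assert, __ Check, and __ Abort with named constants such as kUnexpectedInitialMapForInternalArrayFunction, so the generated code embeds a small enum value instead of a C string pointer. A hedged sketch of the usual macro-list pattern behind such constants; the names and messages here are illustrative, not V8's actual list:

    #include <cstdio>

    #define BAILOUT_REASON_LIST(V)                                  \
      V(kUnexpectedInitialMapForInternalArrayFunction,              \
        "Unexpected initial map for InternalArray function")        \
      V(kUnexpectedStringFunction, "Unexpected String function")

    #define DECLARE_ENUM(Name, message) Name,
    enum BailoutReason { BAILOUT_REASON_LIST(DECLARE_ENUM) kLastReason };
    #undef DECLARE_ENUM

    #define DECLARE_MESSAGE(Name, message) message,
    const char* const kMessages[] = {BAILOUT_REASON_LIST(DECLARE_MESSAGE)};
    #undef DECLARE_MESSAGE

    const char* GetBailoutReason(BailoutReason reason) {
      return kMessages[reason];
    }

    int main() {
      std::printf("%s\n", GetBailoutReason(kUnexpectedStringFunction));
    }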
@@ -153,10 +153,10 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function (1)",
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function (2)",
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
t0, Operand(MAP_TYPE));
}
@@ -185,7 +185,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register function = a1;
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
- __ Assert(eq, "Unexpected String function", function, Operand(a2));
+ __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2));
}
// Load the first arguments in a0 and get rid of the rest.
@@ -231,10 +231,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ LoadGlobalFunctionInitialMap(function, map, t0);
if (FLAG_debug_code) {
__ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Assert(eq, "Unexpected string wrapper instance size",
+ __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
t0, Operand(JSValue::kSize >> kPointerSizeLog2));
__ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Assert(eq, "Unexpected unused properties of string wrapper",
+ __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
t0, Operand(zero_reg));
}
__ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -299,6 +299,24 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+ // Function is also the parameter to the runtime call.
+ __ push(a1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore receiver.
+ __ pop(a1);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -308,59 +326,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(t0));
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
+ __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
- // Do a tail-call of the compiled function.
- __ Jump(t9);
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
}
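
The comment above describes the compromise: polling "is the optimized code ready?" on every entry would be expensive, but the stack limit is already the VM's cheap interrupt cue, so readiness is only re-checked once sp has crossed it. A toy sketch of that gating logic; both helpers are hypothetical stand-ins:

    #include <cstdint>

    static bool TryInstallRecompiledCode() { return false; }  // Stand-in.
    static void RunSharedCode() {}                            // Stand-in.

    void EnterFunction(uintptr_t sp, uintptr_t stack_limit) {
      // Cheap test first: a raised stack limit is how the VM signals that
      // something is pending (interrupt, stack check, or ready code).
      if (sp < stack_limit) {
        if (TryInstallRecompiledCode()) return;  // Jump to optimized code.
      }
      RunSharedCode();  // Fast path: one compare, no runtime call.
    }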
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- __ push(a1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(t1);
- // Restore receiver.
- __ pop(a1);
-
- // Tear down internal frame.
- }
-
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
@@ -489,7 +475,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ addu(a0, t5, t0);
// a0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ Assert(le, "Unexpected number of pre-allocated property fields.",
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
a0, Operand(t6));
}
__ InitializeFieldsWithFiller(t5, a0, t7);
@@ -522,7 +508,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Done if no extra properties are to be allocated.
__ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, "Property allocation count failed.",
+ __ Assert(greater_equal, kPropertyAllocationCountFailed,
a3, Operand(zero_reg));
// Scale the number of elements by pointer size and add the header for
@@ -569,7 +555,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
} else if (FLAG_debug_code) {
__ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t8));
}
__ jmp(&entry);
__ bind(&loop);
@@ -815,60 +801,17 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- // Call the runtime function.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(t9);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
-
- // Tear down temporary frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(t9);
}
@@ -1000,27 +943,44 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
+ // Look up the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Look up and calculate the pc offset.
+ __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Subu(a1, a1, a2);
+ __ SmiTag(a1);
+
+ // Pass both function and pc offset as arguments.
__ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(a1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
+ // If the code object is null, just return to the unoptimized code.
+ __ Ret(eq, v0, Operand(Smi::FromInt(0)));
- // Untag the AST id and push it on the stack.
- __ SmiUntag(v0);
- __ push(v0);
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiUntag(a1);
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ addu(v0, v0, a1);
+ __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
}
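
The new OSR sequence works entirely in terms of tagged code objects: the pc offset into the unoptimized code is caller_pc minus (code + header - tag), and the address it finally "returns" to is code + header + osr_pc_offset inside the optimized code. The same arithmetic as a hedged C++ sketch with an assumed header size:

    #include <cassert>
    #include <cstdint>

    const uint32_t kHeapObjectTag = 1;    // Tagged pointer = address + 1.
    const uint32_t kCodeHeaderSize = 64;  // Assumed size, illustration only.

    // Offset of a return address into a code object's instruction stream.
    uint32_t PcOffset(uint32_t caller_pc, uint32_t tagged_code) {
      uint32_t instr_start = tagged_code - kHeapObjectTag + kCodeHeaderSize;
      return caller_pc - instr_start;
    }

    // OSR entry: skip the header and add the offset recorded in the code
    // object's deoptimization data.
    uint32_t OsrEntry(uint32_t tagged_code, uint32_t osr_pc_offset) {
      return tagged_code - kHeapObjectTag + kCodeHeaderSize + osr_pc_offset;
    }

    int main() {
      uint32_t code = 0x10000001;  // Fake tagged code object address.
      assert(PcOffset(OsrEntry(code, 0x40), code) == 0x40);
    }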
diff --git a/chromium/v8/src/mips/code-stubs-mips.cc b/chromium/v8/src/mips/code-stubs-mips.cc
index 0e1b224eadf..0589bf01624 100644
--- a/chromium/v8/src/mips/code-stubs-mips.cc
+++ b/chromium/v8/src/mips/code-stubs-mips.cc
@@ -39,6 +39,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -247,17 +258,6 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
-void UnaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { a0 };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(UnaryOpIC_Miss);
-}
-
-
void StoreGlobalStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -321,134 +321,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(a3);
-
- // Attempt to allocate new JSFunction in new space.
- __ Allocate(JSFunction::kSize, v0, a1, a2, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
- __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
- __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
- __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
- __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
- __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ lw(a1,
- FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ And(at, a1, a1);
- __ Branch(&check_optimized, ne, at, Operand(zero_reg));
- }
- __ bind(&install_unoptimized);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Return result. The argument function info has been popped already.
- __ Ret(USE_DELAY_SLOT);
- __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
-
- // a2 holds native context, a1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into t0.
- __ lw(t0, FieldMemOperand(a1, SharedFunctionInfo::kFirstCodeSlot));
- __ lw(t1, FieldMemOperand(a1, SharedFunctionInfo::kFirstContextSlot));
- __ Branch(&install_optimized, eq, a2, Operand(t1));
-
- // Iterate through the rest of map backwards. t0 holds an index as a Smi.
- Label loop;
- __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ Branch(&install_unoptimized, eq, t0,
- Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
- __ Subu(t0, t0, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ lw(t1, MemOperand(t1));
- __ Branch(&loop, ne, a2, Operand(t1));
- // Hit: fetch the optimized code.
- __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, t1, Operand(at));
- __ Addu(t1, t1, Operand(kPointerSize));
- __ lw(t0, MemOperand(t1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, t2, t3);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(t0, v0);
- __ RecordWriteContextSlot(
- a2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- t0,
- a1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Push(cp, a3, t0);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -520,8 +392,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(a3, &after_sentinel);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ Assert(eq, message, a3, Operand(zero_reg));
+ __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
}
__ lw(a3, GlobalObjectOperand());
__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
@@ -650,291 +521,135 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- __ sra(scratch1, a0, kSmiTagSize);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ sra(scratch1, a1, kSmiTagSize);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- if (destination == kCoreRegisters) {
- __ Move(a2, a3, f14);
- __ Move(a0, a1, f12);
- }
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (destination == kFPURegisters) {
- // Load the double from tagged HeapNumber to double register.
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
- // point in generating even more instructions.
- __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ lw(dst2, FieldMemOperand(object,
- HeapNumber::kValueOffset + kPointerSize));
- }
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Label out_of_range, only_low, negate, done;
+ Register input_reg = source();
+ Register result_reg = destination();
+
+ int double_offset = offset();
+ // Account for saved regs if input is sp.
+ if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
+
+ Register scratch =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg);
+ Register scratch2 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+ Register scratch3 =
+ GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kLithiumScratchDouble.low();
+ DoubleRegister double_input = f12;
+
+ __ Push(scratch, scratch2, scratch3);
+
+ __ ldc1(double_input, MemOperand(input_reg, double_offset));
+
+ if (!skip_fastpath()) {
+ // Clear cumulative exception flags and save the FCSR.
+ __ cfc1(scratch2, FCSR);
+ __ ctc1(zero_reg, FCSR);
+ // Try a conversion to a signed integer.
+ __ trunc_w_d(double_scratch, double_input);
+ __ mfc1(result_reg, double_scratch);
+ // Retrieve and restore the FCSR.
+ __ cfc1(scratch, FCSR);
+ __ ctc1(scratch2, FCSR);
+ // Check for overflow and NaNs.
+ __ And(
+ scratch, scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
+ | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions we are done.
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+ }
+
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
+ __ Move(input_low, input_high, double_input);
+
+ Label normal_exponent, restore_sign;
+ // Extract the biased exponent in result.
+ __ Ext(result_reg,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
+ __ Movz(result_reg, zero_reg, scratch);
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Subu(result_reg,
+ result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ mov(result_reg, zero_reg);
__ Branch(&done);
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- // Convert smi to double using FPU instructions.
- __ mtc1(scratch1, dst);
- __ cvt_d_w(dst, dst);
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ Move(dst1, dst2, dst);
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
- Register scratch2,
- FPURegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
-
- __ mtc1(int_scratch, single_scratch);
- __ cvt_d_w(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DoubleRegister double_dst,
- DoubleRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
- __ Branch(&done);
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+ // to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ mov(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32-bit range.
+ __ sllv(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ __ li(at, 32);
+ __ subu(scratch, at, scratch);
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ __ Subu(scratch, zero_reg, scratch);
+ __ sllv(input_low, input_low, scratch);
+ __ Branch(&shift_done);
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- // Load the double value.
- __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
- __ bind(&done);
-}
+ __ bind(&pos_shift);
+ __ srlv(input_low, input_low, scratch);
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DoubleRegister double_scratch0,
- DoubleRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done, maybe_undefined;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- // Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- __ Branch(&done);
-
- __ bind(&maybe_undefined);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- __ li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ mov(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Subu(result_reg, zero_reg, input_high);
+ __ Movz(result_reg, input_high, scratch);
__ bind(&done);
-}
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- if (!IsMipsSoftFloatABI) {
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- if (!IsMipsSoftFloatABI) {
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- // Double returned in registers v0 and v1.
- __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
- __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
- }
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
+ __ Pop(scratch, scratch2, scratch3);
+ __ Ret();
}
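
DoubleToIStub's manual path does in registers what this self-contained mirror does with the raw IEEE-754 bits: peel off the biased exponent, restore the implicit mantissa bit, shift the mantissa into the low 32 bits, and reapply the sign. A simplified hedged sketch with ECMAScript ToInt32-style modular wrap; the stub itself typically only reaches this logic after trunc_w_d has signalled an exception:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int32_t TruncateDoubleToInt32(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));

      int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF);
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      bool negative = (bits >> 63) != 0;

      if (exponent == 0x7FF) return 0;  // Infinity and NaN map to 0.
      exponent -= 1023;                 // Remove the bias.
      if (exponent < 0) return 0;       // |input| < 1 truncates to 0.
      if (exponent > 83) return 0;      // Low 32 bits are all zero.

      mantissa |= uint64_t{1} << 52;    // Restore the implicit 1.
      // The integer value is mantissa * 2^(exponent - 52); keep 32 bits.
      uint32_t magnitude = (exponent >= 52)
          ? static_cast<uint32_t>(mantissa << (exponent - 52))
          : static_cast<uint32_t>(mantissa >> (52 - exponent));
      uint32_t wrapped = negative ? 0u - magnitude : magnitude;
      return static_cast<int32_t>(wrapped);  // Two's-complement result.
    }

    int main() {
      assert(TruncateDoubleToInt32(1.5) == 1);
      assert(TruncateDoubleToInt32(-2.9) == -2);
      assert(TruncateDoubleToInt32(4294967298.0) == 2);  // 2^32 + 2 wraps.
      assert(TruncateDoubleToInt32(2147483648.0) == -2147483648);
    }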
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(a1) &&
the_heap_number_.is(v0) &&
@@ -1601,6 +1316,42 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
+// Generates code to call a C function to do a double operation.
+// This code never falls through, but returns with a heap number containing
+// the result in v0.
+// Register heap_number_result must be a heap number in which the
+// result of the operation will be stored.
+// Requires the following layout on entry:
+// a0: Left value (least significant part of mantissa).
+// a1: Left value (sign, exponent, top of mantissa).
+// a2: Right value (least significant part of mantissa).
+// a3: Right value (sign, exponent, top of mantissa).
+static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Assert that heap_number_result is saved.
+ // We currently always use s0 to pass it.
+ ASSERT(heap_number_result.is(s0));
+
+ // Push the current return address before the C call.
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
+ // Store answer in the overwritable heap number.
+ // Double returned in register f0.
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ pop(ra);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
+}
+
+
void BinaryOpStub::Initialize() {
platform_specific_bit_ = true; // FPU is a base requirement for V8.
}
@@ -1805,7 +1556,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
Register right = a0;
Register scratch1 = t3;
Register scratch2 = t5;
- Register scratch3 = t0;
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands) {
@@ -1828,49 +1578,41 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
- // depending on operation.
- FloatingPointHelper::Destination destination =
- op != Token::MOD ?
- FloatingPointHelper::kFPURegisters :
- FloatingPointHelper::kCoreRegisters;
-
// Allocate new heap number for result.
Register result = s0;
BinaryOpStub_GenerateHeapResultAllocation(
masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
- // Load the operands.
+ // Load left and right operands into f12 and f14.
if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ __ SmiUntag(scratch1, a0);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ SmiUntag(scratch1, a1);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
} else {
- // Load right operand to f14 or a2/a3.
+ // Load right operand to f14.
if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, f14, f16, a2, a3, heap_number_map,
- scratch1, scratch2, f2, miss);
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, miss);
} else {
Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, f14, a2, a3, heap_number_map,
- scratch1, scratch2, fail);
+ __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
}
// Load left operand to f12. This keeps a0/a1 intact if it
// jumps to |miss|.
if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, f12, f16, a0, a1, heap_number_map,
- scratch1, scratch2, f2, miss);
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, miss);
} else {
Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, f12, a0, a1, heap_number_map,
- scratch1, scratch2, fail);
+ __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
}
}
// Calculate the result.
- if (destination == FloatingPointHelper::kFPURegisters) {
+ if (op != Token::MOD) {
// Using FPU registers:
// f12: Left value.
// f14: Right value.
@@ -1899,10 +1641,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ mov(v0, result);
} else {
// Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
- result,
- scratch1);
+ CallCCodeForDoubleOperation(masm, op, result, scratch1);
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
@@ -1920,24 +1659,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ SmiUntag(a2, right);
} else {
// Convert operands to 32-bit integers. Right in a2 and left in a3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- not_numbers);
+ __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
+ __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
}
Label result_not_a_smi;
switch (op) {
@@ -2171,36 +1894,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination = (op_ != Token::MOD)
- ? FloatingPointHelper::kFPURegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- f14,
- f16,
- a2,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- f12,
- f16,
- t0,
- t1,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
-
- if (destination == FloatingPointHelper::kFPURegisters) {
+
+ __ LoadNumberAsInt32Double(
+ right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
+ __ LoadNumberAsInt32Double(
+ left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
+
+ if (op_ != Token::MOD) {
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -2277,10 +1977,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ BranchF(&transition, NULL, ne, f14, f16);
}
- // We preserved a0 and a1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(t1, t0);
-
Label pop_and_call_runtime;
// Allocate a heap number to store the result.
@@ -2293,12 +1989,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&pop_and_call_runtime,
mode_);
- // Load the left value from the value saved on the stack.
- __ Pop(a1, a0);
-
// Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
+ CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
@@ -2318,30 +2010,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::SHR:
case Token::SHL: {
Label return_heap_number;
- Register scratch3 = t1;
// Convert operands to 32-bit integers. Right in a2 and left in a3. The
// registers a0 and a1 (right and left) are preserved for the runtime
// call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- a2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- f0,
- f2,
- &transition);
+ __ LoadNumberAsInt32(
+ left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
+ __ LoadNumberAsInt32(
+ right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
// The ECMA-262 standard specifies that, for shift operations, only the
// 5 least significant bits of the shift value should be used.
@@ -2828,16 +2503,6 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = a2;
@@ -3058,8 +2723,8 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
+ return (!save_doubles_ || isolate->fp_stubs_generated()) &&
result_size_ == 1;
}
@@ -4279,12 +3944,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ And(t0, regexp_data, Operand(kSmiTagMask));
__ Check(nz,
- "Unexpected type for RegExp data, FixedArray expected",
+ kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
Operand(zero_reg));
__ GetObjectType(regexp_data, a0, a0);
__ Check(eq,
- "Unexpected type for RegExp data, FixedArray expected",
+ kUnexpectedTypeForRegExpDataFixedArrayExpected,
a0,
Operand(FIXED_ARRAY_TYPE));
}
@@ -4639,7 +4304,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Sequential strings have already been ruled out.
__ And(at, a0, Operand(kIsIndirectStringMask));
__ Assert(eq,
- "external string expected, but not found",
+ kExternalStringExpectedButNotFound,
at,
Operand(zero_reg));
}
@@ -4820,12 +4485,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
1 << 5 | // a1
1 << 6; // a2
+ __ SmiTag(a0);
__ MultiPush(kSavedRegs);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
__ MultiPop(kSavedRegs);
+ __ SmiUntag(a0);
}
__ Branch(&done);
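
The SmiTag/SmiUntag pair added above is GC hygiene: a0 carries a raw argument count, and everything pushed across the stub call must look like a tagged value to a stack-walking GC. On 32-bit V8 a Smi is the integer shifted left one bit with a zero tag bit; a minimal sketch of the encoding:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

    // 32-bit Smi encoding: value << 1 with a zero low (tag) bit, so a Smi
    // can never be mistaken for a heap pointer, which carries an odd tag.
    intptr_t SmiTag(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }
    int32_t SmiUntag(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiTagSize);
    }

    int main() {
      intptr_t smi = SmiTag(3);          // Safe to push across a GC.
      assert((smi & kSmiTagMask) == 0);  // Tag bit identifies it as a Smi.
      assert(SmiUntag(smi) == 3);
    }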
@@ -5020,7 +4687,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5069,7 +4736,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -5106,7 +4773,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5117,7 +4784,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ Branch(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -5172,7 +4839,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
// that it is.
__ And(scratch4, dest, Operand(kPointerAlignmentMask));
__ Check(eq,
- "Destination of copy not aligned.",
+ kDestinationOfCopyNotAligned,
scratch4,
Operand(zero_reg));
}
@@ -5372,7 +5039,7 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
// Must be the hole (deleted entry).
if (FLAG_debug_code) {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "oddball in string table is not undefined or the hole",
+ __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
scratch, Operand(candidate));
}
__ jmp(&next_probe[i]);
@@ -6194,7 +5861,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -6203,23 +5869,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
- &not_cached);
+ slow);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
- __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
- __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ And(scratch2, scratch2, scratch4);
- __ Branch(slow, ne, scratch2, Operand(scratch4));
- __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ sw(arg, MemOperand(sp, stack_offset));
-
__ bind(&done);
}
@@ -6580,7 +6232,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// filled with kZapValue by the GC.
// Dereference the address and check for this.
__ lw(t0, MemOperand(t9));
- __ Assert(ne, "Received invalid return address.", t0,
+ __ Assert(ne, kReceivedInvalidReturnAddress, t0,
Operand(reinterpret_cast<uint32_t>(kZapValue)));
}
__ Jump(t9);
@@ -6887,8 +6539,6 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
{ REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
@@ -6920,7 +6570,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7224,10 +6874,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3,
- // Overwrites all regs after this.
- t1, t2, t3, t5, a2,
- &slow_elements);
+ __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
@@ -7299,6 +6946,9 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
+ // It additionally takes an isolate as a third parameter.
+ __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
+
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ li(at, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
@@ -7318,87 +6968,125 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ Branch(&next, ne, a3, Operand(kind));
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // a2 - type info cell
- // a3 - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// a0 - number of arguments
// a1 - constructor?
// sp[0] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
- // is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
- __ And(at, a3, Operand(1));
- __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ And(at, a3, Operand(1));
+ __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+ }
// look at the first argument
__ lw(t1, MemOperand(sp, 0));
__ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ Addu(a3, a3, Operand(1));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&normal_sequence, eq, a2, Operand(at));
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ lw(t1, FieldMemOperand(t1, 0));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&normal_sequence, ne, t1, Operand(at));
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ SmiTag(a3);
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(a3);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ Branch(&next, ne, a3, Operand(kind));
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ Addu(a3, a3, Operand(1));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+
+ if (FLAG_debug_code) {
+ __ lw(t1, FieldMemOperand(t1, 0));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ }
+
+ // Save the resulting elements kind in type info
+ __ SmiTag(a3);
+ __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+ __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ SmiUntag(a3);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ Branch(&next, ne, a3, Operand(kind));
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
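The DONT_OVERRIDE branch above leans on the layout of the fast ElementsKind sequence, asserted at its top: holey kinds are exactly the odd values, so "already holey?" is a one-instruction test and "make holey" is +1. A stand-alone sketch of that encoding:

// Values taken from the ASSERTs in the hunk above.
enum ElementsKind {
  FAST_SMI_ELEMENTS = 0, FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2, FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4, FAST_HOLEY_DOUBLE_ELEMENTS = 5
};

inline bool IsHoleyElementsKind(ElementsKind kind) {
  return (kind & 1) != 0;  // __ And(at, a3, Operand(1)) in the stub
}

inline ElementsKind GetHoleyElementsKind(ElementsKind kind) {
  // Non-holey kinds are even, so +1 and |1 coincide; the stub uses Addu.
  return static_cast<ElementsKind>(kind | 1);
}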
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7431,6 +7119,33 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+ __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc (only if argument_count_ == ANY)
@@ -7447,10 +7162,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ And(at, a3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function",
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function",
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
t0, Operand(MAP_TYPE));
// We should either have undefined in a2 or a valid cell.
@@ -7459,54 +7174,29 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&okay_here, eq, a2, Operand(at));
__ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, "Expected property cell in register a2",
+ __ Assert(eq, kExpectedPropertyCellInRegisterA2,
a3, Operand(cell_map));
__ bind(&okay_here);
}
- Label no_info, switch_ready;
+ Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
__ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
- // The type cell may have undefined in its value.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&no_info, eq, a3, Operand(at));
-
- // The type cell has either an AllocationSite or a JSFunction.
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ lw(t0, FieldMemOperand(a3, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&no_info, ne, t0, Operand(at));
__ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ li(a3, Operand(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ And(at, a0, a0);
- __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- CreateArrayDispatchOneArgument(masm);
-
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
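The rewritten Generate drops the old no_info/switch_ready kind normalization: feedback that is undefined, or anything other than an AllocationSite, now falls through to a second, site-free copy of the dispatch instead of being massaged into a default kind for a shared switch. A toy model of the folded test (field names here are illustrative, not V8's):

struct FeedbackCell {
  bool is_undefined;
  bool holds_allocation_site;
  int transition_kind;  // SmiUntag of kTransitionInfoOffset
};

// Returns true and the recorded kind only for genuine AllocationSite
// feedback; every other cell takes the DISABLE_ALLOCATION_SITES path.
bool TryGetFeedbackKind(const FeedbackCell& cell, int* kind_out) {
  if (cell.is_undefined || !cell.holds_allocation_site) return false;
  *kind_out = cell.transition_kind;
  return true;
}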
@@ -7559,10 +7249,10 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ And(at, a3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function",
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
- __ Assert(eq, "Unexpected initial map for Array function",
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
t0, Operand(MAP_TYPE));
}
@@ -7579,7 +7269,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
Label done;
__ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
__ Assert(
- eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
+ eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
a3, Operand(FAST_HOLEY_ELEMENTS));
__ bind(&done);
}
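One pattern recurs through the rest of this patch: Assert, Check, and Abort messages turn from C-string literals into BailoutReason enumerators (kUnexpectedInitialMapForArrayFunction and friends). A minimal sketch of the likely shape, assuming an X-macro table pairing each enumerator with its text:

#define ERROR_MESSAGES_LIST(V)                                       \
  V(kUnexpectedInitialMapForArrayFunction,                           \
    "Unexpected initial map for Array function")                     \
  V(kExpectedPropertyCellInRegisterA2,                               \
    "Expected property cell in register a2")

#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {
  ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS)
  kLastErrorMessage
};
#undef ERROR_MESSAGES_CONSTANTS

#define ERROR_MESSAGES_TEXTS(C, T) T,
inline const char* GetBailoutReason(BailoutReason reason) {
  static const char* const messages[] = {
    ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)
  };
  return messages[reason];
}
#undef ERROR_MESSAGES_TEXTS

An enum value fits in an immediate and survives into bailout reporting, where the old scheme embedded the full string in every generated code object.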
diff --git a/chromium/v8/src/mips/code-stubs-mips.h b/chromium/v8/src/mips/code-stubs-mips.h
index 1ae1d3454fb..8c9d22ae5dd 100644
--- a/chromium/v8/src/mips/code-stubs-mips.h
+++ b/chromium/v8/src/mips/code-stubs-mips.h
@@ -69,7 +69,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -240,7 +240,7 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
ASSERT(SignRegisterBits::is_valid(sign_.code()));
}
- bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
@@ -316,7 +316,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -391,7 +391,7 @@ class RecordWriteStub: public PlatformCodeStub {
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
@@ -434,19 +434,6 @@ class RecordWriteStub: public PlatformCodeStub {
Register scratch0_;
Register scratch1_;
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
friend class RecordWriteStub;
};
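The deleted ad-hoc GetRegThatIsNotOneOf is replaced by a shared GetRegisterThatIsNotOneOf; presumably the hoisted helper keeps the same linear scan over the allocatable set, along these lines (Register API as used in the deleted body):

Register GetRegisterThatIsNotOneOf(Register r1, Register r2, Register r3) {
  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (candidate.is(r1) || candidate.is(r2) || candidate.is(r3)) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}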
@@ -527,119 +514,6 @@ class DirectCEntryStub: public PlatformCodeStub {
bool NeedsImmovableCode() { return true; }
};
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kFPURegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
- // and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- FPURegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- FPURegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- FPURegister double_dst,
- FPURegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point value in the 32-bit integer range that are not exact integer
- // won't be converted.
- // scratch3 is not used when FPU is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when FPU is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in v0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // a0: Left value (least significant part of mantissa).
- // a1: Left value (sign, exponent, top of mantissa).
- // a2: Right value (least significant part of mantissa).
- // a3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- // Loads the objects from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
- // must be supported. If kCoreRegisters are requested and FPU is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
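The whole FloatingPointHelper class goes away along with the other non-FPU code paths dropped in this update, leaving the kCoreRegisters destinations unreachable. For reference, the ECMAScript 9.5 truncation that the deleted ConvertNumberToInt32 implemented can be sketched portably as:

#include <cmath>
#include <cstdint>

// ECMAScript ToInt32: truncate toward zero, then wrap modulo 2^32 into
// the signed range [-2^31, 2^31 - 1]. NaN and infinities map to 0.
int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);
  // fmod keeps the sign of the dividend; bring negatives into [0, 2^32)
  // so the integer conversion below is well-defined.
  double wrapped = std::fmod(truncated, 4294967296.0);
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}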
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
diff --git a/chromium/v8/src/mips/codegen-mips.cc b/chromium/v8/src/mips/codegen-mips.cc
index 3f74154f58a..5c847fc8f62 100644
--- a/chromium/v8/src/mips/codegen-mips.cc
+++ b/chromium/v8/src/mips/codegen-mips.cc
@@ -205,7 +205,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
__ sll(scratch, t1, 2);
__ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
- __ Allocate(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
+ __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
// t2: destination FixedDoubleArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
@@ -289,7 +289,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiTag(t5);
__ Or(t5, t5, Operand(1));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array", at, Operand(t5));
+ __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
}
__ sw(t0, MemOperand(t3)); // mantissa
__ sw(t1, MemOperand(t3, kIntSize)); // exponent
@@ -489,7 +489,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ And(at, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found",
+ __ Assert(eq, kExternalStringExpectedButNotFound,
at, Operand(zero_reg));
}
// Rule out short external strings.
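The GenerateSmiToDouble change above is a correctness fix rather than a rename: the new FixedDoubleArray is now allocated with DOUBLE_ALIGNMENT, since its unboxed doubles must start on an 8-byte boundary. The alignment arithmetic amounts to (a sketch; constant names assumed):

#include <cstdint>

const uintptr_t kDoubleAlignment = 8;
const uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

// Round an allocation top up so the first double field is 8-byte aligned;
// the allocator fills any skipped word with a one-word filler object.
uintptr_t AlignForDoubles(uintptr_t top) {
  return (top + kDoubleAlignmentMask) & ~kDoubleAlignmentMask;
}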
diff --git a/chromium/v8/src/mips/codegen-mips.h b/chromium/v8/src/mips/codegen-mips.h
index 240b02ce44b..32d7d0d65c7 100644
--- a/chromium/v8/src/mips/codegen-mips.h
+++ b/chromium/v8/src/mips/codegen-mips.h
@@ -46,8 +46,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
- CodeGenerator() {
- InitializeAstVisitor();
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@@ -63,7 +63,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
diff --git a/chromium/v8/src/mips/cpu-mips.cc b/chromium/v8/src/mips/cpu-mips.cc
index d13b23330fe..49d0b377ebc 100644
--- a/chromium/v8/src/mips/cpu-mips.cc
+++ b/chromium/v8/src/mips/cpu-mips.cc
@@ -87,14 +87,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif // USE_SIMULATOR.
}
-
-void CPU::DebugBreak() {
-#ifdef __mips
- asm volatile("break");
-#endif // #ifdef __mips
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/mips/debug-mips.cc b/chromium/v8/src/mips/debug-mips.cc
index 30cc4db6340..1535231dd81 100644
--- a/chromium/v8/src/mips/debug-mips.cc
+++ b/chromium/v8/src/mips/debug-mips.cc
@@ -60,7 +60,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// li and Call pseudo-instructions emit two instructions each.
patcher.masm()->li(v8::internal::t9,
Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_return()->entry())));
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry())));
patcher.masm()->Call(v8::internal::t9);
patcher.masm()->nop();
patcher.masm()->nop();
@@ -105,7 +105,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// call t9 (jalr t9 / nop instruction pair)
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- Isolate::Current()->debug()->debug_break_slot()->entry())));
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry())));
patcher.masm()->Call(v8::internal::t9);
}
@@ -142,8 +142,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ And(at, reg, 0xc0000000);
- __ Assert(
- eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ __ Assert(eq, kUnableToEncodeValueAsSmi, at, Operand(zero_reg));
}
__ sll(reg, reg, kSmiTagSize);
}
@@ -325,12 +324,12 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
+ masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
}
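Both patch sites above trade the thread-local Isolate::Current() lookup for an isolate reached through an object already at hand (debug_info_), the same move the stub-cache probes in ic-mips.cc make below via masm->isolate(). A toy illustration of the refactor (types here are illustrative, not V8's):

struct Isolate { /* per-VM-instance state */ };

struct DebugInfo {
  Isolate* isolate;
  Isolate* GetIsolate() const { return isolate; }
};

// Before: Isolate::Current() hid a TLS read and an implicit dependency.
// After: the dependency is explicit, cheaper, and correct even when the
// calling thread has no "current" isolate installed.
Isolate* IsolateFor(const DebugInfo& info) { return info.GetIsolate(); }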
diff --git a/chromium/v8/src/mips/deoptimizer-mips.cc b/chromium/v8/src/mips/deoptimizer-mips.cc
index 57d3880edec..16f75b86326 100644
--- a/chromium/v8/src/mips/deoptimizer-mips.cc
+++ b/chromium/v8/src/mips/deoptimizer-mips.cc
@@ -101,12 +101,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -123,12 +118,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -143,191 +133,33 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
if (Assembler::IsAddImmediate(
Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- return true;
+ reinterpret_cast<uint32_t>(osr_builtin->entry()));
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(interrupt_code->entry()));
- return false;
+ reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
+ return NOT_PATCHED;
}
}
#endif // DEBUG
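GetInterruptPatchState folds what used to be a boolean plus two code-object parameters into a self-contained classification: the instruction six slots before the return address is an addiu (the patched load-immediate 1) only when the back edge has been rewritten for OSR, and the expected call target is now fetched from the builtins cache rather than passed in. A sketch of the opcode test, assuming standard MIPS I-type encoding:

#include <cstdint>

enum InterruptPatchState { NOT_PATCHED, PATCHED_FOR_OSR };

InterruptPatchState Classify(uint32_t instr_six_slots_back) {
  const uint32_t kOpcodeShift = 26;   // opcode lives in bits 31..26
  const uint32_t kAddiuOpcode = 0x9;  // addiu rt, rs, imm (load-imm form)
  return (instr_six_slots_back >> kOpcodeShift) == kAddiuOpcode
             ? PATCHED_FOR_OSR
             : NOT_PATCHED;
}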
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -542,10 +374,8 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
- __ push(t2);
- }
+ __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
+ __ push(t2);
__ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
__ push(t2);
diff --git a/chromium/v8/src/mips/disasm-mips.cc b/chromium/v8/src/mips/disasm-mips.cc
index 708df39d24e..691df940f2d 100644
--- a/chromium/v8/src/mips/disasm-mips.cc
+++ b/chromium/v8/src/mips/disasm-mips.cc
@@ -50,9 +50,6 @@
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
#include "v8.h"
diff --git a/chromium/v8/src/mips/full-codegen-mips.cc b/chromium/v8/src/mips/full-codegen-mips.cc
index 1084af09298..df3f4170b1a 100644
--- a/chromium/v8/src/mips/full-codegen-mips.cc
+++ b/chromium/v8/src/mips/full-codegen-mips.cc
@@ -298,8 +298,7 @@ void FullCodeGenerator::Generate() {
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -369,9 +368,8 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
__ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
+ // Call will emit a li t9 first, so it is safe to use the delay slot.
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
@@ -418,8 +416,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(a2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(v0);
EmitProfilingCounterReset();
@@ -786,10 +784,10 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
- __ Check(ne, "Declaration in with context.",
+ __ Check(ne, kDeclarationInWithContext,
a1, Operand(t0));
__ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
- __ Check(ne, "Declaration in catch context.",
+ __ Check(ne, kDeclarationInCatchContext,
a1, Operand(t0));
}
}
@@ -1333,8 +1331,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ li(a0, Operand(info));
- __ push(a0);
+ __ li(a2, Operand(info));
__ CallStub(&stub);
} else {
__ li(a0, Operand(info));
@@ -2529,7 +2526,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.", a2, Operand(t0));
+ __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
// Perform the assignment.
__ sw(v0, location);
@@ -3031,7 +3028,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -3043,7 +3040,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
__ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Branch(if_true, ne, t0, Operand(zero_reg));
+ __ Branch(&skip_lookup, ne, t0, Operand(zero_reg));
// Check for fast case object. Generate false result for slow case object.
__ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
@@ -3089,6 +3086,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Branch(&loop, ne, t0, Operand(a2));
__ bind(&done);
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+
+ __ bind(&skip_lookup);
+
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
@@ -3097,16 +3102,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
__ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ Branch(if_false, ne, a2, Operand(a3));
-
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(a3), if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
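The reshuffle above is behavioral, not cosmetic: the kStringWrapperSafeForDefaultValueOf map bit is now set as soon as the descriptor scan proves there is no local valueOf, and the prototype identity check runs on every call instead of being folded into the cached bit, since a per-map bit cannot soundly cache a fact about a mutable prototype chain. A toy model of the new ordering:

#include <cstdint>

struct Map { uint8_t bit_field2 = 0; };
const uint8_t kStringWrapperSafeForDefaultValueOf = 1 << 2;

// The bit caches only the (immutable) "no local valueOf field" fact;
// the prototype test is re-evaluated every time -- the fix in this hunk.
bool CheckObject(Map& map, bool has_local_value_of,
                 bool prototype_unmodified) {
  if ((map.bit_field2 & kStringWrapperSafeForDefaultValueOf) == 0) {
    if (has_local_value_of) return false;
    map.bit_field2 |= kStringWrapperSafeForDefaultValueOf;
  }
  return prototype_unmodified;
}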
@@ -3339,7 +3337,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -3492,21 +3490,21 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register value,
uint32_t encoding_mask) {
__ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index", at, Operand(zero_reg));
+ __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
__ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value", at, Operand(zero_reg));
+ __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
__ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, "Index is too large", index, Operand(at));
+ __ Check(lt, kIndexIsTooLarge, index, Operand(at));
- __ Check(ge, "Index is negative", index, Operand(zero_reg));
+ __ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
__ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
__ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
__ Subu(at, at, Operand(encoding_mask));
- __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
+ __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
}
@@ -3881,7 +3879,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
context()->Plug(v0);
return;
@@ -4063,7 +4061,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
+ __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin,
array_length, Operand(zero_reg));
}
__ bind(&loop);
@@ -4382,35 +4380,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- UnaryOpStub stub(expr->op());
- // GenericUnaryOpStub expects the argument to be in a0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- __ mov(a0, result_register());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
diff --git a/chromium/v8/src/mips/ic-mips.cc b/chromium/v8/src/mips/ic-mips.cc
index eb730bb3881..e250e0ee4a5 100644
--- a/chromium/v8/src/mips/ic-mips.cc
+++ b/chromium/v8/src/mips/ic-mips.cc
@@ -359,7 +359,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
// If the stub cache probing failed, the receiver might be a value.
@@ -395,7 +395,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
__ bind(&miss);
@@ -658,7 +658,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
@@ -1238,7 +1238,6 @@ static void KeyedStoreGenerateGenericHelper(
a3, // Scratch regs...
t0,
t1,
- t2,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
@@ -1499,7 +1498,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.cc b/chromium/v8/src/mips/lithium-codegen-mips.cc
index 88e7eb8f1d4..4964a242623 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.cc
+++ b/chromium/v8/src/mips/lithium-codegen-mips.cc
@@ -31,12 +31,13 @@
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,11 +45,11 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
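SafepointGenerator is the first of several classes in this file (DeferredInstanceOfKnownGlobal below gets the same treatment) annotated with V8_FINAL and V8_OVERRIDE. These are presumably portability macros from the new centralized config header that expand to the C++11 keywords only where supported; a sketch of that convention:

// Sketch: use the C++11 keywords when available, expand to nothing
// otherwise, so annotated code still builds with pre-C++11 toolchains.
#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1700)
# define V8_FINAL final
# define V8_OVERRIDE override
#else
# define V8_FINAL
# define V8_OVERRIDE
#endif

class CallWrapper {
 public:
  virtual ~CallWrapper() {}
  virtual void AfterCall() const = 0;
};

class SafepointGeneratorSketch V8_FINAL : public CallWrapper {
 public:
  virtual void AfterCall() const V8_OVERRIDE {}
};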
@@ -91,7 +92,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -247,6 +248,21 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
+}
+
+
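GenerateOsrPrologue is the replacement for the deoptimizer-driven OSR machinery deleted from deoptimizer-mips.cc above: the optimized code now absorbs the unoptimized frame in place, so entering it reduces to a stack-pointer adjustment. Stripped to its arithmetic:

#include <cstdint>

// Sketch: grow the frame by the slots the optimized code needs beyond
// what the unoptimized frame already provides (never negative).
void AdjustFrameForOsr(uintptr_t* sp, int optimized_slots,
                       int unoptimized_frame_slots) {
  int extra = optimized_slots - unoptimized_frame_slots;
  // ASSERT(extra >= 0) in the real code; __ Subu(sp, sp, extra * 4) here.
  *sp -= static_cast<uintptr_t>(extra) * sizeof(void*);
}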
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -268,6 +284,8 @@ bool LCodeGen::GenerateBody() {
instr->Mnemonic());
}
+ RecordAndUpdatePosition(instr->position());
+
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
@@ -281,6 +299,10 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
+
+ int pos = instructions_->at(code->instruction_index())->position();
+ RecordAndUpdatePosition(pos);
+
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@@ -317,16 +339,6 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets to the table can be encoded in the 16bit signed
- // immediate of a branch instruction.
- // To simplify we consider the code size from the first instruction to the
- // end of the jump table.
- if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 12)) {
- Abort("Generated code is too large");
- }
-
if (deopt_jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
@@ -402,7 +414,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -411,9 +423,9 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
ASSERT(constant->HasSmiValue());
__ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
} else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
+ Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
} else {
- ASSERT(r.IsTagged());
+ ASSERT(r.IsSmiOrTagged());
__ LoadObject(scratch, literal);
}
return scratch;
@@ -440,7 +452,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
+ Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@@ -449,9 +461,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
__ cvt_d_w(dbl_scratch, flt_scratch);
return dbl_scratch;
} else if (r.IsDouble()) {
- Abort("unsupported double immediate");
+ Abort(kUnsupportedDoubleImmediate);
} else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
+ Abort(kUnsupportedTaggedImmediate);
}
} else if (op->IsStackSlot() || op->IsArgument()) {
MemOperand mem_op = ToMemOperand(op);
@@ -466,7 +478,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -520,14 +532,14 @@ Operand LCodeGen::ToOperand(LOperand* op) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
+ Abort(kToOperandUnsupportedDoubleImmediate);
}
ASSERT(r.IsTagged());
- return Operand(constant->handle());
+ return Operand(constant->handle(isolate()));
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
+ Abort(kToOperandIsDoubleRegisterUnimplemented);
return Operand(0);
}
// Stack slots not implemented, use ToMemOperand instead.
@@ -591,37 +603,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
-
- // TODO(mstarzinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- if (value == NULL) {
- int arguments_count = environment->values()->length() - translation_size;
- translation->BeginArgumentsObject(arguments_count);
- for (int i = 0; i < arguments_count; ++i) {
- LOperand* value = environment->values()->at(translation_size + i);
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(translation_size + i),
- environment->HasUint32ValueAt(translation_size + i));
- }
- continue;
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i));
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32) {
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
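The NULL-marker protocol of the old arguments-object path generalizes here to arbitrary captured objects: a materialization marker in the environment stands for an object the deoptimizer must rebuild, its field values are appended past translation_size, duplicates are encoded by back-reference, and nested markers recurse. A flattened toy of the walk, ignoring duplicates, nesting, and tagging (structure names illustrative):

#include <vector>

struct Env {
  std::vector<int> values;          // regular slots, then object fields
  std::vector<int> object_lengths;  // one length per marker encountered
  int translation_size;             // count of regular slots
};

// -1 stands in for LEnvironment::materialization_marker() in this toy.
void Translate(const Env& env, std::vector<int>* out) {
  int object_index = 0;
  int dematerialized = 0;
  for (int i = 0; i < env.translation_size; ++i) {
    if (env.values[i] == -1) {
      int len = env.object_lengths[object_index++];
      int offset = env.translation_size + dematerialized;
      dematerialized += len;
      for (int f = 0; f < len; ++f) out->push_back(env.values[offset + f]);
    } else {
      out->push_back(env.values[i]);
    }
  }
}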
@@ -650,7 +682,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -736,7 +768,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
-void LCodeGen::DeoptimizeIf(Condition cc,
+void LCodeGen::DeoptimizeIf(Condition condition,
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type,
Register src1,
@@ -748,7 +780,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
@@ -761,18 +793,18 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
+ if (info()->ShouldTrapOnDeopt()) {
Label skip;
- if (cc != al) {
- __ Branch(&skip, NegateCondition(cc), src1, src2);
+ if (condition != al) {
+ __ Branch(&skip, NegateCondition(condition), src1, src2);
}
__ stop("trap_on_deopt");
__ bind(&skip);
}
ASSERT(info()->IsStub() || frame_is_built_);
- if (cc == al && frame_is_built_) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ if (condition == al && frame_is_built_) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
@@ -785,19 +817,19 @@ void LCodeGen::DeoptimizeIf(Condition cc,
!frame_is_built_);
deopt_jump_table_.Add(table_entry, zone());
}
- __ Branch(&deopt_jump_table_.last().label, cc, src1, src2);
+ __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
}
}
-void LCodeGen::DeoptimizeIf(Condition cc,
+void LCodeGen::DeoptimizeIf(Condition condition,
LEnvironment* environment,
Register src1,
const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(cc, environment, bailout_type, src1, src2);
+ DeoptimizeIf(condition, environment, bailout_type, src1, src2);
}
@@ -960,6 +992,14 @@ void LCodeGen::RecordPosition(int position) {
}
+void LCodeGen::RecordAndUpdatePosition(int position) {
+ if (position >= 0 && position != old_position_) {
+ masm()->positions_recorder()->RecordPosition(position);
+ old_position_ = position;
+ }
+}
+
+
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -1047,8 +1087,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1374,10 +1413,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
- int32_t constant = ToRepresentation(
- LConstantOperand::cast(right_op),
- instr->hydrogen()->right()->representation());
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
@@ -1502,7 +1538,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ Or(result, left, right);
break;
case Token::BIT_XOR:
- __ Xor(result, left, right);
+ if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+ __ Nor(result, zero_reg, left);
+ } else {
+ __ Xor(result, left, right);
+ }
break;
default:
UNREACHABLE();
@@ -1661,7 +1701,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
+ Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@@ -1763,7 +1803,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
+ __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
}
__ Addu(scratch,
@@ -1780,13 +1820,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ Nor(result, zero_reg, Operand(input));
-}
-
-
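DoBitNotI disappears because BIT_NOT no longer reaches the back end as its own operation; ~x is presumably lowered to x ^ -1 upstream, and the BIT_XOR case earlier in this file pattern-matches an all-ones immediate back into nor, MIPS having no dedicated not instruction. The identities involved:

#include <cstdint>

uint32_t BitNotViaXor(uint32_t x) { return x ^ 0xFFFFFFFFu; }  // front-end form
uint32_t BitNotViaNor(uint32_t x) { return ~(x | 0u); }        // nor zero_reg, x
// Both equal ~x; the codegen picks the single-instruction nor whenever
// the xor's right operand is the constant ~0.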
void LCodeGen::DoThrow(LThrow* instr) {
Register input_reg = EmitLoadRegister(instr->value(), at);
__ push(input_reg);
@@ -1962,20 +1995,22 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
- Condition cc, Register src1, const Operand& src2) {
+ Condition condition,
+ Register src1,
+ const Operand& src2) {
int left_block = instr->TrueDestination(chunk_);
int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
- if (right_block == left_block || cc == al) {
+ if (right_block == left_block || condition == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ Branch(chunk_->GetAssemblyLabel(right_block),
- NegateCondition(cc), src1, src2);
+ NegateCondition(condition), src1, src2);
} else if (right_block == next_block) {
- __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
} else {
- __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
+ __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
__ Branch(chunk_->GetAssemblyLabel(right_block));
}
}
@@ -1983,7 +2018,9 @@ void LCodeGen::EmitBranch(InstrType instr,
template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
- Condition cc, FPURegister src1, FPURegister src2) {
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2) {
int right_block = instr->FalseDestination(chunk_);
int left_block = instr->TrueDestination(chunk_);
@@ -1992,16 +2029,29 @@ void LCodeGen::EmitBranchF(InstrType instr,
EmitGoto(left_block);
} else if (left_block == next_block) {
__ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
- NegateCondition(cc), src1, src2);
+ NegateCondition(condition), src1, src2);
} else if (right_block == next_block) {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+ condition, src1, src2);
} else {
- __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
+ __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+ condition, src1, src2);
__ Branch(chunk_->GetAssemblyLabel(right_block));
}
}
+template<class InstrType>
+void LCodeGen::EmitFalseBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
+ condition, src1, src2);
+}
+
+
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
__ stop("LDebugBreak");
}
@@ -2262,6 +2312,23 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ li(at, Operand(factory()->the_hole_value()));
+ EmitBranch(instr, eq, input_reg, Operand(at));
+ return;
+ }
+
+ DoubleRegister input_reg = ToDoubleRegister(instr->object());
+ EmitFalseBranchF(instr, eq, input_reg, input_reg);
+
+ Register scratch = scratch0();
+ __ FmoveHigh(scratch, input_reg);
+ EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
+}
+
+
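DoCmpHoleAndBranch's unboxed path exploits the hole being a particular quiet NaN: the new EmitFalseBranchF first rules out every non-NaN (a value equal to itself cannot be the hole), then matches the NaN's upper payload word against kHoleNanUpper32. A portable sketch; the exact bit pattern is assumed here for illustration:

#include <cstdint>
#include <cstring>

const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed encoding

bool IsTheHoleNan(double value) {
  if (value == value) return false;  // not a NaN, so not the hole
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}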
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
@@ -2550,15 +2617,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
@@ -2890,90 +2957,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstant()) {
- Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
- __ LoadObject(result, constant);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
- DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register object_map = scratch0();
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMapAndBranch(object_map, map, &check_passed, eq, &check_passed);
- if (last && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- __ Branch(&next);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ Branch(&done);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ li(a2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -3069,7 +3052,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -3155,7 +3138,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -3426,7 +3409,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
+ Abort(kDoPushArgumentNotImplementedForDoubleType);
} else {
Register argument_reg = EmitLoadRegister(argument, at);
__ push(argument_reg);
@@ -3628,14 +3611,14 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3645,7 +3628,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
FPURegister input = ToDoubleRegister(instr->value());
FPURegister result = ToDoubleRegister(instr->result());
__ abs_d(result, input);
- } else if (r.IsInteger32()) {
+ } else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else {
// Representation is tagged.
@@ -3830,79 +3813,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(f0));
- ASSERT(ToRegister(instr->global_object()).is(a0));
-
+ // Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
+ // Load native context.
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ lw(native_context, FieldMemOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (FixedArray of the native context's random seeds).
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
- // a2: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
- __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
+ Register state0 = ToRegister(instr->scratch());
+ __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
- // a1: state[0].
- // a0: state[1].
+ Register state1 = ToRegister(instr->scratch2());
+ __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ And(a3, a1, Operand(0xFFFF));
- __ li(t0, Operand(18273));
- __ Mul(a3, a3, t0);
- __ srl(a1, a1, 16);
- __ Addu(a1, a3, a1);
+ Register scratch3 = ToRegister(instr->scratch3());
+ Register scratch4 = scratch0();
+ __ And(scratch3, state0, Operand(0xFFFF));
+ __ li(scratch4, Operand(18273));
+ __ Mul(scratch3, scratch3, scratch4);
+ __ srl(state0, state0, 16);
+ __ Addu(state0, scratch3, state0);
// Save state[0].
- __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
+ __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ And(a3, a0, Operand(0xFFFF));
- __ li(t0, Operand(36969));
- __ Mul(a3, a3, t0);
- __ srl(a0, a0, 16),
- __ Addu(a0, a3, a0);
+ __ And(scratch3, state1, Operand(0xFFFF));
+ __ li(scratch4, Operand(36969));
+ __ Mul(scratch3, scratch3, scratch4);
+ __ srl(state1, state1, 16);
+ __ Addu(state1, scratch3, state1);
// Save state[1].
- __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
+ __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ And(a0, a0, Operand(0x3FFFF));
- __ sll(a1, a1, 14);
- __ Addu(v0, a0, a1);
-
- __ bind(deferred->exit());
+ Register random = scratch4;
+ __ And(random, state1, Operand(0x3FFFF));
+ __ sll(state0, state0, 14);
+ __ Addu(random, random, state0);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a2, Operand(0x41300000));
+ __ li(scratch3, Operand(0x41300000));
// Move 0x41300000xxxxxxxx (x = random bits) to FPU.
- __ Move(f12, v0, a2);
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ Move(result, random, scratch3);
// Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a2);
- // Subtract to get the result.
- __ sub_d(f0, f12, f14);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in v0.
+ DoubleRegister scratch5 = double_scratch0();
+ __ Move(scratch5, zero_reg, scratch3);
+ __ sub_d(result, result, scratch5);
}
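
The register-renamed sequence above is V8's pair of 16-bit multiply-with-carry generators; the zero-seed check and the deferred call to random_uint32_function were dropped along with DoDeferredRandom. For reference, the same computation in plain C++ (a sketch; seeding is assumed to have happened elsewhere):

#include <cstdint>
#include <cstring>

// Two multiply-with-carry steps, combined exactly as in the assembly,
// then mapped to [0, 1): placing the 32 random bits in the low mantissa
// word of 2^20 (high word 0x41300000) and subtracting 2^20 leaves
// random32 * 2^-32.
double RandomSketch(uint32_t state[2]) {
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
  uint32_t random32 = (state[0] << 14) + (state[1] & 0x3FFFF);
  uint64_t pattern = (uint64_t{0x41300000} << 32) | random32;
  double result;
  std::memcpy(&result, &pattern, sizeof(result));
  return result - 1048576.0;  // subtract 2^20, leaving [0, 1)
}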
@@ -4096,6 +4064,16 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ Addu(code_object, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(code_object,
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4202,17 +4180,17 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::ApplyCheckIf(Condition cc,
+void LCodeGen::ApplyCheckIf(Condition condition,
LBoundsCheck* check,
Register src1,
const Operand& src2) {
if (FLAG_debug_code && check->hydrogen()->skip_check()) {
Label done;
- __ Branch(&done, NegateCondition(cc), src1, src2);
+ __ Branch(&done, NegateCondition(condition), src1, src2);
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, check->environment(), src1, src2);
+ DeoptimizeIf(condition, check->environment(), src1, src2);
}
}
@@ -4251,7 +4229,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -4329,7 +4307,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
key = ToRegister(instr->key());
@@ -4458,12 +4436,13 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetRAState(), kDontSaveFPRegs);
} else {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ PushSafepointRegistersScope scope(
+ this, Safepoint::kWithRegistersAndDoubles);
__ mov(a0, object_reg);
__ li(a1, Operand(to_map));
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
- RecordSafepointWithRegisters(
+ RecordSafepointWithRegistersAndDoubles(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
@@ -4489,12 +4468,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4541,12 +4522,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4608,9 +4591,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* input = instr->value();
- ASSERT(input->IsRegister());
LOperand* output = instr->result();
- ASSERT(output->IsRegister());
Register scratch = scratch0();
__ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
@@ -4631,17 +4612,30 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ Register scratch = scratch0();
+ __ And(scratch, ToRegister(input), Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ }
+ __ SmiTag(ToRegister(output), ToRegister(input));
+}
+
+
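The 0xc0000000 mask in DoUint32ToSmi above is the 32-bit Smi range check: a Smi carries a 31-bit signed payload, so an unsigned value is representable only if it is at most 2^30 - 1, i.e. its top two bits are clear. Equivalently, as a sketch:

#include <cstdint>

// A 32-bit Smi holds a 31-bit signed payload, so an unsigned value fits
// iff value <= 0x3FFFFFFF, i.e. the two top bits tested by the And
// above are zero; otherwise the instruction deoptimizes.
bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;
}
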
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
+ class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
SIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -4658,16 +4652,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagI(instr_,
instr_->value(),
UNSIGNED_INT32);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4738,12 +4732,14 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4754,29 +4750,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
- __ Move(reg, scratch0(), input_reg);
- Label canonicalize;
- __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
- __ li(reg, factory()->the_hole_value());
- __ Branch(&done);
- __ bind(&canonicalize);
- __ Move(input_reg,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- }
-
- __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4790,7 +4763,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
// Now that we have finished with the object's real address, tag it.
__ Addu(reg, reg, kHeapObjectTag);
- __ bind(&done);
}
@@ -4832,7 +4804,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
- bool allow_undefined_as_nan,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
@@ -4840,16 +4812,14 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
- STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
- NUMBER_CANDIDATE_IS_ANY_TAGGED);
- if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (!allow_undefined_as_nan) {
+ if (!can_convert_undefined_to_nan) {
DeoptimizeIf(ne, env, scratch, Operand(at));
} else {
Label heap_number, convert;
@@ -4857,10 +4827,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
- __ Branch(&convert, eq, input_reg, Operand(at));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- }
DeoptimizeIf(ne, env, input_reg, Operand(at));
__ bind(&convert);
@@ -4898,7 +4864,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4913,11 +4879,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch.low();
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
Label heap_number;
@@ -4931,14 +4892,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ Branch(&done);
__ bind(&heap_number);
- __ ldc1(double_scratch2,
- FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ mov(scratch2, input_reg);
+ __ TruncateHeapNumberToI(input_reg, scratch2);
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
@@ -4972,12 +4927,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -5008,21 +4965,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DoubleRegister result_reg = ToDoubleRegister(result);
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- mode = NUMBER_CANDIDATE_IS_SMI;
- } else if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
- }
- }
- }
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->allow_undefined_as_nan(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@@ -5032,20 +4980,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(instr->result());
Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_input = ToDoubleRegister(instr->value());
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch0().low();
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
- Register except_flag = scratch2;
+ Register except_flag = LCodeGen::scratch1();
__ EmitFPUTruncate(kRoundToMinusInf,
result_reg,
@@ -5072,21 +5012,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
+ Register scratch1 = LCodeGen::scratch0();
DoubleRegister double_input = ToDoubleRegister(instr->value());
if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch0().low();
- __ EmitECMATruncate(result_reg,
- double_input,
- single_scratch,
- scratch1,
- scratch2,
- scratch3);
+ __ TruncateDoubleToI(result_reg, double_input);
} else {
- Register except_flag = scratch2;
+ Register except_flag = LCodeGen::scratch1();
__ EmitFPUTruncate(kRoundToMinusInf,
result_reg,
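
Both DoDoubleToI and DoDoubleToSmi above now delegate their truncating path to the new TruncateDoubleToI macro, replacing the open-coded EmitECMATruncate and its three scratch registers. The semantics being implemented is the ECMA-262 ToInt32 conversion used by the bitwise operators: truncate toward zero, reduce modulo 2^32, reinterpret as signed. A portable sketch of that conversion (assuming the usual wrap-around behavior of the final int32 cast):

#include <cmath>
#include <cstdint>

// ECMA-262 ToInt32: NaN and infinities map to 0; otherwise truncate
// toward zero, take the magnitude modulo 2^32, and reinterpret signed.
int32_t EcmaToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double d = std::trunc(value);
  uint32_t u = static_cast<uint32_t>(std::fmod(std::fabs(d), 4294967296.0));
  if (d < 0) u = 0u - u;
  return static_cast<int32_t>(u);
}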
@@ -5168,49 +5100,81 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
+ Handle<HeapObject> object = instr->hydrogen()->object();
AllowDeferredHandleDereference smi_check;
- if (isolate()->heap()->InNewSpace(*target)) {
+ if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
Operand(at));
} else {
DeoptimizeIf(ne, instr->environment(), reg,
- Operand(target));
+ Operand(object));
}
}
-void LCodeGen::DoCheckMapCommon(Register map_reg,
- Handle<Map> map,
- LEnvironment* env) {
- Label success;
- __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
- DeoptimizeIf(al, env);
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ push(object);
+ CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ StoreToSafepointRegisterSlot(v0, scratch0());
+ }
+ __ And(at, scratch0(), Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
if (instr->hydrogen()->CanOmitMapChecks()) return;
Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ Label success;
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(map_reg, map, instr->environment());
+ // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
+ if (instr->hydrogen()->has_migration_target()) {
+ __ Branch(deferred->entry(), ne, map_reg, Operand(map));
+ } else {
+ DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+ }
+
__ bind(&success);
}
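
The deferred path added above gives DoCheckMaps a second chance: when the hydrogen instruction has a migration target, a final-map mismatch branches to deferred code that calls Runtime::kMigrateInstance (deoptimizing if it returns a Smi, i.e. failure) and then jumps back to the check_maps() label to re-run the comparisons. As a control-flow sketch, with the predicates standing in for the inline map comparisons and the runtime call:

// Control-flow sketch only; matches_a_map stands in for the inline
// CompareMapAndBranch sequence and migrate for Runtime::kMigrateInstance.
template <typename MatchesFn, typename MigrateFn>
bool CheckMapsWithMigration(MatchesFn matches_a_map, MigrateFn migrate) {
  while (!matches_a_map()) {       // re-entered at deferred->check_maps()
    if (!migrate()) return false;  // migration failed: deoptimize
  }
  return true;                     // fall through to &success
}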
@@ -5265,32 +5229,15 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
-
- Register prototype_reg = ToRegister(instr->temp());
- Register map_reg = ToRegister(instr->temp2());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
- }
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5451,8 +5398,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ li(a1, Operand(instr->hydrogen()->shared_info()));
- __ push(a1);
+ __ li(a2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ li(a2, Operand(instr->hydrogen()->shared_info()));
@@ -5664,6 +5610,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}
@@ -5685,12 +5633,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5704,8 +5654,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5741,9 +5692,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.h b/chromium/v8/src/mips/lithium-codegen-mips.h
index a485b67db94..84105cae35f 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.h
+++ b/chromium/v8/src/mips/lithium-codegen-mips.h
@@ -42,7 +42,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -65,7 +65,8 @@ class LCodeGen BASE_EMBEDDED {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
+ expected_safepoint_kind_(Safepoint::kSimple),
+ old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -114,7 +115,7 @@ class LCodeGen BASE_EMBEDDED {
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
- int ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
@@ -146,14 +147,13 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register map_reg, Handle<Map> map, LEnvironment* env);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -213,7 +213,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -226,6 +226,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -276,24 +279,27 @@ class LCodeGen BASE_EMBEDDED {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc,
+ void DeoptimizeIf(Condition condition,
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(Condition cc,
+ void DeoptimizeIf(Condition condition,
LEnvironment* environment,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void ApplyCheckIf(Condition cc,
+ void ApplyCheckIf(Condition condition,
LBoundsCheck* check,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void AddToTranslation(Translation* translation,
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32);
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -319,19 +325,25 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
+ void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
void EmitBranch(InstrType instr,
- Condition cc,
+ Condition condition,
Register src1,
const Operand& src2);
template<class InstrType>
void EmitBranchF(InstrType instr,
- Condition cc,
+ Condition condition,
FPURegister src1,
FPURegister src2);
+ template<class InstrType>
+ void EmitFalseBranchF(InstrType instr,
+ Condition condition,
+ FPURegister src1,
+ FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
@@ -373,12 +385,6 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -435,7 +441,9 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ int old_position_;
+
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
@@ -483,7 +491,7 @@ class LCodeGen BASE_EMBEDDED {
};
-class LDeferredCode: public ZoneObject {
+class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
@@ -492,7 +500,7 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
index 771b22862ee..460e13bf0a9 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -258,7 +258,7 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
- } else if (source->IsDoubleRegister()) {
+ } else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Move(result, v);
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.h b/chromium/v8/src/mips/lithium-gap-resolver-mips.h
index 2506e38c351..ea1ea3cbbf2 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/chromium/v8/src/mips/lithium-mips.cc b/chromium/v8/src/mips/lithium-mips.cc
index 760be2e6e85..06bb33abc01 100644
--- a/chromium/v8/src/mips/lithium-mips.cc
+++ b/chromium/v8/src/mips/lithium-mips.cc
@@ -30,6 +30,7 @@
#include "lithium-allocator-inl.h"
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -265,6 +266,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -430,6 +439,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -442,7 +460,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -598,8 +616,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -650,7 +670,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
- Abort("Out of virtual registers while trying to allocate temp register.");
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
vreg = 0;
}
operand->set_virtual_register(vreg);
@@ -721,12 +741,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Left shifts can deoptimize if we shift by > 0 and the result cannot be
// truncated to smi.
if (instr->representation().IsSmi() && constant_value > 0) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToSmi)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
}
} else {
right = UseRegisterAtStart(right_value);
@@ -738,12 +753,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
@@ -888,6 +898,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
+ instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -903,11 +914,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@@ -922,16 +935,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
- bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
+ int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- needs_arguments_object_materialization = true;
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -942,15 +955,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
- if (needs_arguments_object_materialization) {
- HArgumentsObject* arguments = hydrogen_env->entry() == NULL
- ? graph()->GetArgumentsObject()
- : hydrogen_env->entry()->arguments_object();
- ASSERT(arguments->IsLinked());
- for (int i = 1; i < arguments->arguments_count(); ++i) {
- HValue* value = arguments->arguments_values()->at(i);
- ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
- LOperand* op = UseAny(value);
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
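
The loop above materializes captured objects breadth-first: each newly encountered object appends its operands (possibly queueing further captured objects), while an object already present earlier in the list is recorded via AddDuplicateObject instead of being expanded again. The de-duplication scan, isolated as a sketch:

#include <vector>

// Returns the index of an earlier occurrence of objects[i], or -1 if
// objects[i] is materialized here for the first time (mirrors the
// previously_materialized_object scan above).
int FindPreviouslyMaterialized(const std::vector<const void*>& objects,
                               int i) {
  for (int prev = 0; prev < i; ++prev) {
    if (objects[prev] == objects[i]) return prev;
  }
  return -1;
}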
@@ -1066,6 +1097,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1327,15 +1366,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1594,9 +1624,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), a0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, f0), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, f0);
}
@@ -1614,9 +1648,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(
- instr->right()->representation()));
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1639,6 +1672,13 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return new(zone()) LCmpHoleAndBranch(object);
+}
+
+
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1751,17 +1791,6 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
}
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1776,13 +1805,6 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), a0);
return MarkAsCall(new(zone()) LThrow(value), instr);
@@ -1829,19 +1851,17 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
- value = UseRegisterAtStart(instr->value());
+ HValue* val = instr->value();
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
+ value = UseRegisterAtStart(val);
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
- value = UseRegister(instr->value());
+ value = UseRegister(val);
LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(f22);
+ LOperand* temp2 = FixedTemp(f22);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
- temp2,
- temp3));
+ temp2));
res = AssignEnvironment(res);
}
return res;
@@ -1861,14 +1881,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(result);
} else if (to.IsSmi()) {
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
- TempRegister(), TempRegister())));
+ return AssignEnvironment(
+ DefineAsRegister(new(zone()) LDoubleToSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+ LDoubleToI* res = new(zone()) LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
@@ -1888,8 +1906,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -1935,31 +1954,24 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = NULL;
- LOperand* temp2 = NULL;
- if (!instr->CanOmitPrototypeChecks()) {
- temp1 = TempRegister();
- temp2 = TempRegister();
- }
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- if (instr->CanOmitPrototypeChecks()) return result;
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMaps(value);
- if (instr->CanOmitMapChecks()) return result;
- return AssignEnvironment(result);
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
+ LCheckMaps* result = new(zone()) LCheckMaps(value);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
@@ -2071,23 +2083,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), a0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, v0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), a0);
LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
@@ -2243,7 +2238,7 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
@@ -2361,10 +2356,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
@@ -2385,6 +2388,14 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
@@ -2426,20 +2437,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
diff --git a/chromium/v8/src/mips/lithium-mips.h b/chromium/v8/src/mips/lithium-mips.h
index 44c909ea766..91dea44045c 100644
--- a/chromium/v8/src/mips/lithium-mips.h
+++ b/chromium/v8/src/mips/lithium-mips.h
@@ -50,7 +50,6 @@ class LCodeGen;
V(ArithmeticD) \
V(ArithmeticT) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -63,19 +62,19 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@@ -128,7 +127,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@@ -163,6 +161,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -184,17 +183,22 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
@@ -204,13 +208,16 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ set_position(RelocInfo::kNoPosition);
+ }
+
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -248,20 +255,30 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
+  // PositionBits stores the int position value in 31 bits, but that value
+  // may be RelocInfo::kNoPosition (-1). The accessors therefore add/subtract
+  // 1, so the value encoded in bit_field_ is always >= 0 and fits into the
+  // 31-bit field.
+ void set_position(int pos) {
+ bit_field_ = PositionBits::update(bit_field_, pos + 1);
+ }
+ int position() { return PositionBits::decode(bit_field_) - 1; }
+
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ bool ClobbersDoubleRegisters() const { return IsCall(); }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
virtual LOperand* result() const = 0;
@@ -285,10 +302,13 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+ class PositionBits: public BitField<int, 1, 31> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
+ int bit_field_;
};
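The plain bool flag became a packed bit field. A standalone mirror of V8's BitField template (same shift/size values as the IsCallBits and PositionBits declarations above; a sketch, not the real header) shows the round trip, including the +1 bias that lets kNoPosition (-1) encode as a non-negative value:

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) |
               ((static_cast<uint32_t>(value) << shift) & kMask);
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    typedef BitField<bool, 0, 1> IsCallBits;    // bit 0
    typedef BitField<int, 1, 31> PositionBits;  // bits 1..31

    // uint32_t bits = 0;
    // bits = IsCallBits::update(bits, true);
    // bits = PositionBits::update(bits, -1 + 1);  // RelocInfo::kNoPosition
    // PositionBits::decode(bits) - 1 == -1, IsCallBits::decode(bits) == true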
@@ -296,11 +316,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -310,15 +332,15 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, T> temps_;
private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
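The R/I/T template parameters size the embedded operand arrays directly, so each instruction's shape is fixed at compile time (LDivI below is <1, 2, 0>: one result, two inputs, no temps). A hypothetical mirror of the container trick, including the zero-length specialization V8 uses so that empty arrays cost no storage:

    template <class T, int N>
    struct EmbeddedContainer {
      T& operator[](int i) { return elems_[i]; }
      int length() const { return N; }
     private:
      T elems_[N];
    };

    // Specialization for N == 0: no storage at all.
    template <class T>
    struct EmbeddedContainer<T, 0> {
      int length() const { return 0; }
    };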
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -329,8 +351,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -366,11 +388,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -378,14 +400,14 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
@@ -394,7 +416,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -410,7 +432,7 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -419,22 +441,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -450,14 +474,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -468,19 +494,21 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -519,7 +547,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -533,7 +561,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -554,7 +582,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -568,11 +596,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -584,14 +612,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 3> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
public:
// Used when the right-hand side is a constant power of 2.
LModI(LOperand* left,
@@ -627,7 +655,7 @@ class LModI: public LTemplateInstruction<1, 2, 3> {
};
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LDivI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -642,7 +670,7 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -661,7 +689,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -679,7 +707,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
@@ -696,13 +724,13 @@ class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -721,11 +749,11 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LMathFloor: public LTemplateInstruction<1, 1, 1> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathFloor(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -740,7 +768,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 1> {
};
-class LMathRound: public LTemplateInstruction<1, 1, 1> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -755,7 +783,7 @@ class LMathRound: public LTemplateInstruction<1, 1, 1> {
};
-class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
@@ -768,7 +796,7 @@ class LMathAbs: public LTemplateInstruction<1, 1, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -780,7 +808,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -792,7 +820,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -804,7 +832,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -816,7 +844,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
@@ -838,7 +866,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -850,7 +878,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -864,7 +892,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -874,13 +902,25 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -897,7 +937,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -910,7 +950,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -923,11 +963,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -938,11 +978,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -956,11 +996,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -976,11 +1016,11 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -992,11 +1032,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -1009,7 +1049,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1021,11 +1062,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1039,11 +1080,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1060,7 +1101,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1074,7 +1115,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1092,7 +1133,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1101,7 +1143,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1114,7 +1156,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1129,7 +1171,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1146,7 +1188,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1167,7 +1209,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1182,7 +1224,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1191,7 +1233,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1200,7 +1242,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1209,7 +1251,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1220,16 +1262,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1240,11 +1284,11 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpMapAndBranch: public LControlInstruction<1, 1> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1261,7 +1305,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 1> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1273,7 +1317,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1286,7 +1330,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1301,7 +1345,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LDateField: public LTemplateInstruction<1, 1, 1> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1320,7 +1364,7 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1344,7 +1388,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1356,19 +1400,7 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1383,7 +1415,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1398,7 +1430,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1413,20 +1445,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
- LOperand* global_object() { return inputs_[0]; }
+ LOperand* global_object() const { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1438,16 +1479,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1459,16 +1502,16 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* right() { return inputs_[1]; }
Token::Value op() const { return op_; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1490,7 +1533,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1503,20 +1546,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1531,7 +1561,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1544,7 +1574,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1557,7 +1588,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1581,7 +1612,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* object, LOperand* key) {
inputs_[0] = object;
@@ -1595,14 +1626,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1618,7 +1649,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1633,7 +1664,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1652,7 +1683,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1669,7 +1700,7 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
@@ -1684,11 +1715,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1700,7 +1731,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1713,7 +1744,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1722,28 +1770,28 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1755,14 +1803,14 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
@@ -1774,7 +1822,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1786,19 +1834,19 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1809,13 +1857,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1826,26 +1874,26 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1860,7 +1908,7 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
@@ -1872,18 +1920,18 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1894,13 +1942,13 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1911,13 +1959,13 @@ class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1927,7 +1975,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1939,7 +1987,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1952,7 +2000,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1964,7 +2012,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
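Uint32ToSmi joins Uint32ToDouble because an untagged uint32 does not always fit in a Smi. A hedged sketch of the layout this targets, assuming the usual 32-bit Smi encoding (MIPS here is 32-bit): the payload is a 31-bit signed integer behind a low 0 tag bit, so only values up to 2^30 - 1 convert directly, and anything larger must take a slower path (heap number or deopt).

    #include <cstdint>

    // Assumed 32-bit Smi layout: value << 1, with low bit 0 as the Smi tag.
    const int kSmiTagSize = 1;
    const int32_t kSmiMaxValue = (1 << 30) - 1;

    inline bool Uint32FitsInSmi(uint32_t value) {
      return value <= static_cast<uint32_t>(kSmiMaxValue);
    }

    inline int32_t TagAsSmi(uint32_t value) {
      return static_cast<int32_t>(value << kSmiTagSize);
    }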
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1976,7 +2037,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
@@ -1988,7 +2049,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -2005,17 +2066,13 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2025,17 +2082,13 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2045,22 +2098,19 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LTaggedToI(LOperand* value,
LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
+ LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
- temps_[2] = temp3;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -2069,7 +2119,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
};
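These hunks shed the scratch operands of LDoubleToSmi, LDoubleToI and LTaggedToI; the operation itself remains a truncating double-to-int32 conversion that must notice inputs outside the representable range. A portable stand-in for the semantics (not the MIPS instruction sequence the codegen emits):

    #include <cmath>
    #include <cstdint>

    // Returns false where the lithium instruction would deoptimize instead.
    bool TruncateDoubleToInt32(double input, int32_t* result) {
      if (std::isnan(input)) return false;
      double truncated = std::trunc(input);
      if (truncated < -2147483648.0 || truncated > 2147483647.0) return false;
      *result = static_cast<int32_t>(truncated);
      return true;
    }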
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2081,7 +2131,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2094,7 +2144,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2111,7 +2161,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2126,16 +2176,16 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -2148,14 +2198,14 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2174,13 +2224,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
inputs_[0] = obj;
@@ -2195,13 +2245,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp) {
@@ -2216,7 +2266,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2225,7 +2275,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 1> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2241,7 +2291,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2257,7 +2307,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2272,7 +2322,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2285,20 +2335,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2311,7 +2361,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2324,27 +2374,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2356,7 +2386,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2369,7 +2399,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampDToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2383,7 +2413,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2395,7 +2425,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped, LOperand* temp) {
inputs_[0] = unclamped;
@@ -2409,7 +2439,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocate: public LTemplateInstruction<1, 2, 2> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[1] = size;
@@ -2426,21 +2456,21 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2453,7 +2483,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2465,7 +2495,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2478,11 +2508,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2495,16 +2525,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2516,7 +2548,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2528,7 +2560,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2544,7 +2576,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2558,7 +2590,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2573,7 +2605,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2583,7 +2615,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2642,7 +2674,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2724,7 +2756,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
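
The hunks above annotate the concrete Lithium classes with V8_FINAL and V8_OVERRIDE instead of plain virtual overrides; both macros come from V8's v8config.h header. A minimal sketch of how such compatibility macros are typically defined, assuming feature-test macros of the kind v8config.h computes (names here are illustrative, not the verbatim header text):

// Sketch only: expand to the C++11 keywords when the compiler supports
// them, and to nothing otherwise.
#if V8_HAS_CXX11_FINAL
# define V8_FINAL final
#else
# define V8_FINAL /* NOT SUPPORTED */
#endif

#if V8_HAS_CXX11_OVERRIDE
# define V8_OVERRIDE override
#else
# define V8_OVERRIDE /* NOT SUPPORTED */
#endif

Marking the instruction classes final lets conforming compilers devirtualize calls through them, and override catches accidental signature mismatches at compile time.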
diff --git a/chromium/v8/src/mips/macro-assembler-mips.cc b/chromium/v8/src/mips/macro-assembler-mips.cc
index ea08a552be5..a85b0d80344 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/macro-assembler-mips.cc
@@ -256,7 +256,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
lw(at, MemOperand(address));
Assert(
- eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
+ eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
Label done;
@@ -358,7 +358,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- Check(ne, "we should not have an empty lexical context",
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
scratch, Operand(zero_reg));
#endif
@@ -374,7 +374,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, "JSGlobalObject::native_context should be a native context.",
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
pop(holder_reg); // Restore holder.
}
@@ -388,12 +388,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, at); // Move at to its holding place.
LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, "JSGlobalProxy::context() should not be null.",
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull,
holder_reg, Operand(at));
lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, "JSGlobalObject::native_context should be a native context.",
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
// Restore at is not needed. at is reloaded below.
pop(holder_reg); // Restore holder.
@@ -1298,60 +1298,6 @@ void MacroAssembler::Clz(Register rd, Register rs) {
}
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32-bit signed integer range.
-// This method implementation differs from the ARM version for performance
-// reasons.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32) {
- Label right_exponent, done;
- // Get exponent word (ENDIAN issues).
- lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, zero_reg);
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
- // If the exponent is higher than that then go to not_int32 case. This
- // catches numbers that don't fit in a signed int32, infinities and NaNs.
- Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- Subu(scratch2, scratch2, Operand(zero_exponent));
- // Dest already has a Smi zero.
- Branch(&done, lt, scratch2, Operand(zero_reg));
- bind(&right_exponent);
-
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
-
- bind(&done);
-}
-
-
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
@@ -1416,104 +1362,12 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
}
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
- // Extract the biased exponent in result.
- Ext(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- Subu(scratch, result, HeapNumber::kExponentMask);
- Movz(result, zero_reg, scratch);
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Express exponent as delta to (number of mantissa bits + 31).
- Subu(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- Branch(&normal_exponent, le, result, Operand(zero_reg));
- mov(result, zero_reg);
- Branch(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- And(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
- // to check for this specific case.
- Label high_shift_needed, high_shift_done;
- Branch(&high_shift_needed, lt, scratch, Operand(32));
- mov(input_high, zero_reg);
- Branch(&high_shift_done);
- bind(&high_shift_needed);
-
- // Set the implicit 1 before the mantissa part in input_high.
- Or(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- sllv(input_high, input_high, scratch);
-
- bind(&high_shift_done);
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- li(at, 32);
- subu(scratch, at, scratch);
- Branch(&pos_shift, ge, scratch, Operand(zero_reg));
-
- // Negate scratch.
- Subu(scratch, zero_reg, scratch);
- sllv(input_low, input_low, scratch);
- Branch(&shift_done);
-
- bind(&pos_shift);
- srlv(input_low, input_low, scratch);
-
- bind(&shift_done);
- Or(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- mov(scratch, sign);
- result = sign;
- sign = no_reg;
- Subu(result, zero_reg, input_high);
- Movz(result, input_high, scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- ASSERT(!scratch2.is(result));
- ASSERT(!scratch3.is(result));
- ASSERT(!scratch3.is(scratch2));
- ASSERT(!scratch.is(result) &&
- !scratch.is(scratch2) &&
- !scratch.is(scratch3));
- ASSERT(!single_scratch.is(double_input));
-
- Label done;
- Label manual;
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ Register scratch = at;
+ Register scratch2 = t9;
// Clear cumulative exception flags and save the FCSR.
cfc1(scratch2, FCSR);
@@ -1529,16 +1383,66 @@ void MacroAssembler::EmitECMATruncate(Register result,
scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Load the double value and perform a manual truncation.
- Register input_high = scratch2;
- Register input_low = scratch3;
- Move(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
+ Branch(done, eq, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through then the inline version didn't succeed; call the stub instead.
+ push(ra);
+ Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ sdc1(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(sp, result, 0, true, true);
+ CallStub(&stub);
+
+ Addu(sp, sp, Operand(kDoubleSize));
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+ Label done;
+ DoubleRegister double_scratch = f12;
+ ASSERT(!result.is(object));
+
+ ldc1(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+ // If we fell through then the inline version didn't succeed; call the stub instead.
+ push(ra);
+ DoubleToIStub stub(object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label done;
+ ASSERT(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+ TruncateHeapNumberToI(result, object);
+
bind(&done);
}
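
TruncateDoubleToI and TruncateHeapNumberToI above first attempt the inline trunc_w_d conversion and inspect the FCSR exception flags; only a saturated or invalid conversion falls back to the much slower DoubleToIStub call. The contract both paths implement is ECMA-262 9.5 ToInt32: truncate towards zero, then wrap modulo 2^32 into the signed 32-bit range. A portable C++ sketch of that contract (illustrative, not V8 code):

#include <cmath>
#include <cstdint>

int32_t ToInt32(double d) {
  if (std::isnan(d) || std::isinf(d) || d == 0.0) return 0;
  double t = std::trunc(d);                      // round towards zero
  // Reduce |t| modulo 2^32, then restore the sign modulo 2^32.
  uint32_t u = static_cast<uint32_t>(std::fmod(std::fabs(t), 4294967296.0));
  if (t < 0) u = ~u + 1u;
  return static_cast<int32_t>(u);                // wraps into [-2^31, 2^31)
}

For example, ToInt32(4294967296.5) is 0 and ToInt32(2147483648.0) is -2147483648, which is what the stub's slow path computes for doubles the FPU cannot convert directly.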
@@ -2923,9 +2827,7 @@ void MacroAssembler::Allocate(int object_size,
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
li(topaddr, Operand(allocation_top));
- li(obj_size_reg, Operand(object_size));
// This code stores a temporary value in t9.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@@ -2938,15 +2840,32 @@ void MacroAssembler::Allocate(int object_size,
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ And(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
+ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(scratch2, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Addu(scratch2, result, Operand(obj_size_reg));
+ Addu(scratch2, result, Operand(object_size));
Branch(gc_required, Ugreater, scratch2, Operand(t9));
sw(scratch2, MemOperand(topaddr));
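
Both Allocate() overloads gain this DOUBLE_ALIGNMENT block (the register-sized variant repeats it in the next hunk): when the allocation top is only word-aligned, the code plugs the odd word with the one-pointer filler map and bumps the top so the object itself starts on an 8-byte boundary. In plain C++ terms, under MIPS32 assumptions (4-byte pointers, 8-byte doubles; filler_map stands in for the one_pointer_filler_map handle):

#include <cstdint>

const uintptr_t kPointerSize = 4;
const uintptr_t kDoubleAlignment = 8;

uintptr_t AlignAllocationTop(uintptr_t top, uintptr_t filler_map) {
  if (top & (kDoubleAlignment - 1)) {       // low bits set: only word-aligned
    *reinterpret_cast<uintptr_t*>(top) = filler_map;  // one-word filler object
    top += kPointerSize;                    // kDoubleSize / 2 in the MIPS code
  }
  return top;
}

The filler keeps the heap iterable: a GC walking the space sees a valid one-word object in the alignment gap instead of garbage.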
@@ -3008,12 +2927,29 @@ void MacroAssembler::Allocate(Register object_size,
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ And(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
+ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(scratch2, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
@@ -3028,7 +2964,7 @@ void MacroAssembler::Allocate(Register object_size,
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+ Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
}
sw(scratch2, MemOperand(topaddr));
@@ -3050,7 +2986,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
// Check that the object un-allocated is below the current top.
li(scratch, Operand(new_space_allocation_top));
lw(scratch, MemOperand(scratch));
- Check(less, "Undo allocation of non allocated memory",
+ Check(less, kUndoAllocationOfNonAllocatedMemory,
object, Operand(scratch));
#endif
// Write the address of the object to un-allocate as the current top.
@@ -3234,7 +3170,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (tagging_mode == TAG_RESULT) {
sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
} else {
@@ -3303,7 +3239,7 @@ void MacroAssembler::CopyBytes(Register src,
bind(&word_loop);
if (emit_debug_code()) {
And(scratch, src, kPointerSize - 1);
- Assert(eq, "Expecting alignment for CopyBytes",
+ Assert(eq, kExpectingAlignmentForCopyBytes,
scratch, Operand(zero_reg));
}
Branch(&byte_loop, lt, length, Operand(kPointerSize));
@@ -3396,7 +3332,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
Label* fail,
int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
@@ -3453,25 +3388,11 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
- FloatingPointHelper::Destination destination;
- destination = FloatingPointHelper::kFPURegisters;
-
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- sdc1(f0, MemOperand(scratch1, 0));
- } else {
- sw(mantissa_reg, MemOperand(scratch1, 0));
- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
+ mtc1(untagged_value, f2);
+ cvt_d_w(f0, f2);
+ sdc1(f0, MemOperand(scratch1, 0));
bind(&done);
}
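
The rewritten tail of StoreNumberToDoubleElements drops the FloatingPointHelper detour entirely: mtc1 moves the untagged smi into an FPU register, cvt_d_w converts the word to a double, and sdc1 stores the 64-bit result into the element slot. Reduced to C++, the whole sequence is one conversion and one store (a sketch, not the emitted code):

void StoreSmiAsDouble(int32_t untagged_value, double* element_slot) {
  *element_slot = static_cast<double>(untagged_value);  // cvt.d.w + sdc1
}

The removed code already forced destination = kFPURegisters unconditionally, so the soft-float branch storing separate mantissa and exponent words was dead; the new code simply inlines the FPU path.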
@@ -3931,7 +3852,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_fp) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
@@ -3960,14 +3880,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
PopSafepointRegisters();
}
- // The O32 ABI requires us to pass a pointer in a0 where the returned struct
- // (4 bytes) will be placed. This is also built into the Simulator.
- // Set up the pointer to the returned value (a0). It was allocated in
- // EnterExitFrame.
- if (returns_handle) {
- addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
- }
-
Label profiler_disabled;
Label end_profiler_check;
bool* is_profiling_flag =
@@ -4007,19 +3919,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Label leave_exit_frame;
Label return_value_loaded;
- if (returns_handle) {
- Label load_return_value;
-
- // As mentioned above, on MIPS a pointer is returned - we need to
- // dereference it to get the actual return value (which is also a pointer).
- lw(v0, MemOperand(v0));
-
- Branch(&load_return_value, eq, v0, Operand(zero_reg));
- // Dereference returned value.
- lw(v0, MemOperand(v0));
- Branch(&return_value_loaded);
- bind(&load_return_value);
- }
// Load value from ReturnValue.
lw(v0, MemOperand(fp, return_value_offset_from_fp*kPointerSize));
bind(&return_value_loaded);
@@ -4029,7 +3928,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
sw(s0, MemOperand(s3, kNextOffset));
if (emit_debug_code()) {
lw(a1, MemOperand(s3, kLevelOffset));
- Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
}
Subu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
@@ -4383,19 +4282,10 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void MacroAssembler::Assert(Condition cc, const char* msg,
+void MacroAssembler::Assert(Condition cc, BailoutReason reason,
Register rs, Operand rt) {
if (emit_debug_code())
- Check(cc, msg, rs, rt);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(at, index);
- Check(eq, "Register did not match expected root", reg, Operand(at));
- }
+ Check(cc, reason, rs, rt);
}
@@ -4411,24 +4301,24 @@ void MacroAssembler::AssertFastElements(Register elements) {
Branch(&ok, eq, elements, Operand(at));
LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
Branch(&ok, eq, elements, Operand(at));
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
pop(elements);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg,
+void MacroAssembler::Check(Condition cc, BailoutReason reason,
Register rs, Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
- Abort(msg);
+ Abort(reason);
// Will not return here.
bind(&L);
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
@@ -4436,6 +4326,7 @@ void MacroAssembler::Abort(const char* msg) {
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
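
The pointer-as-smi trick here deserves a line of arithmetic: p0 is the message pointer rounded down to a valid smi (low tag bits cleared), and the difference p1 - p0 is at most the tag mask, so it is smi-encodable as well. A sketch of the encoding under the usual kSmiTag == 0, kSmiTagMask == 1 scheme:

intptr_t p1 = reinterpret_cast<intptr_t>(msg);  // real char* to the message
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;    // rounded down: looks like a smi
intptr_t delta = p1 - p0;                       // 0 or 1, also a valid smi payload
// Runtime side: msg == reinterpret_cast<const char*>(p0 + delta).

Both pushed values therefore survive a GC scan untouched, because the collector treats anything carrying a smi tag as an immediate rather than a heap pointer.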
@@ -4444,6 +4335,11 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
li(a0, Operand(p0));
@@ -4579,12 +4475,122 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
Branch(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
+void MacroAssembler::LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label is_smi, done;
+
+ UntagAndJumpIfSmi(scratch, object, &is_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+
+ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Branch(&done);
+
+ bind(&is_smi);
+ mtc1(scratch, dst);
+ cvt_d_w(dst, dst);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
+ mtc1(scratch1, double_scratch);
+ cvt_d_w(double_dst, double_scratch);
+ Branch(&done);
+
+ bind(&obj_is_not_smi);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ // Load the double value.
+ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ scratch1,
+ double_dst,
+ at,
+ double_scratch,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ bind(&done);
+}
+
+
+void MacroAssembler::LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+
+ Label done, maybe_undefined;
+
+ UntagAndJumpIfSmi(dst, object, &done);
+
+ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ // Load the double value.
+ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ EmitFPUTruncate(kRoundToZero,
+ dst,
+ double_scratch0,
+ scratch1,
+ double_scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ Branch(&done);
+
+ bind(&maybe_undefined);
+ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Branch(not_int32, ne, object, Operand(at));
+ // |undefined| is truncated to 0.
+ li(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
+
+ bind(&done);
+}
+
+
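
LoadNumberAsInt32 accepts exactly three shapes of input: a smi (untagged directly), a heap number whose value converts to int32 without rounding (enforced via kCheckForInexactConversion), and undefined, which ToInt32 defines as 0. Everything else escapes to not_int32. As pseudo-C++ (Value and its accessors are illustrative stand-ins, not V8 API):

bool LoadNumberAsInt32(Value v, int32_t* out) {
  if (v.IsSmi()) { *out = v.SmiValue(); return true; }
  if (v.IsHeapNumber()) {
    double d = v.NumberValue();
    if (d < -2147483648.0 || d > 2147483647.0) return false;  // not_int32
    int32_t i = static_cast<int32_t>(d);            // kRoundToZero
    if (static_cast<double>(i) != d) return false;  // inexact: not_int32
    *out = i;
    return true;
  }
  if (v.IsUndefined()) { *out = 0; return true; }   // |undefined| -> 0
  return false;                                     // not_int32
}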
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
@@ -4862,7 +4868,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
andi(at, object, kSmiTagMask);
- Check(ne, "Operand is a smi", at, Operand(zero_reg));
+ Check(ne, kOperandIsASmi, at, Operand(zero_reg));
}
}
@@ -4871,7 +4877,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
andi(at, object, kSmiTagMask);
- Check(eq, "Operand is a smi", at, Operand(zero_reg));
+ Check(eq, kOperandIsASmi, at, Operand(zero_reg));
}
}
@@ -4880,11 +4886,11 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
And(t0, object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
+ Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+ Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
pop(object);
}
}
@@ -4894,23 +4900,21 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
And(t0, object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a name", t0, Operand(zero_reg));
+ Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(le, "Operand is not a name", object, Operand(LAST_NAME_TYPE));
+ Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
pop(object);
}
}
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Check(eq, message, src, Operand(at));
+ ASSERT(!reg.is(at));
+ LoadRoot(at, index);
+ Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
}
}
@@ -4920,7 +4924,7 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register scratch,
Label* on_not_heap_number) {
lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
@@ -5127,7 +5131,7 @@ void MacroAssembler::PatchRelocatedValue(Register li_location,
// At this point scratch is a lui(at, ...) instruction.
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be a lui.",
+ Check(eq, kTheInstructionToPatchShouldBeALui,
scratch, Operand(LUI));
lw(scratch, MemOperand(li_location));
}
@@ -5139,7 +5143,7 @@ void MacroAssembler::PatchRelocatedValue(Register li_location,
// scratch is now ori(at, ...).
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be an ori.",
+ Check(eq, kTheInstructionToPatchShouldBeAnOri,
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
@@ -5156,7 +5160,7 @@ void MacroAssembler::GetRelocatedValue(Register li_location,
lw(value, MemOperand(li_location));
if (emit_debug_code()) {
And(value, value, kOpcodeMask);
- Check(eq, "The instruction should be a lui.",
+ Check(eq, kTheInstructionShouldBeALui,
value, Operand(LUI));
lw(value, MemOperand(li_location));
}
@@ -5167,7 +5171,7 @@ void MacroAssembler::GetRelocatedValue(Register li_location,
lw(scratch, MemOperand(li_location, kInstrSize));
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction should be an ori.",
+ Check(eq, kTheInstructionShouldBeAnOri,
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
@@ -5508,6 +5512,30 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
}
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
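
GetRegisterThatIsNotOneOf collects the valid arguments into a RegList bitmask and returns the first allocatable register outside that set, so a caller can obtain a scratch register that provably does not alias any live value. A hypothetical call site, for illustration:

// Illustrative use: pick a scratch that aliases none of the inputs.
Register scratch = GetRegisterThatIsNotOneOf(object, result, heap_number_map);
ASSERT(!AreAliased(scratch, object, result, heap_number_map));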
diff --git a/chromium/v8/src/mips/macro-assembler-mips.h b/chromium/v8/src/mips/macro-assembler-mips.h
index bc3e7c48b4a..75ded884909 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/mips/macro-assembler-mips.h
@@ -51,20 +51,6 @@ class JumpTarget;
// MIPS generated code calls C code, it must be via t9 register.
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
-};
-
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
@@ -104,6 +90,13 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
@@ -249,6 +242,14 @@ class MacroAssembler: public Assembler {
mfc1(dst_high, FPURegister::from_code(src.code() + 1));
}
+ inline void FmoveHigh(Register dst_high, FPURegister src) {
+ mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ }
+
+ inline void FmoveLow(Register dst_low, FPURegister src) {
+ mfc1(dst_low, src);
+ }
+
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
@@ -627,11 +628,11 @@ class MacroAssembler: public Assembler {
void MultiPushFPU(RegList regs);
void MultiPushReversedFPU(RegList regs);
- // Lower case push() for compatibility with arch-independent code.
void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
+ void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<Object> handle);
@@ -676,11 +677,11 @@ class MacroAssembler: public Assembler {
void MultiPopFPU(RegList regs);
void MultiPopReversedFPU(RegList regs);
- // Lower case pop() for compatibility with arch-independent code.
void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
+ void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
@@ -757,17 +758,6 @@ class MacroAssembler: public Assembler {
BranchF(target, nan, cc, cmp1, cmp2, bd);
};
- // Convert the HeapNumber pointed to by source to a 32-bit signed integer
- // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
- // to the not_int32 label. If FPU is available double_scratch is used but not
- // scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32);
-
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
@@ -782,26 +772,71 @@ class MacroAssembler: public Assembler {
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the signed 32-bit
- // integer range to a 32-bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values in the 32bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if the result is saturated. On return
+ // 'result' either holds the answer or is clobbered on fall through.
+ //
+ // Only public for the test code in test-code-stubs-arm.cc.
+ void TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister input,
+ Label* done);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+ // Performs a truncating conversion of a heap number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+ // must be different registers. Exits with 'result' holding the answer.
+ void TruncateHeapNumberToI(Register result, Register object);
+
+ // Converts the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
+ // different registers.
+ void TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_int32);
+
+ // Loads the number from object into the dst register.
+ // If |object| is neither a smi nor a heap number, |not_number| is jumped to
+ // with |object| still intact.
+ void LoadNumber(Register object,
+ FPURegister dst,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number);
+
+ // Loads the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be loaded.
+ void LoadNumberAsInt32Double(Register object,
+ DoubleRegister double_dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+ // Floating point values in the 32-bit integer range that are not exact
+ // integers won't be converted.
+ void LoadNumberAsInt32(Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
+ Label* not_int32);
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
@@ -992,16 +1027,13 @@ class MacroAssembler: public Assembler {
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
+ // the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- // All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
Label* fail,
int elements_offset = 0);
@@ -1239,7 +1271,6 @@ class MacroAssembler: public Assembler {
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_fp);
// Jump to the builtin routine.
@@ -1286,15 +1317,14 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg, Register rs, Operand rt);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg, Register rs, Operand rt);
+ void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
@@ -1374,11 +1404,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
- // Abort execution if argument is not the root value with the given index,
+ // Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities.
diff --git a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
index 7b67a7b47f9..1a04fd10292 100644
--- a/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -882,7 +882,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
masm_->GetCode(&code_desc);
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
- LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@@ -1086,7 +1086,6 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
diff --git a/chromium/v8/src/mips/simulator-mips.cc b/chromium/v8/src/mips/simulator-mips.cc
index 914a7586623..ea8b65948af 100644
--- a/chromium/v8/src/mips/simulator-mips.cc
+++ b/chromium/v8/src/mips/simulator-mips.cc
@@ -1387,27 +1387,12 @@ typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-// NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
-// struct from the function (which is currently the case). This means we pass
-// the first argument in a1 instead of a0.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-// Here, we pass the first argument in a0, because this function
-// does not return a struct.
-typedef void (*SimulatorRuntimeDirectApiCallNew)(int32_t arg0);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingApiCall)(
- int32_t arg0, int32_t arg1);
-typedef void (*SimulatorRuntimeProfilingApiCallNew)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, int32_t arg1);
// This signature supports direct call to accessor getter callback.
-// See comment at SimulatorRuntimeDirectApiCall.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
-// See comment at SimulatorRuntimeDirectApiCallNew.
-typedef void (*SimulatorRuntimeDirectGetterCallNew)(int32_t arg0,
- int32_t arg1);
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, int32_t arg2);
-typedef void (*SimulatorRuntimeProfilingGetterCallNew)(
+typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
int32_t arg0, int32_t arg1, int32_t arg2);
// Software interrupt instructions are used by the simulator to call into the
@@ -1553,102 +1538,41 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
}
- } else if (
- redirection->type() == ExternalReference::DIRECT_API_CALL ||
- redirection->type() == ExternalReference::DIRECT_API_CALL_NEW) {
- if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- // See comment at type definition of SimulatorRuntimeDirectApiCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x\n",
- reinterpret_cast<void*>(external), arg1);
- }
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x\n",
- reinterpret_cast<void*>(external), arg0);
- }
- SimulatorRuntimeDirectApiCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectApiCallNew>(external);
- target(arg0);
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x\n",
+ reinterpret_cast<void*>(external), arg0);
}
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
} else if (
- redirection->type() == ExternalReference::PROFILING_API_CALL ||
- redirection->type() == ExternalReference::PROFILING_API_CALL_NEW) {
- if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
- // See comment at type definition of SimulatorRuntimeDirectApiCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg1, arg2);
- }
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1);
- }
- SimulatorRuntimeProfilingApiCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCallNew>(external);
- target(arg0, arg1);
+ redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
}
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL ||
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL_NEW) {
- if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- // See comment at type definition of SimulatorRuntimeDirectGetterCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg1, arg2);
- }
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1);
- }
- SimulatorRuntimeDirectGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCallNew>(external);
- target(arg0, arg1);
+ redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1);
}
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
} else if (
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL ||
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL_NEW) {
- if (redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
- // See comment at type definition of SimulatorRuntimeProfilingGetterCall
- // for explanation of register usage.
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x %08x\n",
- reinterpret_cast<void*>(external), arg1, arg2, arg3);
- }
- SimulatorRuntimeProfilingGetterCall target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
- v8::Handle<v8::Value> result = target(arg1, arg2, arg3);
- *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
- set_register(v0, arg0);
- } else {
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08x %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1, arg2);
- }
- SimulatorRuntimeProfilingGetterCallNew target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCallNew>(external);
- target(arg0, arg1, arg2);
+ redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x %08x\n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
}
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, arg2);
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
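
With the *_NEW variants folded away, every API redirection maps to a single void-returning signature, so the O32 struct-return shuffling (result pointer in a0, arguments shifted up one register) disappears from the simulator. The dispatch pattern it keeps, stripped to its core (a sketch):

#include <cstdint>

typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);

// The redirection records the host-compiled callback's address; invoking
// it from simulated code is just a cast back to the agreed signature.
void CallDirectApi(void* external, int32_t arg0) {
  SimulatorRuntimeDirectApiCall target =
      reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
  target(arg0);
}

Results now travel through the ReturnValue slot on the exit frame instead of a handle in v0, which is also why the handle-dereferencing block vanished from CallApiFunctionAndReturn above.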
diff --git a/chromium/v8/src/mips/stub-cache-mips.cc b/chromium/v8/src/mips/stub-cache-mips.cc
index c4b1ee57a72..6d68bbd7d0d 100644
--- a/chromium/v8/src/mips/stub-cache-mips.cc
+++ b/chromium/v8/src/mips/stub-cache-mips.cc
@@ -117,18 +117,14 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be unique and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register scratch0,
- Register scratch1) {
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -412,15 +408,12 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<Cell> cell = GlobalObject::EnsurePropertyCell(global, name);
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ li(scratch, Operand(cell));
__ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
@@ -437,7 +430,7 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
Label* miss) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
- masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss, holder_reg, name, scratch1(), scratch2());
@@ -777,16 +770,17 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
Register scratch = name;
__ li(scratch, Operand(interceptor));
__ Push(scratch, receiver, holder);
- __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ push(scratch);
}
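
The STATIC_ASSERTs pin down the argument order the IC runtime expects, and PushInterceptorArguments no longer pushes the interceptor data or the isolate, shrinking the frame from 6 words to kInterceptorArgsLength (4); the PrepareCEntryArgs call sites below change to match. The resulting layout, deepest slot first:

//   kInterceptorArgsNameIndex   == 0 : name
//   kInterceptorArgsInfoIndex   == 1 : interceptor info
//   kInterceptorArgsThisIndex   == 2 : receiver
//   kInterceptorArgsHolderIndex == 3 : holder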
@@ -801,7 +795,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ PrepareCEntryArgs(6);
+ __ PrepareCEntryArgs(StubCache::kInterceptorArgsLength);
__ PrepareCEntryFunction(ref);
CEntryStub stub(1);
@@ -879,51 +873,31 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the first argument in a1 instead of a0, if returns_handle is true.
- // CallApiFunctionAndReturn will set up a0.
-
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
-
- Register first_arg = returns_handle ? a1 : a0;
- Register second_arg = returns_handle ? a2 : a1;
-
- // first_arg = v8::Arguments&
+ // a0 = v8::Arguments&
// Arguments is built at sp + 1 (sp is a reserved spot for ra).
- __ Addu(first_arg, sp, kPointerSize);
+ __ Addu(a0, sp, kPointerSize);
// v8::Arguments::implicit_args_
- __ sw(a2, MemOperand(first_arg, 0 * kPointerSize));
+ __ sw(a2, MemOperand(a0, 0 * kPointerSize));
// v8::Arguments::values_
__ Addu(t0, a2, Operand(argc * kPointerSize));
- __ sw(t0, MemOperand(first_arg, 1 * kPointerSize));
+ __ sw(t0, MemOperand(a0, 1 * kPointerSize));
// v8::Arguments::length_ = argc
__ li(t0, Operand(argc));
- __ sw(t0, MemOperand(first_arg, 2 * kPointerSize));
+ __ sw(t0, MemOperand(a0, 2 * kPointerSize));
// v8::Arguments::is_construct_call = 0
- __ sw(zero_reg, MemOperand(first_arg, 3 * kPointerSize));
+ __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_API_CALL :
- ExternalReference::DIRECT_API_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref =
ExternalReference(&fun,
type,
masm->isolate());
-
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_API_CALL :
- ExternalReference::PROFILING_API_CALL_NEW;
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
masm->isolate());
@@ -932,12 +906,41 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
- second_arg,
+ a1,
kStackUnwindSpace,
- returns_handle,
kFastApiCallArguments + 1);
}
+
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+ // Assign stack space for the call arguments.
+ __ Subu(sp, sp, Operand(stack_space * kPointerSize));
+ // Write holder to stack frame.
+ __ sw(receiver, MemOperand(sp, 0));
+ // Write receiver to stack frame.
+ int index = stack_space - 1;
+ __ sw(receiver, MemOperand(sp, index * kPointerSize));
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ sw(values[i], MemOperand(sp, --index * kPointerSize));
+ }
+
+ GenerateFastApiDirectCall(masm, optimization, argc);
+}
+
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -1098,7 +1101,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@@ -1134,19 +1137,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
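+// Calls GenerateCheckPropertyCell for each JSGlobalObject in the prototype
+// chain from object to (but not including) holder.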
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -1156,22 +1157,6 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
}
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If FPU is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register scratch1) {
- __ mtc1(ival, f0);
- __ cvt_s_w(f0, f0);
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ swc1(f0, MemOperand(scratch1, 0));
-}
-
-
void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1324,7 +1309,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1364,26 +1349,6 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- HandlerFrontendFooter(name, success, &miss);
-}
-
-
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
@@ -1411,10 +1376,26 @@ void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build the AccessorInfo::args_ list on the stack and push the property name
// below the exit frame, so that the GC is aware of them; args_ itself is a
// pointer to these stack slots.
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ ASSERT(!scratch2().is(reg));
+ ASSERT(!scratch3().is(reg));
+ ASSERT(!scratch4().is(reg));
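+  // The six stores below lay out PropertyCallbackArguments relative to args_
+  // (scratch2): the receiver sits at index 0, followed below it by the data,
+  // return value, return value default, isolate and holder slots, with the
+  // property name in the slot underneath them all.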
__ push(receiver());
__ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
@@ -1425,31 +1406,18 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ li(scratch3(), Handle<Object>(callback->data(), isolate()));
}
__ Subu(sp, sp, 6 * kPointerSize);
- __ sw(reg, MemOperand(sp, 5 * kPointerSize));
- __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
+ __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
__ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
- __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
__ li(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
- __ sw(scratch4(), MemOperand(sp, 1 * kPointerSize));
+ __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
+ __ sw(reg, MemOperand(sp, 1 * kPointerSize));
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
- Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
-
- Register first_arg = returns_handle ? a1 : a0;
- Register second_arg = returns_handle ? a2 : a1;
- Register third_arg = returns_handle ? a3 : a2;
-
__ mov(a2, scratch2()); // Saved in case scratch2 == a1.
- __ mov(first_arg, sp); // (first argument - see note below) = Handle<Name>
-
- // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
- // struct from the function (which is currently the case). This means we pass
- // the arguments in a1-a2 instead of a0-a1, if returns_handle is true.
- // CallApiFunctionAndReturn will set up a0.
+ __ mov(a0, sp); // (first argument - a0) = Handle<Name>
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
@@ -1458,35 +1426,27 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
- // (second argument - see note above) = AccessorInfo&
- __ Addu(second_arg, sp, kPointerSize);
+ // (second argument - a1) = AccessorInfo&
+ __ Addu(a1, sp, kPointerSize);
const int kStackUnwindSpace = kFastApiCallArguments + 1;
-
+ Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
- ExternalReference::Type type =
- returns_handle ?
- ExternalReference::DIRECT_GETTER_CALL :
- ExternalReference::DIRECT_GETTER_CALL_NEW;
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
ExternalReference::Type thunk_type =
- returns_handle ?
- ExternalReference::PROFILING_GETTER_CALL :
- ExternalReference::PROFILING_GETTER_CALL_NEW;
+ ExternalReference::PROFILING_GETTER_CALL;
ApiFunction thunk_fun(thunk_address);
ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
isolate());
__ CallApiFunctionAndReturn(ref,
getter_address,
thunk_ref,
- third_arg,
+ a2,
kStackUnwindSpace,
- returns_handle,
- 5);
+ 6);
}
@@ -1571,7 +1531,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -1819,25 +1779,25 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&call_builtin,
DONT_DO_SMI_CHECK);
- // Get the array's length into r0 and calculate new length.
- __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ // Get the array's length into v0 and calculate new length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ Addu(a0, a0, Operand(Smi::FromInt(argc)));
+ __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, a0, Operand(t0));
+ __ Branch(&call_builtin, gt, v0, Operand(t0));
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
- t0, a0, elements, a3, t1, a2, t5,
+ t0, v0, elements, a3, t1, a2,
&call_builtin, argc * kDoubleSize);
// Save new length.
- __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Check for a smi.
__ DropAndRet(argc + 1);
@@ -2854,6 +2814,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2934,47 +2912,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetICCode(kind(), Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- __ Branch(&miss, ne, scratch1(), Operand(Handle<Map>(object->map())));
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ li(scratch1(), Operand(cell));
- __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
- __ lw(scratch3(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- __ Branch(&miss, eq, scratch3(), Operand(scratch2()));
-
- // Store the value in the cell.
- __ sw(value(), FieldMemOperand(scratch1(), Cell::kValueOffset));
- __ mov(v0, a0); // Stored value must be returned in v0.
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(
- counters->named_store_global_inline(), 1, scratch1(), scratch2());
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(
- counters->named_store_global_inline_miss(), 1, scratch1(), scratch2());
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
@@ -2982,7 +2920,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
- Handle<GlobalObject> global) {
+ Handle<JSGlobalObject> global) {
Label success;
NonexistentHandlerFrontend(object, last, name, &success, global);
@@ -3229,570 +3167,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- Register scratch1,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
- Label* fail) {
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
- __ EmitFPUTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- at,
- double_scratch1,
- scratch1,
- kCheckForInexactConversion);
-
- __ Branch(fail, ne, scratch1, Operand(zero_reg));
-
- __ SmiTagCheckOverflow(key, scratch0, scratch1);
- __ BranchOnOverflow(fail, scratch1);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
-
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = a0;
- Register key = a1;
- Register receiver = a2;
- // a3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
-
- __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range.
- __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // a3: external array.
-
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(t1, value);
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
- // t1: value (integer).
-
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- // Clamp the value to [0..255].
- // v0 is used as a scratch register here.
- Label done;
- __ li(v0, Operand(255));
- // Normal branch: nop in delay slot.
- __ Branch(&done, gt, t1, Operand(v0));
- // Use delay slot in this branch.
- __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
- __ mov(v0, zero_reg); // In delay slot.
- __ mov(v0, t1); // Value is in range 0..255.
- __ bind(&done);
- __ mov(t1, v0);
-
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- }
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t1, MemOperand(t8, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(t0, key);
- StoreIntAsFloat(masm, a3, t0, t1, t2);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t8, key, 2);
- __ addu(a3, a3, t8);
- // a3: effective address of the double element
- FloatingPointHelper::Destination destination;
- destination = FloatingPointHelper::kFPURegisters;
- FloatingPointHelper::ConvertIntToDouble(
- masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
- t0, f2); // These are: scratch2, single_scratch.
- __ sdc1(f0, MemOperand(a3, 0));
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, a0 holds the value which is the return value.
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // a3: external array.
- __ bind(&check_heap_number);
- __ GetObjectType(value, t1, t2);
- __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
-
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-
- // a3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
-
-
- __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvt_s_d(f0, f0);
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ swc1(f0, MemOperand(t8, 0));
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- __ sdc1(f0, MemOperand(t8, 0));
- } else {
- __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, a0 holds the value
- // which is the return value.
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- }
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
- // Entry registers are intact.
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch
- // -- a4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register scratch = t0;
- Register elements_reg = a3;
- Register length_reg = t1;
- Register scratch2 = t2;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- __ Branch(&grow, hs, key_reg, Operand(scratch));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
- }
-
- // Make sure elements is a fast element array, not 'cow'.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ Addu(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch2);
- __ sw(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (a0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ Ret(USE_DELAY_SLOT);
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedCOWArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -- a3 : scratch (elements backing store)
- // -- t0 : scratch (elements_reg)
- // -- t1 : scratch (mantissa_reg)
- // -- t2 : scratch (exponent_reg)
- // -- t3 : scratch4
- // -- t4 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = a0;
- Register key_reg = a1;
- Register receiver_reg = a2;
- Register elements_reg = a3;
- Register scratch1 = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
- Register scratch4 = t3;
- Register scratch5 = t4;
- Register length_reg = t3;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis, unsigned compare catches both negative and out-of-bound
- // indexes.
- if (IsGrowStoreMode(store_mode)) {
- __ Branch(&grow, hs, key_reg, Operand(scratch1));
- } else {
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
- }
-
- __ bind(&finish_store);
-
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value_reg); // In delay slot.
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element, anything else
- // must be handled by the runtime.
- __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ lw(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
-
- // Initialize the new FixedDoubleArray.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ sw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
- __ li(scratch1, Operand(kHoleNanLower32));
- __ li(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ sw(scratch1, FieldMemOperand(elements_reg, offset));
- __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
- // Install the new backing store in the JSArray.
- __ sw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ li(length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret(USE_DELAY_SLOT);
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ lw(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ Branch(&slow, hs, length_reg, Operand(scratch1));
-
- // Grow the array and finish the store.
- __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/mksnapshot.cc b/chromium/v8/src/mksnapshot.cc
index c1edcb1b3a1..9cf9e2e8a42 100644
--- a/chromium/v8/src/mksnapshot.cc
+++ b/chromium/v8/src/mksnapshot.cc
@@ -314,9 +314,6 @@ int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
- // Disable the i18n extension, as it doesn't support being snapshotted yet.
- i::FLAG_enable_i18n = false;
-
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
@@ -333,8 +330,9 @@ int main(int argc, char** argv) {
exit(1);
}
#endif
- i::Serializer::Enable();
Isolate* isolate = Isolate::GetCurrent();
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Serializer::Enable(internal_isolate);
Persistent<Context> context;
{
HandleScope handle_scope(isolate);
@@ -391,21 +389,23 @@ int main(int argc, char** argv) {
// Make sure all builtin scripts are cached.
{ HandleScope scope(isolate);
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ internal_isolate->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
+ internal_isolate->heap()->CollectAllGarbage(
+ i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *v8::Utils::OpenPersistent(context);
- context.Dispose(isolate);
+ context.Dispose();
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
- i::StartupSerializer ser(&sink);
+ i::StartupSerializer ser(internal_isolate, &sink);
ser.SerializeStrongReferences();
- i::PartialSerializer partial_ser(&ser, sink.partial_sink());
+ i::PartialSerializer partial_ser(
+ internal_isolate, &ser, sink.partial_sink());
partial_ser.Serialize(&raw_context);
ser.SerializeWeakReferences();
diff --git a/chromium/v8/src/natives.h b/chromium/v8/src/natives.h
index e3f69d1dae0..5f34420d0b2 100644
--- a/chromium/v8/src/natives.h
+++ b/chromium/v8/src/natives.h
@@ -36,7 +36,7 @@ typedef bool (*NativeSourceCallback)(Vector<const char> name,
int index);
enum NativeType {
- CORE, EXPERIMENTAL, D8, TEST, I18N
+ CORE, EXPERIMENTAL, D8, TEST
};
template <NativeType type>
@@ -61,7 +61,6 @@ class NativesCollection {
typedef NativesCollection<CORE> Natives;
typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
-typedef NativesCollection<I18N> I18NNatives;
} } // namespace v8::internal
diff --git a/chromium/v8/src/object-observe.js b/chromium/v8/src/object-observe.js
index a5c12bf0098..1035792e8b6 100644
--- a/chromium/v8/src/object-observe.js
+++ b/chromium/v8/src/object-observe.js
@@ -27,12 +27,41 @@
"use strict";
+// Overview:
+//
+// This file contains all of the routing and accounting for Object.observe.
+// User code will interact with these mechanisms via the Object.observe APIs
+// and, as a side effect, by mutating objects which are observed. The V8
+// runtime (both C++ and JS) will interact with these mechanisms primarily by
+// enqueuing proper change records for objects which were mutated. The
+// Object.observe routing and accounting consists primarily of three
+// participants:
+//
+// 1) ObjectInfo. This represents the observed state of a given object. It
+// records what callbacks are observing the object, with what options, and
+// what "change types" are in progress on the object (i.e. via
+// notifier.performChange).
+//
+// 2) CallbackInfo. This represents a callback used for observation. It holds
+// the records which must be delivered to the callback, as well as the global
+// priority of the callback (which determines delivery order between
+// callbacks).
+//
+// 3) observationState.pendingObservers. This is the set of observers which
+// have change records which must be delivered. During "normal" delivery
+// (i.e. not Object.deliverChangeRecords), this is the mechanism by which
+// callbacks are invoked in the proper order until there are no more
+// change records pending to a callback.
+//
+// Note that in order to reduce allocation and processing costs, the
+// implementations of (1) and (2) have "optimized" states which represent
+// common cases which can be handled more efficiently.
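+//
+// As a rough sketch, the user-visible flow these participants serve looks
+// like this (plain Object.observe API usage, not code from this file):
+//
+//   function callback(records) { /* records: an array of change records */ }
+//   Object.observe(obj, callback);           // registers an observer
+//   obj.x = 0;                               // enqueues a 'new' change record
+//   Object.deliverChangeRecords(callback);   // drains this callback's queue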
+
var observationState = %GetObservationState();
if (IS_UNDEFINED(observationState.callbackInfoMap)) {
observationState.callbackInfoMap = %ObservationWeakMapCreate();
observationState.objectInfoMap = %ObservationWeakMapCreate();
- observationState.notifierTargetMap = %ObservationWeakMapCreate();
- observationState.pendingObservers = new InternalArray;
+ observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
+ observationState.pendingObservers = null;
observationState.nextCallbackPriority = 0;
}
@@ -59,126 +88,191 @@ ObservationWeakMap.prototype = {
var callbackInfoMap =
new ObservationWeakMap(observationState.callbackInfoMap);
var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
-var notifierTargetMap =
- new ObservationWeakMap(observationState.notifierTargetMap);
-
-function CreateObjectInfo(object) {
- var info = {
- changeObservers: new InternalArray,
- notifier: null,
- inactiveObservers: new InternalArray,
- performing: { __proto__: null },
- performingCount: 0,
- };
- objectInfoMap.set(object, info);
- return info;
+var notifierObjectInfoMap =
+ new ObservationWeakMap(observationState.notifierObjectInfoMap);
+
+function TypeMapCreate() {
+ return { __proto__: null };
}
-var defaultAcceptTypes = {
- __proto__: null,
- 'new': true,
- 'updated': true,
- 'deleted': true,
- 'prototype': true,
- 'reconfigured': true
-};
+function TypeMapAddType(typeMap, type, ignoreDuplicate) {
+ typeMap[type] = ignoreDuplicate ? 1 : (typeMap[type] || 0) + 1;
+}
-function CreateObserver(callback, accept) {
- var observer = {
+function TypeMapRemoveType(typeMap, type) {
+ typeMap[type]--;
+}
+
+function TypeMapCreateFromList(typeList) {
+ var typeMap = TypeMapCreate();
+ for (var i = 0; i < typeList.length; i++) {
+ TypeMapAddType(typeMap, typeList[i], true);
+ }
+ return typeMap;
+}
+
+function TypeMapHasType(typeMap, type) {
+ return !!typeMap[type];
+}
+
+function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
+ if (!typeMap1 || !typeMap2)
+ return true;
+
+ for (var type in typeMap1) {
+ if (TypeMapHasType(typeMap1, type) && TypeMapHasType(typeMap2, type))
+ return false;
+ }
+
+ return true;
+}
+
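+// A type map is a null-prototype object mapping a change type to a truthy
+// count, e.g. { __proto__: null, updated: 1 }; both accept lists and the set
+// of in-flight performChange types are represented this way.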
+var defaultAcceptTypes = TypeMapCreateFromList([
+ 'new',
+ 'updated',
+ 'deleted',
+ 'prototype',
+ 'reconfigured'
+]);
+
+// An Observer is a registration to observe an object by a callback with
+// a given set of accept types. If the set of accept types is the default
+// set for Object.observe, the observer is represented as a direct reference
+// to the callback. An observer never changes its accept types and thus never
+// needs to "normalize".
+function ObserverCreate(callback, acceptList) {
+ return IS_UNDEFINED(acceptList) ? callback : {
__proto__: null,
callback: callback,
- accept: defaultAcceptTypes
+ accept: TypeMapCreateFromList(acceptList)
};
+}
- if (IS_UNDEFINED(accept))
- return observer;
-
- var acceptMap = { __proto__: null };
- for (var i = 0; i < accept.length; i++)
- acceptMap[accept[i]] = true;
+function ObserverGetCallback(observer) {
+ return IS_SPEC_FUNCTION(observer) ? observer : observer.callback;
+}
- observer.accept = acceptMap;
- return observer;
+function ObserverGetAcceptTypes(observer) {
+ return IS_SPEC_FUNCTION(observer) ? defaultAcceptTypes : observer.accept;
}
function ObserverIsActive(observer, objectInfo) {
- if (objectInfo.performingCount === 0)
- return true;
+ return TypeMapIsDisjointFrom(ObjectInfoGetPerformingTypes(objectInfo),
+ ObserverGetAcceptTypes(observer));
+}
- var performing = objectInfo.performing;
- for (var type in performing) {
- if (performing[type] > 0 && observer.accept[type])
- return false;
+function ObjectInfoGet(object) {
+ var objectInfo = objectInfoMap.get(object);
+ if (IS_UNDEFINED(objectInfo)) {
+ if (!%IsJSProxy(object))
+ %SetIsObserved(object);
+
+ objectInfo = {
+ object: object,
+ changeObservers: null,
+ notifier: null,
+ performing: null,
+ performingCount: 0,
+ };
+ objectInfoMap.set(object, objectInfo);
}
+ return objectInfo;
+}
- return true;
+function ObjectInfoGetFromNotifier(notifier) {
+ return notifierObjectInfoMap.get(notifier);
+}
+
+function ObjectInfoGetNotifier(objectInfo) {
+ if (IS_NULL(objectInfo.notifier)) {
+ objectInfo.notifier = { __proto__: notifierPrototype };
+ notifierObjectInfoMap.set(objectInfo.notifier, objectInfo);
+ }
+
+ return objectInfo.notifier;
+}
+
+function ObjectInfoGetObject(objectInfo) {
+ return objectInfo.object;
}
-function ObserverIsInactive(observer, objectInfo) {
- return !ObserverIsActive(observer, objectInfo);
+function ChangeObserversIsOptimized(changeObservers) {
+ return typeof changeObservers === 'function' ||
+ typeof changeObservers.callback === 'function';
}
-function RemoveNullElements(from) {
- var i = 0;
- var j = 0;
- for (; i < from.length; i++) {
- if (from[i] === null)
- continue;
- if (j < i)
- from[j] = from[i];
- j++;
+// The set of observers on an object is called 'changeObservers'. The first
+// observer is referenced directly via objectInfo.changeObservers. When a second
+// is added, changeObservers "normalizes" to become a mapping of callback
+// priority -> observer and is then stored on objectInfo.changeObservers.
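+// E.g. two observers with priorities 0 and 1 are stored as
+// { __proto__: null, 0: observer0, 1: observer1 }.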
+function ObjectInfoNormalizeChangeObservers(objectInfo) {
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ var observer = objectInfo.changeObservers;
+ var callback = ObserverGetCallback(observer);
+ var callbackInfo = CallbackInfoGet(callback);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ objectInfo.changeObservers = { __proto__: null };
+ objectInfo.changeObservers[priority] = observer;
}
+}
+
+function ObjectInfoAddObserver(objectInfo, callback, acceptList) {
+ var callbackInfo = CallbackInfoGetOrCreate(callback);
+ var observer = ObserverCreate(callback, acceptList);
- if (i !== j)
- from.length = from.length - (i - j);
+ if (!objectInfo.changeObservers) {
+ objectInfo.changeObservers = observer;
+ return;
+ }
+
+ ObjectInfoNormalizeChangeObservers(objectInfo);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ objectInfo.changeObservers[priority] = observer;
}
-function RepartitionObservers(conditionFn, from, to, objectInfo) {
- var anyRemoved = false;
- for (var i = 0; i < from.length; i++) {
- var observer = from[i];
- if (conditionFn(observer, objectInfo)) {
- anyRemoved = true;
- from[i] = null;
- to.push(observer);
- }
+function ObjectInfoRemoveObserver(objectInfo, callback) {
+ if (!objectInfo.changeObservers)
+ return;
+
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ if (callback === ObserverGetCallback(objectInfo.changeObservers))
+ objectInfo.changeObservers = null;
+ return;
}
- if (anyRemoved)
- RemoveNullElements(from);
+ var callbackInfo = CallbackInfoGet(callback);
+ var priority = CallbackInfoGetPriority(callbackInfo);
+ delete objectInfo.changeObservers[priority];
}
-function BeginPerformChange(objectInfo, type) {
- objectInfo.performing[type] = (objectInfo.performing[type] || 0) + 1;
+function ObjectInfoHasActiveObservers(objectInfo) {
+ if (IS_UNDEFINED(objectInfo) || !objectInfo.changeObservers)
+ return false;
+
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers))
+ return ObserverIsActive(objectInfo.changeObservers, objectInfo);
+
+ for (var priority in objectInfo.changeObservers) {
+ if (ObserverIsActive(objectInfo.changeObservers[priority], objectInfo))
+ return true;
+ }
+
+ return false;
+}
+
+function ObjectInfoAddPerformingType(objectInfo, type) {
+ objectInfo.performing = objectInfo.performing || TypeMapCreate();
+ TypeMapAddType(objectInfo.performing, type);
objectInfo.performingCount++;
- RepartitionObservers(ObserverIsInactive,
- objectInfo.changeObservers,
- objectInfo.inactiveObservers,
- objectInfo);
}
-function EndPerformChange(objectInfo, type) {
- objectInfo.performing[type]--;
+function ObjectInfoRemovePerformingType(objectInfo, type) {
objectInfo.performingCount--;
- RepartitionObservers(ObserverIsActive,
- objectInfo.inactiveObservers,
- objectInfo.changeObservers,
- objectInfo);
-}
-
-function EnsureObserverRemoved(objectInfo, callback) {
- function remove(observerList) {
- for (var i = 0; i < observerList.length; i++) {
- if (observerList[i].callback === callback) {
- observerList.splice(i, 1);
- return true;
- }
- }
- return false;
- }
+ TypeMapRemoveType(objectInfo.performing, type);
+}
- if (!remove(objectInfo.changeObservers))
- remove(objectInfo.inactiveObservers);
+function ObjectInfoGetPerformingTypes(objectInfo) {
+ return objectInfo.performingCount > 0 ? objectInfo.performing : null;
}
function AcceptArgIsValid(arg) {
@@ -198,12 +292,31 @@ function AcceptArgIsValid(arg) {
return true;
}
-function EnsureCallbackPriority(callback) {
- if (!callbackInfoMap.has(callback))
- callbackInfoMap.set(callback, observationState.nextCallbackPriority++);
+// CallbackInfo's optimized state is just a number which represents its global
+// priority. When a change record must be enqueued for the callback, it
+// normalizes. When delivery clears any pending change records, it re-optimizes.
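+// I.e. callbackInfoMap.get(callback) is either a bare priority number or an
+// array of pending change records carrying a 'priority' property.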
+function CallbackInfoGet(callback) {
+ return callbackInfoMap.get(callback);
+}
+
+function CallbackInfoGetOrCreate(callback) {
+ var callbackInfo = callbackInfoMap.get(callback);
+ if (!IS_UNDEFINED(callbackInfo))
+ return callbackInfo;
+
+ var priority = observationState.nextCallbackPriority++
+ callbackInfoMap.set(callback, priority);
+ return priority;
}
-function NormalizeCallbackInfo(callback) {
+function CallbackInfoGetPriority(callbackInfo) {
+ if (IS_NUMBER(callbackInfo))
+ return callbackInfo;
+ else
+ return callbackInfo.priority;
+}
+
+function CallbackInfoNormalize(callback) {
var callbackInfo = callbackInfoMap.get(callback);
if (IS_NUMBER(callbackInfo)) {
var priority = callbackInfo;
@@ -214,32 +327,18 @@ function NormalizeCallbackInfo(callback) {
return callbackInfo;
}
-function ObjectObserve(object, callback, accept) {
+function ObjectObserve(object, callback, acceptList) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["observe"]);
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["observe"]);
if (ObjectIsFrozen(callback))
throw MakeTypeError("observe_callback_frozen");
- if (!AcceptArgIsValid(accept))
+ if (!AcceptArgIsValid(acceptList))
throw MakeTypeError("observe_accept_invalid");
- EnsureCallbackPriority(callback);
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) {
- objectInfo = CreateObjectInfo(object);
- %SetIsObserved(object);
- }
-
- EnsureObserverRemoved(objectInfo, callback);
-
- var observer = CreateObserver(callback, accept);
- if (ObserverIsActive(observer, objectInfo))
- objectInfo.changeObservers.push(observer);
- else
- objectInfo.inactiveObservers.push(observer);
-
+ var objectInfo = ObjectInfoGet(object);
+ ObjectInfoAddObserver(objectInfo, callback, acceptList);
return object;
}
@@ -253,7 +352,7 @@ function ObjectUnobserve(object, callback) {
if (IS_UNDEFINED(objectInfo))
return object;
- EnsureObserverRemoved(objectInfo, callback);
+ ObjectInfoRemoveObserver(objectInfo, callback);
return object;
}
@@ -268,41 +367,67 @@ function ArrayUnobserve(object, callback) {
return ObjectUnobserve(object, callback);
}
-function EnqueueToCallback(callback, changeRecord) {
- var callbackInfo = NormalizeCallbackInfo(callback);
+function ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck) {
+ if (!ObserverIsActive(observer, objectInfo) ||
+ !TypeMapHasType(ObserverGetAcceptTypes(observer), changeRecord.type)) {
+ return;
+ }
+
+ var callback = ObserverGetCallback(observer);
+ if (needsAccessCheck &&
+ // Drop all splice records on the floor for access-checked objects
+ (changeRecord.type == 'splice' ||
+ !%IsAccessAllowedForObserver(
+ callback, changeRecord.object, changeRecord.name))) {
+ return;
+ }
+
+ var callbackInfo = CallbackInfoNormalize(callback);
+ if (!observationState.pendingObservers)
+ observationState.pendingObservers = { __proto__: null };
observationState.pendingObservers[callbackInfo.priority] = callback;
callbackInfo.push(changeRecord);
%SetObserverDeliveryPending();
}
-function EnqueueChangeRecord(changeRecord, observers) {
+function ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord,
+ skipAccessCheck) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(changeRecord.name)) return;
- for (var i = 0; i < observers.length; i++) {
- var observer = observers[i];
- if (IS_UNDEFINED(observer.accept[changeRecord.type]))
- continue;
+ var needsAccessCheck = !skipAccessCheck &&
+ %IsAccessCheckNeeded(changeRecord.object);
- EnqueueToCallback(observer.callback, changeRecord);
+ if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
+ var observer = objectInfo.changeObservers;
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck);
+ return;
+ }
+
+ for (var priority in objectInfo.changeObservers) {
+ var observer = objectInfo.changeObservers[priority];
+ ObserverEnqueueIfActive(observer, objectInfo, changeRecord,
+ needsAccessCheck);
}
}
function BeginPerformSplice(array) {
var objectInfo = objectInfoMap.get(array);
if (!IS_UNDEFINED(objectInfo))
- BeginPerformChange(objectInfo, 'splice');
+ ObjectInfoAddPerformingType(objectInfo, 'splice');
}
function EndPerformSplice(array) {
var objectInfo = objectInfoMap.get(array);
if (!IS_UNDEFINED(objectInfo))
- EndPerformChange(objectInfo, 'splice');
+ ObjectInfoRemovePerformingType(objectInfo, 'splice');
}
function EnqueueSpliceRecord(array, index, removed, addedCount) {
var objectInfo = objectInfoMap.get(array);
- if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
+ if (!ObjectInfoHasActiveObservers(objectInfo))
return;
var changeRecord = {
@@ -315,19 +440,19 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
ObjectFreeze(changeRecord);
ObjectFreeze(changeRecord.removed);
- EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
+ ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
}
function NotifyChange(type, object, name, oldValue) {
var objectInfo = objectInfoMap.get(object);
- if (objectInfo.changeObservers.length === 0)
+ if (!ObjectInfoHasActiveObservers(objectInfo))
return;
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
ObjectFreeze(changeRecord);
- EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
+ ObjectInfoEnqueueChangeRecord(objectInfo, changeRecord);
}
var notifierPrototype = {};
@@ -336,17 +461,16 @@ function ObjectNotifierNotify(changeRecord) {
if (!IS_SPEC_OBJECT(this))
throw MakeTypeError("called_on_non_object", ["notify"]);
- var target = notifierTargetMap.get(this);
- if (IS_UNDEFINED(target))
+ var objectInfo = ObjectInfoGetFromNotifier(this);
+ if (IS_UNDEFINED(objectInfo))
throw MakeTypeError("observe_notify_non_notifier");
if (!IS_STRING(changeRecord.type))
throw MakeTypeError("observe_type_non_string");
- var objectInfo = objectInfoMap.get(target);
- if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
+ if (!ObjectInfoHasActiveObservers(objectInfo))
return;
- var newRecord = { object: target };
+ var newRecord = { object: ObjectInfoGetObject(objectInfo) };
for (var prop in changeRecord) {
if (prop === 'object') continue;
%DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
@@ -354,36 +478,28 @@ function ObjectNotifierNotify(changeRecord) {
}
ObjectFreeze(newRecord);
- EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
+ ObjectInfoEnqueueChangeRecord(objectInfo, newRecord,
+ true /* skip access check */);
}
-function ObjectNotifierPerformChange(changeType, changeFn, receiver) {
+function ObjectNotifierPerformChange(changeType, changeFn) {
if (!IS_SPEC_OBJECT(this))
throw MakeTypeError("called_on_non_object", ["performChange"]);
- var target = notifierTargetMap.get(this);
- if (IS_UNDEFINED(target))
+ var objectInfo = ObjectInfoGetFromNotifier(this);
+
+ if (IS_UNDEFINED(objectInfo))
throw MakeTypeError("observe_notify_non_notifier");
if (!IS_STRING(changeType))
throw MakeTypeError("observe_perform_non_string");
if (!IS_SPEC_FUNCTION(changeFn))
throw MakeTypeError("observe_perform_non_function");
- if (IS_NULL_OR_UNDEFINED(receiver)) {
- receiver = %GetDefaultReceiver(changeFn) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(changeFn)) {
- receiver = ToObject(receiver);
- }
-
- var objectInfo = objectInfoMap.get(target);
- if (IS_UNDEFINED(objectInfo))
- return;
-
- BeginPerformChange(objectInfo, changeType);
+ ObjectInfoAddPerformingType(objectInfo, changeType);
try {
- %_CallFunction(receiver, changeFn);
+ %_CallFunction(void 0, changeFn);
} finally {
- EndPerformChange(objectInfo, changeType);
+ ObjectInfoRemovePerformingType(objectInfo, changeType);
}
}
@@ -393,15 +509,8 @@ function ObjectGetNotifier(object) {
if (ObjectIsFrozen(object)) return null;
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
-
- if (IS_NULL(objectInfo.notifier)) {
- objectInfo.notifier = { __proto__: notifierPrototype };
- notifierTargetMap.set(objectInfo.notifier, object);
- }
-
- return objectInfo.notifier;
+ var objectInfo = ObjectInfoGet(object);
+ return ObjectInfoGetNotifier(objectInfo);
}
function CallbackDeliverPending(callback) {
@@ -414,12 +523,14 @@ function CallbackDeliverPending(callback) {
var priority = callbackInfo.priority;
callbackInfoMap.set(callback, priority);
- delete observationState.pendingObservers[priority];
+ if (observationState.pendingObservers)
+ delete observationState.pendingObservers[priority];
+
var delivered = [];
%MoveArrayContents(callbackInfo, delivered);
try {
- %Call(void 0, delivered, callback);
+ %_CallFunction(void 0, delivered, callback);
} catch (ex) {}
return true;
}
@@ -432,9 +543,9 @@ function ObjectDeliverChangeRecords(callback) {
}
function DeliverChangeRecords() {
- while (observationState.pendingObservers.length) {
+ while (observationState.pendingObservers) {
var pendingObservers = observationState.pendingObservers;
- observationState.pendingObservers = new InternalArray;
+ observationState.pendingObservers = null;
for (var i in pendingObservers) {
CallbackDeliverPending(pendingObservers[i]);
}
diff --git a/chromium/v8/src/objects-debug.cc b/chromium/v8/src/objects-debug.cc
index 395f95ca7ea..5d9e161a7e5 100644
--- a/chromium/v8/src/objects-debug.cc
+++ b/chromium/v8/src/objects-debug.cc
@@ -230,7 +230,8 @@ void HeapObject::HeapObjectVerify() {
void HeapObject::VerifyHeapPointer(Object* p) {
CHECK(p->IsHeapObject());
- CHECK(HEAP->Contains(HeapObject::cast(p)));
+ HeapObject* ho = HeapObject::cast(p);
+ CHECK(ho->GetHeap()->Contains(ho));
}
@@ -328,20 +329,27 @@ void JSObject::JSObjectVerify() {
}
}
}
- CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
- (elements() == GetHeap()->empty_fixed_array())),
- (elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map()));
- CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
+
+ // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
+ // allocation folding is turned off.
+ if (reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map()) {
+ CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
+ (elements() == GetHeap()->empty_fixed_array())),
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map()));
+ CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
+ }
}
void Map::MapVerify() {
- CHECK(!HEAP->InNewSpace(this));
+ Heap* heap = GetHeap();
+ CHECK(!heap->InNewSpace(this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
- instance_size() < HEAP->Capacity()));
+ instance_size() < heap->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
@@ -366,9 +374,12 @@ void Map::SharedMapVerify() {
}
-void Map::VerifyOmittedPrototypeChecks() {
- if (!FLAG_omit_prototype_checks_for_leaf_maps) return;
- if (HasTransitionArray() || is_dictionary_map()) {
+void Map::VerifyOmittedMapChecks() {
+ if (!FLAG_omit_map_checks_for_leaf_maps) return;
+ if (!is_stable() ||
+ is_deprecated() ||
+ HasTransitionArray() ||
+ is_dictionary_map()) {
CHECK_EQ(0, dependent_code()->number_of_entries(
DependentCode::kPrototypeCheckGroup));
}
@@ -498,7 +509,7 @@ void JSDate::JSDateVerify() {
}
if (cache_stamp()->IsSmi()) {
CHECK(Smi::cast(cache_stamp())->value() <=
- Smi::cast(Isolate::Current()->date_cache()->stamp())->value());
+ Smi::cast(GetIsolate()->date_cache()->stamp())->value());
}
}
@@ -520,7 +531,7 @@ void String::StringVerify() {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
if (IsInternalizedString()) {
- CHECK(!HEAP->InNewSpace(this));
+ CHECK(!GetHeap()->InNewSpace(this));
}
if (IsConsString()) {
ConsString::cast(this)->ConsStringVerify();
@@ -612,7 +623,7 @@ void Oddball::OddballVerify() {
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- CHECK(number == HEAP->nan_value());
+ CHECK(number == HeapObject::cast(number)->GetHeap()->nan_value());
} else {
CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
@@ -672,9 +683,14 @@ void Code::VerifyEmbeddedMapsDependency() {
void JSArray::JSArrayVerify() {
JSObjectVerify();
CHECK(length()->IsNumber() || length()->IsUndefined());
- CHECK(elements()->IsUndefined() ||
- elements()->IsFixedArray() ||
- elements()->IsFixedDoubleArray());
+ // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
+ // allocation folding is turned off.
+ if (reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map()) {
+ CHECK(elements()->IsUndefined() ||
+ elements()->IsFixedArray() ||
+ elements()->IsFixedDoubleArray());
+ }
}
@@ -853,6 +869,7 @@ void AccessorPair::AccessorPairVerify() {
CHECK(IsAccessorPair());
VerifyPointer(getter());
VerifyPointer(setter());
+ VerifySmiField(kAccessFlagsOffset);
}
@@ -885,6 +902,7 @@ void CallHandlerInfo::CallHandlerInfoVerify() {
void TemplateInfo::TemplateInfoVerify() {
VerifyPointer(tag());
VerifyPointer(property_list());
+ VerifyPointer(property_accessors());
}
@@ -893,7 +911,6 @@ void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
TemplateInfoVerify();
VerifyPointer(serial_number());
VerifyPointer(call_code());
- VerifyPointer(property_accessors());
VerifyPointer(prototype_template());
VerifyPointer(parent_template());
VerifyPointer(named_property_handler());
@@ -1039,7 +1056,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
int len = e->length();
- Heap* heap = HEAP;
+ Heap* heap = GetHeap();
for (int i = 0; i < len; i++) {
if (e->get(i) == heap->the_hole_value()) holes++;
}
diff --git a/chromium/v8/src/objects-inl.h b/chromium/v8/src/objects-inl.h
index 128dc6be281..89abe504335 100644
--- a/chromium/v8/src/objects-inl.h
+++ b/chromium/v8/src/objects-inl.h
@@ -917,17 +917,17 @@ bool Object::HasSpecificClassOf(String* name) {
}
-MaybeObject* Object::GetElement(uint32_t index) {
+MaybeObject* Object::GetElement(Isolate* isolate, uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
ASSERT(AllowHeapAllocation::IsAllowed());
- return GetElementWithReceiver(this, index);
+ return GetElementWithReceiver(isolate, this, index);
}
-Object* Object::GetElementNoExceptionThrown(uint32_t index) {
- MaybeObject* maybe = GetElementWithReceiver(this, index);
+Object* Object::GetElementNoExceptionThrown(Isolate* isolate, uint32_t index) {
+ MaybeObject* maybe = GetElementWithReceiver(isolate, this, index);
ASSERT(!maybe->IsFailure());
Object* result = NULL; // Initialization to please compiler.
maybe->ToObject(&result);
@@ -1185,7 +1185,6 @@ Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
ASSERT(heap != NULL);
- ASSERT(heap->isolate() == Isolate::Current());
return heap;
}
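
A pattern running through these hunks is threading the Isolate explicitly instead of reaching for Isolate::Current() or the HEAP macro, which only resolve correctly on the isolate's own thread; the ASSERT dropped above removes the last such dependency from GetHeap(). A caller-side sketch of the new GetElement signature (receiver setup assumed):

    Isolate* isolate = receiver->GetIsolate();     // receiver: a HeapObject
    MaybeObject* maybe = receiver->GetElement(isolate, 0);
    Object* element;
    if (!maybe->ToObject(&element)) return maybe;  // propagate the failure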
@@ -1312,7 +1311,7 @@ void JSObject::ValidateElements() {
bool JSObject::ShouldTrackAllocationInfo() {
- if (map()->CanTrackAllocationSite()) {
+ if (AllocationSite::CanTrack(map()->instance_type())) {
if (!IsJSArray()) {
return true;
}
@@ -1349,17 +1348,21 @@ AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
}
-MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
- ValidateElements();
- ElementsKind elements_kind = map()->elements_kind();
+inline bool AllocationSite::CanTrack(InstanceType type) {
+ return type == JS_ARRAY_TYPE;
+}
+
+
+void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
+ object->ValidateElements();
+ ElementsKind elements_kind = object->map()->elements_kind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
- return TransitionElementsKind(FAST_HOLEY_ELEMENTS);
+ TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
} else {
- return TransitionElementsKind(FAST_ELEMENTS);
+ TransitionElementsKind(object, FAST_ELEMENTS);
}
}
- return this;
}
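
Note the calling convention also changes from a raw member returning MaybeObject* to a static function taking a Handle<JSObject>, since TransitionElementsKind may allocate and move the object. A usage sketch under the new convention (the handle is assumed to come from an enclosing HandleScope):

    Handle<JSObject> object = ...;  // from a HandleScope
    // May allocate and transition the elements kind; the handle stays valid.
    JSObject::EnsureCanContainHeapObjectElements(object);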
@@ -1567,14 +1570,27 @@ MaybeObject* JSObject::MigrateInstance() {
// Converting any field to the most specific type will cause the
// GeneralizeFieldRepresentation algorithm to create the most general existing
// transition that matches the object. This achieves what is needed.
- return GeneralizeFieldRepresentation(0, Representation::None());
+ Map* original_map = map();
+ MaybeObject* maybe_result = GeneralizeFieldRepresentation(
+ 0, Representation::None(), ALLOW_AS_CONSTANT);
+ JSObject* result;
+ if (FLAG_trace_migration && maybe_result->To(&result)) {
+ PrintInstanceMigration(stdout, original_map, result->map());
+ }
+ return maybe_result;
}
MaybeObject* JSObject::TryMigrateInstance() {
Map* new_map = map()->CurrentMapForDeprecated();
if (new_map == NULL) return Smi::FromInt(0);
- return MigrateToMap(new_map);
+ Map* original_map = map();
+ MaybeObject* maybe_result = MigrateToMap(new_map);
+ JSObject* result;
+ if (FLAG_trace_migration && maybe_result->To(&result)) {
+ PrintInstanceMigration(stdout, original_map, result->map());
+ }
+ return maybe_result;
}
@@ -1852,14 +1868,15 @@ bool JSObject::HasFastProperties() {
}
-bool JSObject::TooManyFastProperties(int properties,
- JSObject::StoreFromKeyed store_mode) {
+bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) {
// Allow extra fast properties if the object has more than
- // kFastPropertiesSoftLimit in-object properties. When this is the case,
- // it is very unlikely that the object is being used as a dictionary
- // and there is a good chance that allowing more map transitions
- // will be worth it.
- int inobject = map()->inobject_properties();
+ // kFastPropertiesSoftLimit in-object properties. When this is the case, it is
+ // very unlikely that the object is being used as a dictionary and there is a
+ // good chance that allowing more map transitions will be worth it.
+ Map* map = this->map();
+ if (map->unused_property_fields() != 0) return false;
+
+ int inobject = map->inobject_properties();
int limit;
if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
@@ -1867,7 +1884,7 @@ bool JSObject::TooManyFastProperties(int properties,
} else {
limit = Max(inobject, kFastPropertiesSoftLimit);
}
- return properties > limit;
+ return properties()->length() > limit;
}
@@ -1946,7 +1963,7 @@ bool FixedArray::is_the_hole(int index) {
void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
@@ -1955,7 +1972,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -1981,8 +1998,8 @@ inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
double FixedDoubleArray::get_scalar(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
ASSERT(!is_the_hole_nan(result));
@@ -1990,8 +2007,8 @@ double FixedDoubleArray::get_scalar(int index) {
}
int64_t FixedDoubleArray::get_representation(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
}
@@ -2006,8 +2023,8 @@ MaybeObject* FixedDoubleArray::get(int index) {
void FixedDoubleArray::set(int index, double value) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double();
WRITE_DOUBLE_FIELD(this, offset, value);
@@ -2015,8 +2032,8 @@ void FixedDoubleArray::set(int index, double value) {
void FixedDoubleArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map() &&
- map() != HEAP->fixed_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
}
@@ -2040,7 +2057,7 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -2051,7 +2068,7 @@ void FixedArray::set(int index,
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
@@ -2065,43 +2082,36 @@ void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!array->GetHeap()->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
- set_undefined(GetHeap(), index);
-}
-
-
-void FixedArray::set_undefined(Heap* heap, int index) {
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->undefined_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize,
- heap->undefined_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->undefined_value());
}
void FixedArray::set_null(int index) {
- set_null(GetHeap(), index);
-}
-
-
-void FixedArray::set_null(Heap* heap, int index) {
ASSERT(index >= 0 && index < this->length());
- ASSERT(!heap->InNewSpace(heap->null_value()));
- WRITE_FIELD(this, kHeaderSize + index * kPointerSize, heap->null_value());
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
+ WRITE_FIELD(this,
+ kHeaderSize + index * kPointerSize,
+ GetHeap()->null_value());
}
void FixedArray::set_the_hole(int index) {
- ASSERT(map() != HEAP->fixed_cow_array_map());
+ ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
- ASSERT(!HEAP->InNewSpace(HEAP->the_hole_value()));
+ ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->the_hole_value());
@@ -2120,7 +2130,7 @@ Object** FixedArray::data_start() {
bool DescriptorArray::IsEmpty() {
ASSERT(length() >= kFirstIndex ||
- this == HEAP->empty_descriptor_array());
+ this == GetHeap()->empty_descriptor_array());
return length() < kFirstIndex;
}
@@ -2348,6 +2358,7 @@ PropertyType DescriptorArray::GetType(int descriptor_number) {
int DescriptorArray::GetFieldIndex(int descriptor_number) {
+ ASSERT(GetDetails(descriptor_number).type() == FIELD);
return GetDetails(descriptor_number).field_index();
}
@@ -3578,11 +3589,6 @@ Code::Flags Code::flags() {
}
-inline bool Map::CanTrackAllocationSite() {
- return instance_type() == JS_ARRAY_TYPE;
-}
-
-
void Map::set_owns_descriptors(bool is_shared) {
set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared));
}
@@ -3617,6 +3623,17 @@ bool Map::is_deprecated() {
}
+void Map::set_migration_target(bool value) {
+ set_bit_field3(IsMigrationTarget::update(bit_field3(), value));
+}
+
+
+bool Map::is_migration_target() {
+ if (!FLAG_track_fields) return false;
+ return IsMigrationTarget::decode(bit_field3());
+}
+
+
void Map::freeze() {
set_bit_field3(IsFrozen::update(bit_field3(), true));
}
@@ -3677,11 +3694,6 @@ void Map::NotifyLeafMapLayoutChange() {
}
-bool Map::CanOmitPrototypeChecks() {
- return is_stable() && FLAG_omit_prototype_checks_for_leaf_maps;
-}
-
-
bool Map::CanOmitMapChecks() {
return is_stable() && FLAG_omit_map_checks_for_leaf_maps;
}
@@ -3816,7 +3828,6 @@ inline void Code::set_is_crankshafted(bool value) {
int Code::major_key() {
ASSERT(kind() == STUB ||
- kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
kind() == COMPARE_NIL_IC ||
@@ -3831,7 +3842,6 @@ int Code::major_key() {
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
- kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
kind() == COMPARE_NIL_IC ||
@@ -4021,21 +4031,6 @@ void Code::set_check_type(CheckType value) {
}
-byte Code::unary_op_type() {
- ASSERT(is_unary_op_stub());
- return UnaryOpTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_unary_op_type(byte value) {
- ASSERT(is_unary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = UnaryOpTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
byte Code::to_boolean_state() {
return extended_extra_ic_state();
}
@@ -4082,8 +4077,8 @@ bool Code::is_inline_cache_stub() {
}
-bool Code::is_debug_break() {
- return ic_state() == DEBUG_STUB && extra_ic_state() == DEBUG_BREAK;
+bool Code::is_debug_stub() {
+ return ic_state() == DEBUG_STUB;
}
@@ -4198,7 +4193,7 @@ static MaybeObject* EnsureHasTransitionArray(Map* map) {
TransitionArray* transitions;
MaybeObject* maybe_transitions;
if (!map->HasTransitionArray()) {
- maybe_transitions = TransitionArray::Allocate(0);
+ maybe_transitions = TransitionArray::Allocate(map->GetIsolate(), 0);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
transitions->set_back_pointer_storage(map->GetBackPointer());
} else if (!map->transitions()->IsFullTransitionArray()) {
@@ -4220,7 +4215,20 @@ void Map::InitializeDescriptors(DescriptorArray* descriptors) {
ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)
+
+
+void Map::set_bit_field3(uint32_t bits) {
+  // Sign-extend bit 30 into bit 31 so the upper two bits agree; the result
+  // then fits in a Smi while the 31st bit remains usable.
+ int value = bits << 1;
+ WRITE_FIELD(this, kBitField3Offset, Smi::FromInt(value >> 1));
+}
+
+
+uint32_t Map::bit_field3() {
+ Object* value = READ_FIELD(this, kBitField3Offset);
+ return Smi::cast(value)->value();
+}
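
The trick above is easy to check in isolation: shifting left and then arithmetic-shifting right by one replicates bit 30 into bit 31, so the low 31 bits always survive the Smi round trip. A self-contained illustration, assuming the usual two's-complement shift behavior:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t bits = 0x40000001u;  // uses bit 30, the "31st bit"
      // Mirror set_bit_field3()/bit_field3(): sign-extend, then read back.
      int32_t stored = static_cast<int32_t>(bits << 1) >> 1;
      uint32_t loaded = static_cast<uint32_t>(stored);
      // Bit 31 now mirrors bit 30, but bits 0..30 are intact.
      assert((loaded & 0x7FFFFFFFu) == (bits & 0x7FFFFFFFu));
      return 0;
    }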
void Map::ClearTransitions(Heap* heap, WriteBarrierMode mode) {
@@ -4438,6 +4446,7 @@ ACCESSORS(Box, value, Object, kValueOffset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
+ACCESSORS_TO_SMI(AccessorPair, access_flags, kAccessFlagsOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -4455,11 +4464,10 @@ ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
+ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
-ACCESSORS(FunctionTemplateInfo, property_accessors, Object,
- kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
kPrototypeTemplateOffset)
ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
@@ -4558,6 +4566,10 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
kNeedsAccessCheckBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
kReadOnlyPrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
+ kRemovePrototypeBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
+ kDoNotCacheBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
@@ -4595,7 +4607,8 @@ SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason,
+ kOptCountAndBailoutReasonOffset)
SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset)
#else
@@ -4644,7 +4657,9 @@ PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
compiler_hints,
kCompilerHintsOffset)
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, opt_count, kOptCountOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
+ opt_count_and_bailout_reason,
+ kOptCountAndBailoutReasonOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, counters, kCountersOffset)
@@ -4827,7 +4842,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
return code() !=
- Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
+ GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
@@ -4891,6 +4906,24 @@ void SharedFunctionInfo::set_opt_reenable_tries(int tries) {
}
+int SharedFunctionInfo::opt_count() {
+ return OptCountBits::decode(opt_count_and_bailout_reason());
+}
+
+
+void SharedFunctionInfo::set_opt_count(int opt_count) {
+ set_opt_count_and_bailout_reason(
+ OptCountBits::update(opt_count_and_bailout_reason(), opt_count));
+}
+
+
+BailoutReason SharedFunctionInfo::DisableOptimizationReason() {
+ BailoutReason reason = static_cast<BailoutReason>(
+ DisabledOptimizationReasonBits::decode(opt_count_and_bailout_reason()));
+ return reason;
+}
+
+
bool SharedFunctionInfo::has_deoptimization_support() {
Code* code = this->code();
return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
@@ -4937,15 +4970,9 @@ bool JSFunction::IsMarkedForLazyRecompilation() {
}
-bool JSFunction::IsMarkedForInstallingRecompiledCode() {
- return code() == GetIsolate()->builtins()->builtin(
- Builtins::kInstallRecompiledCode);
-}
-
-
-bool JSFunction::IsMarkedForParallelRecompilation() {
+bool JSFunction::IsMarkedForConcurrentRecompilation() {
return code() == GetIsolate()->builtins()->builtin(
- Builtins::kParallelRecompile);
+ Builtins::kConcurrentRecompile);
}
@@ -4962,7 +4989,7 @@ Code* JSFunction::code() {
void JSFunction::set_code(Code* value) {
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
@@ -4973,7 +5000,7 @@ void JSFunction::set_code(Code* value) {
void JSFunction::set_code_no_write_barrier(Code* value) {
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
@@ -4991,6 +5018,7 @@ void JSFunction::ReplaceCode(Code* code) {
context()->native_context()->AddOptimizedFunction(this);
}
if (was_optimized && !is_optimized) {
+ // TODO(titzer): linear in the number of optimized functions; fix!
context()->native_context()->RemoveOptimizedFunction(this);
}
}
@@ -5122,7 +5150,7 @@ void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
Code* value) {
ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!HEAP->InNewSpace(value));
+ ASSERT(!GetHeap()->InNewSpace(value));
}
@@ -5253,6 +5281,20 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
}
+Object* Code::next_code_link() {
+ CHECK(kind() == OPTIMIZED_FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
+void Code::set_next_code_link(Object* value, WriteBarrierMode mode) {
+ CHECK(kind() == OPTIMIZED_FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
+ value, mode);
+}
+
+
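Optimized code never carries type feedback, so the type-feedback slot is reused here as an intrusive list pointer. A sketch of walking such a list (the head variable and the undefined terminator are assumptions for illustration):

    // Walk a chain of optimized Code objects via next_code_link().
    for (Object* current = optimized_code_list_head;  // assumed head
         !current->IsUndefined();
         current = Code::cast(current)->next_code_link()) {
      Code* code = Code::cast(current);
      // ... inspect or deoptimize code ...
    }
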
int Code::stub_info() {
ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
kind() == BINARY_OP_IC || kind() == LOAD_IC);
@@ -5274,25 +5316,6 @@ void Code::set_stub_info(int value) {
}
-Object* Code::code_to_deoptimize_link() {
- // Optimized code should not have type feedback.
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_FIELD(this, kTypeFeedbackInfoOffset);
-}
-
-
-void Code::set_code_to_deoptimize_link(Object* value) {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
-}
-
-
-Object** Code::code_to_deoptimize_link_slot() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- return HeapObject::RawField(this, kTypeFeedbackInfoOffset);
-}
-
-
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
@@ -5834,6 +5857,36 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
}
+void AccessorPair::set_access_flags(v8::AccessControl access_control) {
+ int current = access_flags()->value();
+ current = BooleanBit::set(current,
+ kProhibitsOverwritingBit,
+ access_control & PROHIBITS_OVERWRITING);
+ current = BooleanBit::set(current,
+ kAllCanReadBit,
+ access_control & ALL_CAN_READ);
+ current = BooleanBit::set(current,
+ kAllCanWriteBit,
+ access_control & ALL_CAN_WRITE);
+ set_access_flags(Smi::FromInt(current));
+}
+
+
+bool AccessorPair::all_can_read() {
+ return BooleanBit::get(access_flags(), kAllCanReadBit);
+}
+
+
+bool AccessorPair::all_can_write() {
+ return BooleanBit::get(access_flags(), kAllCanWriteBit);
+}
+
+
+bool AccessorPair::prohibits_overwriting() {
+ return BooleanBit::get(access_flags(), kProhibitsOverwritingBit);
+}
+
+
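BooleanBit is a v8-internal helper that packs named flags into a Smi payload. A minimal standalone equivalent of the get/set used above (names illustrative):

    static int SetBooleanBit(int flags, int position, bool value) {
      int mask = 1 << position;
      return value ? (flags | mask) : (flags & ~mask);
    }

    static bool GetBooleanBit(int flags, int position) {
      return (flags & (1 << position)) != 0;
    }

    // e.g. folding an AccessControl mask into a fresh flags word:
    //   flags = SetBooleanBit(0, kAllCanReadBit,
    //                         (access_control & ALL_CAN_READ) != 0);
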
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
@@ -6139,7 +6192,6 @@ SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
Relocatable::Relocatable(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
isolate_ = isolate;
prev_ = isolate->relocatable_top();
isolate->set_relocatable_top(this);
@@ -6147,7 +6199,6 @@ Relocatable::Relocatable(Isolate* isolate) {
Relocatable::~Relocatable() {
- ASSERT(isolate_ == Isolate::Current());
ASSERT_EQ(isolate_->relocatable_top(), this);
isolate_->set_relocatable_top(prev_);
}
diff --git a/chromium/v8/src/objects-printer.cc b/chromium/v8/src/objects-printer.cc
index 87b2811e413..0b8fdfda030 100644
--- a/chromium/v8/src/objects-printer.cc
+++ b/chromium/v8/src/objects-printer.cc
@@ -37,9 +37,6 @@ namespace internal {
#ifdef OBJECT_PRINT
-static const char* TypeToString(InstanceType type);
-
-
void MaybeObject::Print() {
Print(stdout);
}
@@ -509,83 +506,12 @@ void JSModule::JSModulePrint(FILE* out) {
static const char* TypeToString(InstanceType type) {
switch (type) {
- case INVALID_TYPE: return "INVALID";
- case MAP_TYPE: return "MAP";
- case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
- case SYMBOL_TYPE: return "SYMBOL";
- case STRING_TYPE: return "TWO_BYTE_STRING";
- case ASCII_STRING_TYPE: return "ASCII_STRING";
- case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE:
- return "CONS_STRING";
- case EXTERNAL_STRING_TYPE:
- case EXTERNAL_ASCII_STRING_TYPE:
- case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return "EXTERNAL_STRING";
- case SHORT_EXTERNAL_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return "SHORT_EXTERNAL_STRING";
- case INTERNALIZED_STRING_TYPE: return "INTERNALIZED_STRING";
- case ASCII_INTERNALIZED_STRING_TYPE: return "ASCII_INTERNALIZED_STRING";
- case CONS_INTERNALIZED_STRING_TYPE: return "CONS_INTERNALIZED_STRING";
- case CONS_ASCII_INTERNALIZED_STRING_TYPE:
- return "CONS_ASCII_INTERNALIZED_STRING";
- case EXTERNAL_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return "EXTERNAL_INTERNALIZED_STRING";
- case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
- case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return "SHORT_EXTERNAL_INTERNALIZED_STRING";
- case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
- case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
- case FREE_SPACE_TYPE: return "FREE_SPACE";
- case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
- case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
- case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
- case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return "EXTERNAL_UNSIGNED_INT_ARRAY";
- case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
- case EXTERNAL_DOUBLE_ARRAY_TYPE: return "EXTERNAL_DOUBLE_ARRAY";
- case FILLER_TYPE: return "FILLER";
- case JS_OBJECT_TYPE: return "JS_OBJECT";
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
- case ODDBALL_TYPE: return "ODDBALL";
- case CELL_TYPE: return "CELL";
- case PROPERTY_CELL_TYPE: return "PROPERTY_CELL";
- case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
- case JS_GENERATOR_OBJECT_TYPE: return "JS_GENERATOR_OBJECT";
- case JS_MODULE_TYPE: return "JS_MODULE";
- case JS_FUNCTION_TYPE: return "JS_FUNCTION";
- case CODE_TYPE: return "CODE";
- case JS_ARRAY_TYPE: return "JS_ARRAY";
- case JS_PROXY_TYPE: return "JS_PROXY";
- case JS_SET_TYPE: return "JS_SET";
- case JS_MAP_TYPE: return "JS_MAP";
- case JS_WEAK_MAP_TYPE: return "JS_WEAK_MAP";
- case JS_WEAK_SET_TYPE: return "JS_WEAK_SET";
- case JS_REGEXP_TYPE: return "JS_REGEXP";
- case JS_VALUE_TYPE: return "JS_VALUE";
- case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
- case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
- case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
- case JS_ARRAY_BUFFER_TYPE: return "JS_ARRAY_BUFFER";
- case JS_TYPED_ARRAY_TYPE: return "JS_TYPED_ARRAY";
- case JS_DATA_VIEW_TYPE: return "JS_DATA_VIEW";
- case FOREIGN_TYPE: return "FOREIGN";
- case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "UNKNOWN";
+#define TYPE_TO_STRING(TYPE) case TYPE: return #TYPE;
+ INSTANCE_TYPE_LIST(TYPE_TO_STRING)
+#undef TYPE_TO_STRING
}
+ UNREACHABLE();
+ return "UNKNOWN"; // Keep the compiler happy.
}
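
The hand-written switch is replaced by an X-macro expansion over INSTANCE_TYPE_LIST, so newly added instance types print correctly without touching this file. The same technique in miniature (COLOR_LIST and friends are hypothetical):

    #define COLOR_LIST(V) V(RED) V(GREEN) V(BLUE)

    enum Color {
    #define DEFINE_ENUM(NAME) NAME,
      COLOR_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
    };

    static const char* ColorToString(Color color) {
      switch (color) {
    #define CASE(NAME) case NAME: return #NAME;
        COLOR_LIST(CASE)
    #undef CASE
      }
      return "UNKNOWN";  // keep the compiler happy
    }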
@@ -1059,6 +985,8 @@ void AccessorPair::AccessorPairPrint(FILE* out) {
getter()->ShortPrint(out);
PrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
+ PrintF(out, "\n - flag: ");
+ access_flags()->ShortPrint(out);
}
@@ -1142,6 +1070,8 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
tag()->ShortPrint(out);
PrintF(out, "\n - property_list: ");
property_list()->ShortPrint(out);
+ PrintF(out, "\n - property_accessors: ");
+ property_accessors()->ShortPrint(out);
PrintF(out, "\n - constructor: ");
constructor()->ShortPrint(out);
PrintF(out, "\n - internal_field_count: ");
diff --git a/chromium/v8/src/objects-visiting-inl.h b/chromium/v8/src/objects-visiting-inl.h
index 9398d6dfeaa..46cc9d79892 100644
--- a/chromium/v8/src/objects-visiting-inl.h
+++ b/chromium/v8/src/objects-visiting-inl.h
@@ -304,7 +304,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
&& (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
- IC::Clear(rinfo->pc());
+ IC::Clear(target->GetIsolate(), rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
@@ -848,8 +848,9 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
IteratePointer(v, kTypeFeedbackInfoOffset);
RelocIterator it(this, mode_mask);
+ Isolate* isolate = this->GetIsolate();
for (; !it.done(); it.next()) {
- it.rinfo()->Visit(v);
+ it.rinfo()->Visit(isolate, v);
}
}
diff --git a/chromium/v8/src/objects-visiting.h b/chromium/v8/src/objects-visiting.h
index 32e457b869a..21757377a4f 100644
--- a/chromium/v8/src/objects-visiting.h
+++ b/chromium/v8/src/objects-visiting.h
@@ -141,7 +141,7 @@ class StaticVisitorBase : public AllStatic {
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
diff --git a/chromium/v8/src/objects.cc b/chromium/v8/src/objects.cc
index 2e9badbb2aa..d9538ae217d 100644
--- a/chromium/v8/src/objects.cc
+++ b/chromium/v8/src/objects.cc
@@ -32,6 +32,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "codegen.h"
+#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "date.h"
@@ -40,6 +41,7 @@
#include "full-codegen.h"
#include "hydrogen.h"
#include "isolate-inl.h"
+#include "log.h"
#include "objects-inl.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
@@ -83,23 +85,19 @@ MaybeObject* Object::ToObject(Context* native_context) {
}
-MaybeObject* Object::ToObject() {
+MaybeObject* Object::ToObject(Isolate* isolate) {
if (IsJSReceiver()) {
return this;
} else if (IsNumber()) {
- Isolate* isolate = Isolate::Current();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->number_function(), this);
} else if (IsBoolean()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->boolean_function(), this);
} else if (IsString()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->string_function(), this);
} else if (IsSymbol()) {
- Isolate* isolate = HeapObject::cast(this)->GetIsolate();
Context* native_context = isolate->context()->native_context();
return CreateJSValue(native_context->symbol_function(), this);
}
@@ -135,7 +133,7 @@ void Object::Lookup(Name* name, LookupResult* result) {
} else if (IsBoolean()) {
holder = native_context->boolean_function()->instance_prototype();
} else {
- Isolate::Current()->PushStackTraceAndDie(
+ result->isolate()->PushStackTraceAndDie(
0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
}
}
@@ -341,7 +339,7 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
Foreign::cast(structure)->foreign_address());
- MaybeObject* value = (callback->getter)(receiver, callback->data);
+ MaybeObject* value = (callback->getter)(isolate, receiver, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return value;
}
@@ -368,7 +366,8 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
}
ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ v8::AccessorGetterCallback call_fun =
+ v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
JSObject* self = JSObject::cast(receiver);
@@ -421,24 +420,22 @@ MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
}
-Handle<Object> Object::GetProperty(Handle<Object> object, Handle<Name> name) {
+Handle<Object> Object::GetProperty(Handle<Object> object,
+ Handle<Name> name) {
// TODO(rossberg): The index test should not be here but in the GetProperty
// method (or somewhere else entirely). Needs more global clean-up.
uint32_t index;
+ Isolate* isolate = name->GetIsolate();
if (name->AsArrayIndex(&index))
- return GetElement(object, index);
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
+ return GetElement(isolate, object, index);
CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
}
-Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
+Handle<Object> Object::GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index) {
+ CALL_HEAP_FUNCTION(isolate, object->GetElement(isolate, index), Object);
}
@@ -451,14 +448,17 @@ MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
}
-MaybeObject* JSProxy::SetElementWithHandler(JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return SetPropertyWithHandler(receiver, name, value, NONE, strict_mode);
+Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ CALL_HEAP_FUNCTION(isolate,
+ proxy->SetPropertyWithHandler(
+ *receiver, *name, *value, NONE, strict_mode),
+ Object);
}
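
CALL_HEAP_FUNCTION is the v8-internal handlification macro used throughout these hunks: it evaluates the raw MaybeObject* expression, retries after a GC if the allocation failed, and returns the result wrapped in a Handle. Schematically, as a simplification rather than the exact macro body:

    // CALL_HEAP_FUNCTION(isolate, CALL, Type) behaves roughly like:
    //   MaybeObject* maybe = CALL;
    //   if (maybe is a retry-after-gc failure) { collect garbage; retry; }
    //   return Handle<Type>(Type::cast(result), isolate);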
@@ -487,8 +487,8 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
#endif
bool has_pending_exception;
- Handle<Object> result =
- Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
+ Handle<Object> result = Execution::Call(
+ isolate, fun, self, 0, NULL, &has_pending_exception, true);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *result;
@@ -513,6 +513,12 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck(
return result->holder()->GetPropertyWithCallback(
receiver, result->GetCallbackObject(), name);
}
+ } else if (obj->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(obj);
+ if (pair->all_can_read()) {
+ return result->holder()->GetPropertyWithCallback(
+ receiver, result->GetCallbackObject(), name);
+ }
}
break;
}
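
With access flags now stored on AccessorPair, JavaScript accessor pairs installed through templates can opt into all-can-read behavior, so reads keep working across a failed access check, mirroring what AccessorInfo already supported. At the API level this corresponds to passing v8::ALL_CAN_READ when installing the accessor; a sketch against the v8 API of this era (exact overload assumed):

    //   templ->SetAccessorProperty(name, getter_templ, setter_templ,
    //                              v8::None, v8::ALL_CAN_READ);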
@@ -573,6 +579,11 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck(
if (info->all_can_read()) {
return result->GetAttributes();
}
+ } else if (obj->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(obj);
+ if (pair->all_can_read()) {
+ return result->GetAttributes();
+ }
}
break;
}
@@ -786,9 +797,7 @@ Handle<Object> Object::GetProperty(Handle<Object> object,
LookupResult* result,
Handle<Name> key,
PropertyAttributes* attributes) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
+ Isolate* isolate = result->isolate();
CALL_HEAP_FUNCTION(
isolate,
object->GetProperty(*receiver, result, *key, attributes),
@@ -801,9 +810,7 @@ MaybeObject* Object::GetPropertyOrFail(Handle<Object> object,
LookupResult* result,
Handle<Name> key,
PropertyAttributes* attributes) {
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
+ Isolate* isolate = result->isolate();
CALL_HEAP_FUNCTION_PASS_EXCEPTION(
isolate,
object->GetProperty(*receiver, result, *key, attributes));
@@ -816,7 +823,8 @@ MaybeObject* Object::GetProperty(Object* receiver,
PropertyAttributes* attributes) {
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
+
Isolate* isolate = name->GetIsolate();
Heap* heap = isolate->heap();
@@ -894,10 +902,9 @@ MaybeObject* Object::GetProperty(Object* receiver,
}
-MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- Isolate* isolate = IsSmi()
- ? Isolate::Current()
- : HeapObject::cast(this)->GetIsolate();
+MaybeObject* Object::GetElementWithReceiver(Isolate* isolate,
+ Object* receiver,
+ uint32_t index) {
Heap* heap = isolate->heap();
Object* holder = this;
@@ -1449,6 +1456,66 @@ void JSObject::PrintElementsTransition(
}
+void Map::PrintGeneralization(FILE* file,
+ const char* reason,
+ int modify_index,
+ int split,
+ int descriptors,
+ bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation) {
+ PrintF(file, "[generalizing ");
+ constructor_name()->PrintOn(file);
+ PrintF(file, "] ");
+ String::cast(instance_descriptors()->GetKey(modify_index))->PrintOn(file);
+ if (constant_to_field) {
+ PrintF(file, ":c->f");
+ } else {
+ PrintF(file, ":%s->%s",
+ old_representation.Mnemonic(),
+ new_representation.Mnemonic());
+ }
+ PrintF(file, " (");
+ if (strlen(reason) > 0) {
+ PrintF(file, "%s", reason);
+ } else {
+ PrintF(file, "+%i maps", descriptors - split);
+ }
+ PrintF(file, ") [");
+ JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
+ PrintF(file, "]\n");
+}
+
+
+void JSObject::PrintInstanceMigration(FILE* file,
+ Map* original_map,
+ Map* new_map) {
+ PrintF(file, "[migrating ");
+ map()->constructor_name()->PrintOn(file);
+ PrintF(file, "] ");
+ DescriptorArray* o = original_map->instance_descriptors();
+ DescriptorArray* n = new_map->instance_descriptors();
+ for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
+ Representation o_r = o->GetDetails(i).representation();
+ Representation n_r = n->GetDetails(i).representation();
+ if (!o_r.Equals(n_r)) {
+ String::cast(o->GetKey(i))->PrintOn(file);
+ PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
+ } else if (o->GetDetails(i).type() == CONSTANT &&
+ n->GetDetails(i).type() == FIELD) {
+ Name* name = o->GetKey(i);
+ if (name->IsString()) {
+ String::cast(name)->PrintOn(file);
+ } else {
+ PrintF(file, "???");
+ }
+ PrintF(file, " ");
+ }
+ }
+ PrintF(file, "\n");
+}
+
+
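Both printers emit one line per event when the corresponding flag is on. Reading off the PrintF calls above, the output shapes are roughly (constructor, property names, and representation mnemonics illustrative):

    [generalizing MyCtor] field:c->f (+2 maps) [<top JS frame>]
    [migrating MyCtor] field:h->t constant_prop
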
void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
Heap* heap = GetHeap();
if (!heap->Contains(this)) {
@@ -1783,14 +1850,14 @@ String* JSReceiver::class_name() {
}
-String* JSReceiver::constructor_name() {
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
+String* Map::constructor_name() {
+ if (constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(this->constructor());
String* name = String::cast(constructor->shared()->name());
if (name->length() > 0) return name;
String* inferred_name = constructor->shared()->inferred_name();
if (inferred_name->length() > 0) return inferred_name;
- Object* proto = GetPrototype();
+ Object* proto = prototype();
if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
}
// TODO(rossberg): what about proxies?
@@ -1799,6 +1866,11 @@ String* JSReceiver::constructor_name() {
}
+String* JSReceiver::constructor_name() {
+ return map()->constructor_name();
+}
+
+
MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
Name* name,
Object* value,
@@ -1828,30 +1900,12 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
}
-static bool IsIdentifier(UnicodeCache* cache, Name* name) {
- // Checks whether the buffer contains an identifier (no escape).
- if (!name->IsString()) return false;
- String* string = String::cast(name);
- if (string->length() == 0) return false;
- ConsStringIteratorOp op;
- StringCharacterStream stream(string, &op);
- if (!cache->IsIdentifierStart(stream.GetNext())) {
- return false;
- }
- while (stream.HasMore()) {
- if (!cache->IsIdentifierPart(stream.GetNext())) {
- return false;
- }
- }
- return true;
-}
-
-
MaybeObject* JSObject::AddFastProperty(Name* name,
Object* value,
PropertyAttributes attributes,
StoreFromKeyed store_mode,
- ValueType value_type) {
+ ValueType value_type,
+ TransitionFlag flag) {
ASSERT(!IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
map()->instance_descriptors()->Search(
@@ -1861,15 +1915,10 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
// hidden strings) and is not a real identifier.
// Normalize the object if it will have too many fast properties.
Isolate* isolate = GetHeap()->isolate();
- if ((!name->IsSymbol() && !IsIdentifier(isolate->unicode_cache(), name)
- && name != isolate->heap()->hidden_string()) ||
- (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), store_mode))) {
- Object* obj;
- MaybeObject* maybe_obj =
+ if (!name->IsCacheable(isolate) || TooManyFastProperties(store_mode)) {
+ MaybeObject* maybe_failure =
NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-
+ if (maybe_failure->IsFailure()) return maybe_failure;
return AddSlowProperty(name, value, attributes);
}
@@ -1882,66 +1931,36 @@ MaybeObject* JSObject::AddFastProperty(Name* name,
FieldDescriptor new_field(name, index, attributes, representation);
- ASSERT(index < map()->inobject_properties() ||
- (index - map()->inobject_properties()) < properties()->length() ||
- map()->unused_property_fields() == 0);
-
- FixedArray* values = NULL;
-
- // TODO(verwaest): Merge with AddFastPropertyUsingMap.
- if (map()->unused_property_fields() == 0) {
- // Make room for the new value
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_values->To(&values)) return maybe_values;
- }
-
- TransitionFlag flag = INSERT_TRANSITION;
-
- Heap* heap = isolate->heap();
-
- Object* storage;
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
-
- // Note that Map::CopyAddDescriptor has side-effects, the new map is already
- // inserted in the transition tree. No more allocations that might fail are
- // allowed after this point.
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (map()->unused_property_fields() == 0) {
- ASSERT(values != NULL);
- set_properties(values);
- new_map->set_unused_property_fields(kFieldsAdded - 1);
- } else {
- new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
+ int unused_property_fields = map()->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += kFieldsAdded;
}
+ new_map->set_unused_property_fields(unused_property_fields);
- set_map(new_map);
-
- FastPropertyAtPut(index, storage);
- return value;
+ return AddFastPropertyUsingMap(new_map, name, value, index, representation);
}
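
The rewritten bookkeeping folds the old unused_property_fields() == 0 branch into one wrap-around computation. A worked example, assuming kFieldsAdded == 3 (its value is not shown in this hunk):

    // unused_property_fields() == 2  ->  new map records 1 unused slot.
    // unused_property_fields() == 0  ->  -1 + kFieldsAdded = 2 unused slots,
    //                                    once the backing store grows by 3.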
MaybeObject* JSObject::AddConstantProperty(
Name* name,
Object* constant,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ TransitionFlag initial_flag) {
// Allocate new instance descriptors with (name, constant) added
ConstantDescriptor d(name, constant, attributes);
TransitionFlag flag =
- // Do not add transitions to global objects.
+ // Do not add transitions to global objects.
(IsGlobalObject() ||
// Don't add transitions to special properties with non-trivial
// attributes.
attributes != NONE)
? OMIT_TRANSITION
- : INSERT_TRANSITION;
+ : initial_flag;
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
@@ -2001,7 +2020,8 @@ MaybeObject* JSObject::AddProperty(Name* name,
JSReceiver::StoreFromKeyed store_mode,
ExtensibilityCheck extensibility_check,
ValueType value_type,
- StoreMode mode) {
+ StoreMode mode,
+ TransitionFlag transition_flag) {
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
@@ -2028,10 +2048,10 @@ MaybeObject* JSObject::AddProperty(Name* name,
// !value->IsTheHole() &&
// !value->IsConsString()) {
if (value->IsJSFunction()) {
- result = AddConstantProperty(name, value, attributes);
+ result = AddConstantProperty(name, value, attributes, transition_flag);
} else {
result = AddFastProperty(
- name, value, attributes, store_mode, value_type);
+ name, value, attributes, store_mode, value_type, transition_flag);
}
} else {
// Normalize the object to prevent very large instance descriptors.
@@ -2071,7 +2091,8 @@ void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
}
Handle<Object> args[] = { type, object, name, old_value };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_notify_change()),
isolate->factory()->undefined_value(),
old_value->IsTheHole() ? 3 : 4, args,
&threw);
@@ -2083,6 +2104,7 @@ void JSObject::DeliverChangeRecords(Isolate* isolate) {
ASSERT(isolate->observer_delivery_pending());
bool threw = false;
Execution::Call(
+ isolate,
isolate->observers_deliver_changes(),
isolate->factory()->undefined_value(),
0,
@@ -2098,7 +2120,6 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check,
StoreMode mode) {
// Check local property, ignore interceptor.
LookupResult result(GetIsolate());
@@ -2110,13 +2131,12 @@ MaybeObject* JSObject::SetPropertyPostInterceptor(
return SetProperty(&result, name, value, attributes, strict_mode);
}
bool done = false;
- MaybeObject* result_object;
- result_object =
+ MaybeObject* result_object =
SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
if (done) return result_object;
// Add a new real property.
return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, extensibility_check,
+ MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK,
OPTIMAL_REPRESENTATION, mode);
}
@@ -2137,105 +2157,6 @@ MaybeObject* JSObject::ReplaceSlowProperty(Name* name,
}
-MaybeObject* JSObject::ConvertTransitionToMapTransition(
- int transition_index,
- Name* name,
- Object* new_value,
- PropertyAttributes attributes) {
- Map* old_map = map();
- Map* old_target = old_map->GetTransition(transition_index);
- Object* result;
-
- MaybeObject* maybe_result = ConvertDescriptorToField(
- name, new_value, attributes, OMIT_TRANSITION_KEEP_REPRESENTATIONS);
- if (!maybe_result->To(&result)) return maybe_result;
-
- if (!HasFastProperties()) return result;
-
- // This method should only be used to convert existing transitions.
- Map* new_map = map();
-
- // TODO(verwaest): From here on we lose existing map transitions, causing
- // invalid back pointers. This will change once we can store multiple
- // transitions with the same key.
- bool owned_descriptors = old_map->owns_descriptors();
- if (owned_descriptors ||
- old_target->instance_descriptors() == old_map->instance_descriptors()) {
- // Since the conversion above generated a new fast map with an additional
- // property which can be shared as well, install this descriptor pointer
- // along the entire chain of smaller maps.
- Map* map;
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- for (Object* current = old_map;
- !current->IsUndefined();
- current = map->GetBackPointer()) {
- map = Map::cast(current);
- if (map->instance_descriptors() != old_descriptors) break;
- map->SetEnumLength(Map::kInvalidEnumCache);
- map->set_instance_descriptors(new_descriptors);
- }
- old_map->set_owns_descriptors(false);
- }
-
- old_target->DeprecateTransitionTree();
-
- old_map->SetTransition(transition_index, new_map);
- new_map->SetBackPointer(old_map);
- return result;
-}
-
-
-MaybeObject* JSObject::ConvertDescriptorToField(Name* name,
- Object* new_value,
- PropertyAttributes attributes,
- TransitionFlag flag) {
- if (map()->unused_property_fields() == 0 &&
- TooManyFastProperties(properties()->length(), MAY_BE_STORE_FROM_KEYED)) {
- Object* obj;
- MaybeObject* maybe_obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- return ReplaceSlowProperty(name, new_value, attributes);
- }
-
- Representation representation = IsJSContextExtensionObject()
- ? Representation::Tagged() : new_value->OptimalRepresentation();
- int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes, representation);
-
- // Make a new map for the object.
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyInsertDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- // Make new properties array if necessary.
- FixedArray* new_properties = NULL;
- int new_unused_property_fields = map()->unused_property_fields() - 1;
- if (map()->unused_property_fields() == 0) {
- new_unused_property_fields = kFieldsAdded - 1;
- MaybeObject* maybe_new_properties =
- properties()->CopySize(properties()->length() + kFieldsAdded);
- if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
- }
-
- Heap* heap = GetHeap();
- Object* storage;
- MaybeObject* maybe_storage =
- new_value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
-
- // Update pointers to commit changes.
- // Object points to the new map.
- new_map->set_unused_property_fields(new_unused_property_fields);
- set_map(new_map);
- if (new_properties != NULL) {
- set_properties(new_properties);
- }
- FastPropertyAtPut(index, new_value);
- return new_value;
-}
-
-
const char* Representation::Mnemonic() const {
switch (kind_) {
case kNone: return "v";
@@ -2267,9 +2188,9 @@ static void ZapEndOfFixedArray(Address new_end, int to_trim) {
template<RightTrimMode trim_mode>
static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ ASSERT(elms->map() != heap->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
- ASSERT(!HEAP->lo_space()->Contains(elms));
+ ASSERT(!heap->lo_space()->Contains(elms));
const int len = elms->length();
@@ -2382,6 +2303,10 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
PropertyDetails old_details = old_descriptors->GetDetails(i);
+ if (old_details.type() == CALLBACKS) {
+ ASSERT(details.representation().IsTagged());
+ continue;
+ }
ASSERT(old_details.type() == CONSTANT ||
old_details.type() == FIELD);
Object* value = old_details.type() == CONSTANT
@@ -2438,10 +2363,11 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) {
MaybeObject* JSObject::GeneralizeFieldRepresentation(
int modify_index,
- Representation new_representation) {
+ Representation new_representation,
+ StoreMode store_mode) {
Map* new_map;
- MaybeObject* maybe_new_map =
- map()->GeneralizeRepresentation(modify_index, new_representation);
+ MaybeObject* maybe_new_map = map()->GeneralizeRepresentation(
+ modify_index, new_representation, store_mode);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
if (map() == new_map) return this;
@@ -2459,16 +2385,40 @@ int Map::NumberOfFields() {
}
-MaybeObject* Map::CopyGeneralizeAllRepresentations() {
+MaybeObject* Map::CopyGeneralizeAllRepresentations(
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason) {
Map* new_map;
MaybeObject* maybe_map = this->Copy();
if (!maybe_map->To(&new_map)) return maybe_map;
- new_map->instance_descriptors()->InitializeRepresentations(
- Representation::Tagged());
+ DescriptorArray* descriptors = new_map->instance_descriptors();
+ descriptors->InitializeRepresentations(Representation::Tagged());
+
+ // Unless the instance is being migrated, ensure that modify_index is a field.
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ if (store_mode == FORCE_FIELD && details.type() != FIELD) {
+ FieldDescriptor d(descriptors->GetKey(modify_index),
+ new_map->NumberOfFields(),
+ attributes,
+ Representation::Tagged());
+ d.SetSortedKeyIndex(details.pointer());
+ descriptors->Set(modify_index, &d);
+ int unused_property_fields = new_map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
+ new_map->set_unused_property_fields(unused_property_fields);
+ }
+
if (FLAG_trace_generalization) {
- PrintF("failed generalization %p -> %p\n",
- static_cast<void*>(this), static_cast<void*>(new_map));
+ PrintGeneralization(stdout, reason, modify_index,
+ new_map->NumberOfOwnDescriptors(),
+ new_map->NumberOfOwnDescriptors(),
+ details.type() == CONSTANT && store_mode == FORCE_FIELD,
+ Representation::Tagged(), Representation::Tagged());
}
return new_map;
}
@@ -2613,11 +2563,12 @@ Map* Map::FindLastMatchMap(int verbatim,
// - Otherwise, invalidate the outdated transition target from |updated|, and
// replace its transition tree with a new branch for the updated descriptors.
MaybeObject* Map::GeneralizeRepresentation(int modify_index,
- Representation new_representation) {
+ Representation new_representation,
+ StoreMode store_mode) {
Map* old_map = this;
DescriptorArray* old_descriptors = old_map->instance_descriptors();
- Representation old_representation =
- old_descriptors->GetDetails(modify_index).representation();
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ Representation old_representation = old_details.representation();
// It's fine to transition from None to anything but double without any
// modification to the object, because the default uninitialized value for
@@ -2626,12 +2577,6 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
if (old_representation.IsNone() &&
!new_representation.IsNone() &&
!new_representation.IsDouble()) {
- if (FLAG_trace_generalization) {
- PrintF("initializing representation %i: %p -> %s\n",
- modify_index,
- static_cast<void*>(this),
- new_representation.Mnemonic());
- }
old_descriptors->SetRepresentation(modify_index, new_representation);
return old_map;
}
@@ -2641,40 +2586,46 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
// Check the state of the root map.
if (!old_map->EquivalentToForTransition(root_map)) {
- return CopyGeneralizeAllRepresentations();
+ return CopyGeneralizeAllRepresentations(
+ modify_index, store_mode, old_details.attributes(), "not equivalent");
}
int verbatim = root_map->NumberOfOwnDescriptors();
+ if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
+ return CopyGeneralizeAllRepresentations(
+ modify_index, store_mode,
+ old_details.attributes(), "root modification");
+ }
+
Map* updated = root_map->FindUpdatedMap(
verbatim, descriptors, old_descriptors);
- if (updated == NULL) return CopyGeneralizeAllRepresentations();
+ if (updated == NULL) {
+ return CopyGeneralizeAllRepresentations(
+ modify_index, store_mode, old_details.attributes(), "incompatible");
+ }
DescriptorArray* updated_descriptors = updated->instance_descriptors();
int valid = updated->NumberOfOwnDescriptors();
+
+ // Directly change the map if the target map is more general. Ensure that the
+ // target type of the modify_index is a FIELD, unless we are migrating.
if (updated_descriptors->IsMoreGeneralThan(
- verbatim, valid, descriptors, old_descriptors)) {
+ verbatim, valid, descriptors, old_descriptors) &&
+ (store_mode == ALLOW_AS_CONSTANT ||
+ updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
Representation updated_representation =
updated_descriptors->GetDetails(modify_index).representation();
- if (new_representation.fits_into(updated_representation)) {
- if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsNone())) {
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- PrintF("migrating to existing map %p(%s) -> %p(%s)\n",
- static_cast<void*>(this),
- old_details.representation().Mnemonic(),
- static_cast<void*>(updated),
- updated_representation.Mnemonic());
- }
- return updated;
- }
+ if (new_representation.fits_into(updated_representation)) return updated;
}
DescriptorArray* new_descriptors;
MaybeObject* maybe_descriptors = updated_descriptors->Merge(
- verbatim, valid, descriptors, old_descriptors);
+ verbatim, valid, descriptors, modify_index, store_mode, old_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ ASSERT(store_mode == ALLOW_AS_CONSTANT ||
+ new_descriptors->GetDetails(modify_index).type() == FIELD);
old_representation =
new_descriptors->GetDetails(modify_index).representation();
@@ -2696,15 +2647,12 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
split_map->DeprecateTarget(
old_descriptors->GetKey(descriptor), new_descriptors);
- if (FLAG_trace_generalization &&
- !(modify_index == 0 && new_representation.IsNone())) {
- PrintF("migrating to new map %i: %p(%s) -> %p(%s) (%i steps)\n",
- modify_index,
- static_cast<void*>(this),
- old_representation.Mnemonic(),
- static_cast<void*>(new_descriptors),
- updated_representation.Mnemonic(),
- descriptors - descriptor);
+ if (FLAG_trace_generalization) {
+ PrintGeneralization(
+ stdout, "", modify_index, descriptor, descriptors,
+ old_descriptors->GetDetails(modify_index).type() == CONSTANT &&
+ store_mode == FORCE_FIELD,
+ old_representation, updated_representation);
}
Map* new_map = split_map;
@@ -2719,6 +2667,7 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index,
Handle<Map>(new_map);
return maybe_map;
}
+ new_map->set_migration_target(true);
}
new_map->set_owns_descriptors(true);
@@ -2770,8 +2719,8 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
if (!interceptor->setter()->IsUndefined()) {
LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
- v8::NamedPropertySetter setter =
- v8::ToCData<v8::NamedPropertySetter>(interceptor->setter());
+ v8::NamedPropertySetterCallback setter =
+ v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
Handle<Object> value_unhole(value->IsTheHole() ?
isolate->heap()->undefined_value() :
value,
@@ -2786,8 +2735,7 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
this_handle->SetPropertyPostInterceptor(*name_handle,
*value_handle,
attributes,
- strict_mode,
- PERFORM_EXTENSIBILITY_CHECK);
+ strict_mode);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return raw_result;
}
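The v8::NamedPropertySetter to v8::NamedPropertySetterCallback switch in this hunk tracks the renamed embedder typedefs in include/v8.h. A hedged sketch of the embedder side under the 3.20-era API assumed by this tree; TraceSetter and MakeTemplate are illustrative names, not part of the patch:

#include <v8.h>

// Matches the renamed typedef: void(Local<String>, Local<Value>,
// const PropertyCallbackInfo<Value>&).
static void TraceSetter(v8::Local<v8::String> property,
                        v8::Local<v8::Value> value,
                        const v8::PropertyCallbackInfo<v8::Value>& info) {
  // Setting a return value tells V8 the interceptor handled the store.
  info.GetReturnValue().Set(value);
}

// Caller is assumed to hold a HandleScope and an entered context.
static v8::Local<v8::ObjectTemplate> MakeTemplate() {
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  templ->SetNamedPropertyHandler(0 /* getter */, TraceSetter);
  return templ;
}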
@@ -2851,7 +2799,8 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
Foreign::cast(structure)->foreign_address());
- MaybeObject* obj = (callback->setter)(this, value, callback->data);
+ MaybeObject* obj = (callback->setter)(
+ isolate, this, value, callback->data);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (obj->IsFailure()) return obj;
return *value_handle;
@@ -2873,7 +2822,8 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+ v8::AccessorSetterCallback call_fun =
+ v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<String> key(String::cast(name));
LOG(isolate, ApiNamedPropertyAccess("store", this, name));
@@ -2931,7 +2881,8 @@ MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
#endif
bool has_pending_exception;
Handle<Object> argv[] = { value_handle };
- Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Execution::Call(
+ isolate, fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *value_handle;
@@ -3055,48 +3006,101 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
}
-void Map::AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors) {
- Isolate* isolate = map->GetIsolate();
- Handle<DescriptorArray> array(map->instance_descriptors());
- NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
-
- ASSERT(array->NumberOfSlackDescriptors() >= nof_callbacks);
+template<class T>
+static int AppendUniqueCallbacks(NeanderArray* callbacks,
+ Handle<typename T::Array> array,
+ int valid_descriptors) {
+ int nof_callbacks = callbacks->length();
+ Isolate* isolate = array->GetIsolate();
// Ensure the keys are unique names before writing them into the
// instance descriptor. Since it may cause a GC, it has to be done before we
// temporarily put the heap in an invalid state while appending descriptors.
for (int i = 0; i < nof_callbacks; ++i) {
- Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
- if (!entry->name()->IsUniqueName()) {
- Handle<String> key =
- isolate->factory()->InternalizedStringFromString(
- Handle<String>(String::cast(entry->name())));
- entry->set_name(*key);
- }
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks->get(i)));
+ if (entry->name()->IsUniqueName()) continue;
+ Handle<String> key =
+ isolate->factory()->InternalizedStringFromString(
+ Handle<String>(String::cast(entry->name())));
+ entry->set_name(*key);
}
- int nof = map->NumberOfOwnDescriptors();
-
// Fill in new callback descriptors. Process the callbacks from
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
for (int i = nof_callbacks - 1; i >= 0; i--) {
- AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i));
+ AccessorInfo* entry = AccessorInfo::cast(callbacks->get(i));
Name* key = Name::cast(entry->name());
// Check if a descriptor with this name already exists before writing.
- if (array->Search(key, nof) == DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(key, entry, entry->property_attributes());
- array->Append(&desc);
- nof += 1;
+ if (!T::Contains(key, entry, valid_descriptors, array)) {
+ T::Insert(key, entry, valid_descriptors, array);
+ valid_descriptors++;
}
}
+ return valid_descriptors;
+}
+
+struct DescriptorArrayAppender {
+ typedef DescriptorArray Array;
+ static bool Contains(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<DescriptorArray> array) {
+ return array->Search(key, valid_descriptors) != DescriptorArray::kNotFound;
+ }
+ static void Insert(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<DescriptorArray> array) {
+ CallbacksDescriptor desc(key, entry, entry->property_attributes());
+ array->Append(&desc);
+ }
+};
+
+
+struct FixedArrayAppender {
+ typedef FixedArray Array;
+ static bool Contains(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<FixedArray> array) {
+ for (int i = 0; i < valid_descriptors; i++) {
+ if (key == AccessorInfo::cast(array->get(i))->name()) return true;
+ }
+ return false;
+ }
+ static void Insert(Name* key,
+ AccessorInfo* entry,
+ int valid_descriptors,
+ Handle<FixedArray> array) {
+ array->set(valid_descriptors, entry);
+ }
+};
+
+
+void Map::AppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors) {
+ int nof = map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> array(map->instance_descriptors());
+ NeanderArray callbacks(descriptors);
+ ASSERT(array->NumberOfSlackDescriptors() >= callbacks.length());
+ nof = AppendUniqueCallbacks<DescriptorArrayAppender>(&callbacks, array, nof);
map->SetNumberOfOwnDescriptors(nof);
}
+int AccessorInfo::AppendUnique(Handle<Object> descriptors,
+ Handle<FixedArray> array,
+ int valid_descriptors) {
+ NeanderArray callbacks(descriptors);
+ ASSERT(array->length() >= callbacks.length() + valid_descriptors);
+ return AppendUniqueCallbacks<FixedArrayAppender>(&callbacks,
+ array,
+ valid_descriptors);
+}
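The appender template introduced above separates the dedup policy from the storage: callbacks are walked back to front so the last registration of a name wins, while Contains/Insert abstract over DescriptorArray vs. FixedArray. The same policy in a self-contained form (std::vector stands in for the storage; illustrative only):

#include <string>
#include <vector>

struct Callback { std::string name; int payload; };

// array holds valid_descriptors entries on entry; returns the new count.
static int AppendUnique(const std::vector<Callback>& callbacks,
                        std::vector<Callback>* array,
                        int valid_descriptors) {
  for (int i = static_cast<int>(callbacks.size()) - 1; i >= 0; i--) {
    const Callback& entry = callbacks[i];
    bool found = false;
    for (int j = 0; j < valid_descriptors; j++) {
      if ((*array)[j].name == entry.name) { found = true; break; }
    }
    if (!found) {             // first hit wins, i.e. the last-registered callback
      array->push_back(entry);
      valid_descriptors++;
    }
  }
  return valid_descriptors;
}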
+
+
static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
ASSERT(!map.is_null());
for (int i = 0; i < maps->length(); ++i) {
@@ -3360,6 +3364,15 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
result->holder(),
strict_mode);
}
+ } else if (obj->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(obj);
+ if (pair->all_can_read()) {
+ return SetPropertyWithCallback(result->GetCallbackObject(),
+ name,
+ value,
+ result->holder(),
+ strict_mode);
+ }
}
break;
}
@@ -3484,9 +3497,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
// Emulate [[GetProperty]] semantics for proxies.
bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Handle<Object> desc = Execution::Call(
+ isolate, isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
// [[GetProperty]] requires to check that all properties are configurable.
@@ -3553,20 +3566,20 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<Object> JSProxy::DeletePropertyWithHandler(
- Handle<JSProxy> object, Handle<Name> name, DeleteMode mode) {
- Isolate* isolate = object->GetIsolate();
+ Handle<JSProxy> proxy, Handle<Name> name, DeleteMode mode) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return isolate->factory()->false_value();
Handle<Object> args[] = { name };
- Handle<Object> result = object->CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"delete", Handle<Object>(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return Handle<Object>();
bool result_bool = result->BooleanValue();
if (mode == STRICT_DELETION && !result_bool) {
- Handle<Object> handler(object->handler(), isolate);
+ Handle<Object> handler(proxy->handler(), isolate);
Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("delete"));
Handle<Object> args[] = { handler, trap_name };
@@ -3580,10 +3593,10 @@ Handle<Object> JSProxy::DeletePropertyWithHandler(
Handle<Object> JSProxy::DeleteElementWithHandler(
- Handle<JSProxy> object, uint32_t index, DeleteMode mode) {
- Isolate* isolate = object->GetIsolate();
+ Handle<JSProxy> proxy, uint32_t index, DeleteMode mode) {
+ Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return JSProxy::DeletePropertyWithHandler(object, name, mode);
+ return JSProxy::DeletePropertyWithHandler(proxy, name, mode);
}
@@ -3609,9 +3622,9 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
bool has_pending_exception;
Handle<Object> argv[] = { result };
- Handle<Object> desc =
- Execution::Call(isolate->to_complete_property_descriptor(), result,
- ARRAY_SIZE(argv), argv, &has_pending_exception);
+ Handle<Object> desc = Execution::Call(
+ isolate, isolate->to_complete_property_descriptor(), result,
+ ARRAY_SIZE(argv), argv, &has_pending_exception);
if (has_pending_exception) return NONE;
// Convert result to PropertyAttributes.
@@ -3665,27 +3678,23 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
}
-void JSProxy::Fix() {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSProxy> self(this);
+void JSProxy::Fix(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
// Save identity hash.
- MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
+ Handle<Object> hash = JSProxy::GetIdentityHash(proxy, OMIT_CREATION);
- if (IsJSFunctionProxy()) {
- isolate->factory()->BecomeJSFunction(self);
+ if (proxy->IsJSFunctionProxy()) {
+ isolate->factory()->BecomeJSFunction(proxy);
// Code will be set on the JavaScript side.
} else {
- isolate->factory()->BecomeJSObject(self);
+ isolate->factory()->BecomeJSObject(proxy);
}
- ASSERT(self->IsJSObject());
+ ASSERT(proxy->IsJSObject());
// Inherit identity, if it was present.
- Object* hash;
- if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
- Handle<JSObject> new_self(JSObject::cast(*self));
- isolate->factory()->SetIdentityHash(new_self, Smi::cast(hash));
+ if (hash->IsSmi()) {
+ JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy), Smi::cast(*hash));
}
}
@@ -3713,7 +3722,7 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
}
bool threw;
- return Execution::Call(trap, handler, argc, argv, &threw);
+ return Execution::Call(isolate, trap, handler, argc, argv, &threw);
}
@@ -3725,11 +3734,6 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
void JSObject::MigrateInstance(Handle<JSObject> object) {
- if (FLAG_trace_migration) {
- PrintF("migrating instance %p (%p)\n",
- static_cast<void*>(*object),
- static_cast<void*>(object->map()));
- }
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
object->MigrateInstance());
@@ -3737,11 +3741,6 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- if (FLAG_trace_migration) {
- PrintF("migrating instance (no new maps) %p (%p)\n",
- static_cast<void*>(*object),
- static_cast<void*>(object->map()));
- }
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->MigrateInstance(),
@@ -3751,14 +3750,148 @@ Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
int modify_index,
- Representation representation) {
+ Representation representation,
+ StoreMode store_mode) {
CALL_HEAP_FUNCTION(
map->GetIsolate(),
- map->GeneralizeRepresentation(modify_index, representation),
+ map->GeneralizeRepresentation(modify_index, representation, store_mode),
Map);
}
+static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Map* transition_map = lookup->GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
+
+ DescriptorArray* descriptors = transition_map->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+
+ if (details.type() == CALLBACKS || attributes != details.attributes()) {
+ // AddProperty will either normalize the object, or create a new fast copy
+ // of the map. If we get a fast copy of the map, all field representations
+ // will be tagged since the transition is omitted.
+ return lookup->holder()->AddProperty(
+ *name, *value, attributes, kNonStrictMode,
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
+ JSReceiver::OMIT_EXTENSIBILITY_CHECK,
+ JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
+ }
+
+ // Keep the target CONSTANT if the same value is stored.
+ // TODO(verwaest): Also support keeping the placeholder
+ // (value->IsUninitialized) as constant.
+ if (details.type() == CONSTANT &&
+ descriptors->GetValue(descriptor) == *value) {
+ lookup->holder()->set_map(transition_map);
+ return *value;
+ }
+
+ Representation representation = details.representation();
+
+ if (!value->FitsRepresentation(representation) ||
+ details.type() == CONSTANT) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation(), FORCE_FIELD);
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ descriptors = transition_map->instance_descriptors();
+ representation = descriptors->GetDetails(descriptor).representation();
+ }
+
+ int field_index = descriptors->GetFieldIndex(descriptor);
+ return lookup->holder()->AddFastPropertyUsingMap(
+ transition_map, *name, *value, field_index, representation);
+}
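SetPropertyUsingTransition above folds what used to be the inline TRANSITION case into four outcomes. A compact decision model of that control flow; the names are illustrative, and the real code additionally threads allocation failures through MaybeObject*:

// Target descriptor kind at the transition map's LastAdded() slot.
enum class Kind { kField, kConstant, kCallbacks };

enum class Action {
  kAddPropertyTagged,    // CALLBACKS target or attribute mismatch
  kSwitchMapOnly,        // same CONSTANT value: just adopt the transition map
  kGeneralizeThenStore,  // representation too narrow, or CONSTANT with a new value
  kStoreToField          // plain fast-path field store
};

static Action Classify(Kind target, bool same_attributes,
                       bool same_value, bool value_fits) {
  if (target == Kind::kCallbacks || !same_attributes) return Action::kAddPropertyTagged;
  if (target == Kind::kConstant && same_value) return Action::kSwitchMapOnly;
  if (!value_fits || target == Kind::kConstant) return Action::kGeneralizeThenStore;
  return Action::kStoreToField;
}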
+
+
+static MaybeObject* SetPropertyToField(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value) {
+ Representation representation = lookup->representation();
+ if (!value->FitsRepresentation(representation) ||
+ lookup->type() == CONSTANT) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->GeneralizeFieldRepresentation(
+ lookup->GetDescriptorIndex(),
+ value->OptimalRepresentation(),
+ FORCE_FIELD);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
+ int descriptor = lookup->GetDescriptorIndex();
+ representation = desc->GetDetails(descriptor).representation();
+ }
+
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
+ lookup->GetFieldIndex().field_index()));
+ storage->set_value(value->Number());
+ return *value;
+ }
+
+ lookup->holder()->FastPropertyAtPut(
+ lookup->GetFieldIndex().field_index(), *value);
+ return *value;
+}
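The FLAG_track_double_fields branch above relies on double fields being stored as mutable heap boxes: a representation-compatible store overwrites the box rather than allocating a fresh number. A sketch of that invariant with stand-in types:

// Stand-ins for HeapNumber and a fast-property slot (illustrative).
struct HeapNumberBox { double value; };
struct FieldSlot { HeapNumberBox* box; };

static void StoreDoubleField(FieldSlot* slot, double v) {
  // In-place mutation: the object keeps pointing at the same box,
  // so no allocation (and hence no GC) can happen on this path.
  slot->box->value = v;
}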
+
+
+static MaybeObject* ConvertAndSetLocalProperty(LookupResult* lookup,
+ Name* name,
+ Object* value,
+ PropertyAttributes attributes) {
+ JSObject* object = lookup->holder();
+ if (object->TooManyFastProperties()) {
+ MaybeObject* maybe_failure = object->NormalizeProperties(
+ CLEAR_INOBJECT_PROPERTIES, 0);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+
+ if (!object->HasFastProperties()) {
+ return object->ReplaceSlowProperty(name, value, attributes);
+ }
+
+ int descriptor_index = lookup->GetDescriptorIndex();
+ if (lookup->GetAttributes() == attributes) {
+ MaybeObject* maybe_failure = object->GeneralizeFieldRepresentation(
+ descriptor_index, Representation::Tagged(), FORCE_FIELD);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ } else {
+ Map* map;
+ MaybeObject* maybe_map = object->map()->CopyGeneralizeAllRepresentations(
+ descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
+ if (!maybe_map->To(&map)) return maybe_map;
+ MaybeObject* maybe_failure = object->MigrateToMap(map);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+
+ DescriptorArray* descriptors = object->map()->instance_descriptors();
+ int index = descriptors->GetDetails(descriptor_index).field_index();
+ object->FastPropertyAtPut(index, value);
+ return value;
+}
+
+
+static MaybeObject* SetPropertyToFieldWithAttributes(
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ if (lookup->GetAttributes() == attributes) {
+ if (value->IsUninitialized()) return *value;
+ return SetPropertyToField(lookup, name, value);
+ } else {
+ return ConvertAndSetLocalProperty(lookup, *name, *value, attributes);
+ }
+}
+
+
MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
Name* name_raw,
Object* value_raw,
@@ -3767,9 +3900,10 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
Isolate* isolate = heap->isolate();
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We internalize these short keys to avoid constantly
@@ -3847,37 +3981,13 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
case NORMAL:
result = lookup->holder()->SetNormalizedProperty(lookup, *value);
break;
- case FIELD: {
- Representation representation = lookup->representation();
- if (!value->FitsRepresentation(representation)) {
- MaybeObject* maybe_failure =
- lookup->holder()->GeneralizeFieldRepresentation(
- lookup->GetDescriptorIndex(), value->OptimalRepresentation());
- if (maybe_failure->IsFailure()) return maybe_failure;
- DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
- int descriptor = lookup->GetDescriptorIndex();
- representation = desc->GetDetails(descriptor).representation();
- }
- if (FLAG_track_double_fields && representation.IsDouble()) {
- HeapNumber* storage =
- HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
- lookup->GetFieldIndex().field_index()));
- storage->set_value(value->Number());
- result = *value;
- break;
- }
- lookup->holder()->FastPropertyAtPut(
- lookup->GetFieldIndex().field_index(), *value);
- result = *value;
+ case FIELD:
+ result = SetPropertyToField(lookup, name, value);
break;
- }
case CONSTANT:
// Only replace the constant if necessary.
if (*value == lookup->GetConstant()) return *value;
- // Preserve the attributes of this existing property.
- attributes = lookup->GetAttributes();
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
+ result = SetPropertyToField(lookup, name, value);
break;
case CALLBACKS: {
Object* callback_object = lookup->GetCallbackObject();
@@ -3889,55 +3999,7 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
*name, *value, attributes, strict_mode);
break;
case TRANSITION: {
- Map* transition_map = lookup->GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- Representation representation = details.representation();
- if (!value->FitsRepresentation(representation)) {
- MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
- descriptor, value->OptimalRepresentation());
- if (!maybe_map->To(&transition_map)) return maybe_map;
- Object* back = transition_map->GetBackPointer();
- if (back->IsMap()) {
- MaybeObject* maybe_failure =
- lookup->holder()->MigrateToMap(Map::cast(back));
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
- DescriptorArray* desc = transition_map->instance_descriptors();
- int descriptor = transition_map->LastAdded();
- representation = desc->GetDetails(descriptor).representation();
- }
- int field_index = descriptors->GetFieldIndex(descriptor);
- result = lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index, representation);
- } else {
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
- }
- } else if (details.type() == CALLBACKS) {
- result = lookup->holder()->ConvertDescriptorToField(
- *name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT);
-
- Object* constant = descriptors->GetValue(descriptor);
- if (constant == *value) {
- // If the same constant function is being added we can simply
- // transition to the target map.
- lookup->holder()->set_map(transition_map);
- result = constant;
- } else {
- // Otherwise, replace with a map transition to a new map with a FIELD,
- // even if the value is a constant function.
- result = lookup->holder()->ConvertTransitionToMapTransition(
- lookup->GetTransitionIndex(), *name, *value, attributes);
- }
- }
+ result = SetPropertyUsingTransition(lookup, name, value, attributes);
break;
}
case HANDLER:
@@ -3967,6 +4029,29 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
}
+MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline(
+ Name* key,
+ Object* value,
+ PropertyAttributes attributes,
+ ValueType value_type,
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
+ // TODO(mstarzinger): The trampoline is a giant hack, don't use it anywhere
+ // else or handlification people will start hating you for all eternity.
+ HandleScope scope(GetIsolate());
+ IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate());
+ return trampoline.CallWithReturnValue(
+ &JSObject::SetLocalPropertyIgnoreAttributes,
+ Handle<JSObject>(this),
+ Handle<Name>(key),
+ Handle<Object>(value, GetIsolate()),
+ attributes,
+ value_type,
+ mode,
+ extensibility_check);
+}
+
+
// Set a real local property, even if it is READ_ONLY. If the property is not
// present, add it with attributes NONE. This code is an exact clone of
// SetProperty, with the check for IsReadOnly and the check for a
@@ -3982,11 +4067,12 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type,
- StoreMode mode) {
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->SetLocalPropertyIgnoreAttributes(
- *key, *value, attributes, value_type, mode),
+ *key, *value, attributes, value_type, mode, extensibility_check),
Object);
}
@@ -3996,10 +4082,11 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
Object* value_raw,
PropertyAttributes attributes,
ValueType value_type,
- StoreMode mode) {
+ StoreMode mode,
+ ExtensibilityCheck extensibility_check) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
Isolate* isolate = GetIsolate();
LookupResult lookup(isolate);
LocalLookup(name_raw, &lookup, true);
@@ -4024,7 +4111,13 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
value_raw,
attributes,
value_type,
- mode);
+ mode,
+ extensibility_check);
+ }
+
+ if (lookup.IsFound() &&
+ (lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
+ LocalLookupRealNamedProperty(name_raw, &lookup);
}
// Check for accessor in prototype chain removed here in clone.
@@ -4032,7 +4125,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
// Neither properties nor transitions found.
return AddProperty(
name_raw, value_raw, attributes, kNonStrictMode,
- MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK, value_type, mode);
+ MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode);
}
// From this point on everything needs to be handlified.
@@ -4045,101 +4138,38 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
PropertyAttributes old_attributes = ABSENT;
bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
if (is_observed && lookup.IsProperty()) {
- if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name);
+ if (lookup.IsDataProperty()) old_value =
+ Object::GetProperty(self, name);
old_attributes = lookup.GetAttributes();
}
// Check of IsReadOnly removed from here in clone.
MaybeObject* result = *value;
switch (lookup.type()) {
- case NORMAL: {
- PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- result = self->SetNormalizedProperty(*name, *value, details);
+ case NORMAL:
+ result = self->ReplaceSlowProperty(*name, *value, attributes);
break;
- }
- case FIELD: {
- Representation representation = lookup.representation();
- Representation value_representation =
- value->OptimalRepresentation(value_type);
- if (value_representation.IsNone()) break;
- if (!value_representation.fits_into(representation)) {
- MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
- lookup.GetDescriptorIndex(), value_representation);
- if (maybe_failure->IsFailure()) return maybe_failure;
- DescriptorArray* desc = self->map()->instance_descriptors();
- int descriptor = lookup.GetDescriptorIndex();
- representation = desc->GetDetails(descriptor).representation();
- }
- if (FLAG_track_double_fields && representation.IsDouble()) {
- HeapNumber* storage =
- HeapNumber::cast(self->RawFastPropertyAt(
- lookup.GetFieldIndex().field_index()));
- storage->set_value(value->Number());
- result = *value;
- break;
- }
- self->FastPropertyAtPut(lookup.GetFieldIndex().field_index(), *value);
- result = *value;
+ case FIELD:
+ result = SetPropertyToFieldWithAttributes(
+ &lookup, name, value, attributes);
break;
- }
case CONSTANT:
- // Only replace the function if necessary.
- if (*value != lookup.GetConstant()) {
- // Preserve the attributes of this existing property.
- attributes = lookup.GetAttributes();
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ // Only replace the constant if necessary.
+ if (lookup.GetAttributes() != attributes ||
+ *value != lookup.GetConstant()) {
+ result = SetPropertyToFieldWithAttributes(
+ &lookup, name, value, attributes);
}
break;
case CALLBACKS:
- case INTERCEPTOR:
- // Override callback in clone
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ result = ConvertAndSetLocalProperty(&lookup, *name, *value, attributes);
break;
- case TRANSITION: {
- Map* transition_map = lookup.GetTransitionTarget();
- int descriptor = transition_map->LastAdded();
-
- DescriptorArray* descriptors = transition_map->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
-
- if (details.type() == FIELD) {
- if (attributes == details.attributes()) {
- Representation representation = details.representation();
- Representation value_representation =
- value->OptimalRepresentation(value_type);
- if (!value_representation.fits_into(representation)) {
- MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
- descriptor, value_representation);
- if (!maybe_map->To(&transition_map)) return maybe_map;
- Object* back = transition_map->GetBackPointer();
- if (back->IsMap()) {
- MaybeObject* maybe_failure = self->MigrateToMap(Map::cast(back));
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
- DescriptorArray* desc = transition_map->instance_descriptors();
- int descriptor = transition_map->LastAdded();
- representation = desc->GetDetails(descriptor).representation();
- }
- int field_index = descriptors->GetFieldIndex(descriptor);
- result = self->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index, representation);
- } else {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- }
- } else if (details.type() == CALLBACKS) {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT);
-
- // Replace transition to CONSTANT FUNCTION with a map transition to a
- // new map with a FIELD, even if the value is a function.
- result = self->ConvertTransitionToMapTransition(
- lookup.GetTransitionIndex(), *name, *value, attributes);
- }
+ case TRANSITION:
+ result = SetPropertyUsingTransition(&lookup, name, value, attributes);
break;
- }
- case HANDLER:
case NONEXISTENT:
+ case HANDLER:
+ case INTERCEPTOR:
UNREACHABLE();
}
@@ -4201,20 +4231,20 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
if (name->IsSymbol()) return ABSENT;
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(String::cast(name));
PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
if (!interceptor->query()->IsUndefined()) {
- v8::NamedPropertyQuery query =
- v8::ToCData<v8::NamedPropertyQuery>(interceptor->query());
+ v8::NamedPropertyQueryCallback query =
+ v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-has", *holder_handle, name));
v8::Handle<v8::Integer> result =
@@ -4224,8 +4254,8 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
return static_cast<PropertyAttributes>(result->Int32Value());
}
} else if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+ v8::NamedPropertyGetterCallback getter =
+ v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get-has", this, name));
v8::Handle<v8::Value> result =
@@ -4336,25 +4366,27 @@ PropertyAttributes JSObject::GetElementAttributeWithReceiver(
PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
JSReceiver* receiver, uint32_t index, bool continue_search) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSReceiver> hreceiver(receiver);
Handle<JSObject> holder(this);
PropertyCallbackArguments args(isolate, interceptor->data(), receiver, this);
if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQuery query =
- v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
+ v8::IndexedPropertyQueryCallback query =
+ v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
v8::Handle<v8::Integer> result = args.Call(query, index);
if (!result.IsEmpty())
return static_cast<PropertyAttributes>(result->Int32Value());
} else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
v8::Handle<v8::Value> result = args.Call(getter, index);
@@ -4447,30 +4479,21 @@ void NormalizedMapCache::Clear() {
}
-void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Code> code) {
- Isolate* isolate = object->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate,
- object->UpdateMapCodeCache(*name, *code));
-}
-
-
-MaybeObject* JSObject::UpdateMapCodeCache(Name* name, Code* code) {
- if (map()->is_shared()) {
+void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
+ Handle<Name> name,
+ Handle<Code> code) {
+ Handle<Map> map(object->map());
+ if (map->is_shared()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Fast case maps are never marked as shared.
- ASSERT(!HasFastProperties());
+ ASSERT(!receiver->HasFastProperties());
// Replace the map with an identical copy that can be safely modified.
- Object* obj;
- { MaybeObject* maybe_obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
- UNIQUE_NORMALIZED_MAP);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- GetIsolate()->counters()->normalized_maps()->Increment();
-
- set_map(Map::cast(obj));
+ map = Map::CopyNormalized(map, KEEP_INOBJECT_PROPERTIES,
+ UNIQUE_NORMALIZED_MAP);
+ receiver->GetIsolate()->counters()->normalized_maps()->Increment();
+ receiver->set_map(*map);
}
- return map()->UpdateCodeCache(name, code);
+ Map::UpdateCodeCache(map, name, code);
}
@@ -4721,7 +4744,7 @@ Smi* JSReceiver::GenerateIdentityHash() {
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
- hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
+ hash_value = isolate->random_number_generator()->NextInt() & Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
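The retry loop above, now fed by isolate->random_number_generator() instead of V8::RandomPrivate, draws until it gets a nonzero smi-range hash, reserving 0 as "no hash". A standalone model, assuming a 32-bit tagging scheme where Smi::kMaxValue is 2^30 - 1:

#include <random>

static int GenerateIdentityHash(std::mt19937* rng) {
  const unsigned kSmiMaxValue = (1u << 30) - 1;  // assumed 32-bit Smi::kMaxValue
  int hash_value = 0;
  for (int attempts = 0; hash_value == 0 && attempts < 30; attempts++) {
    hash_value = static_cast<int>((*rng)() & kSmiMaxValue);
  }
  return hash_value != 0 ? hash_value : 1;  // never return 0
}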
@@ -4730,17 +4753,16 @@ Smi* JSReceiver::GenerateIdentityHash() {
}
-MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) {
- MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_string(),
- hash);
- if (maybe->IsFailure()) return maybe;
- return this;
+void JSObject::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->SetHiddenProperty(
+ object->GetHeap()->identity_hash_string(), hash));
}
-int JSObject::GetIdentityHash(Handle<JSObject> obj) {
- CALL_AND_RETRY_OR_DIE(obj->GetIsolate(),
- obj->GetIdentityHash(ALLOW_CREATION),
+int JSObject::GetIdentityHash(Handle<JSObject> object) {
+ CALL_AND_RETRY_OR_DIE(object->GetIsolate(),
+ object->GetIdentityHash(ALLOW_CREATION),
return Smi::cast(__object__)->value(),
return 0);
}
@@ -4765,6 +4787,12 @@ MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
}
+Handle<Object> JSProxy::GetIdentityHash(Handle<JSProxy> proxy,
+ CreationFlag flag) {
+ CALL_HEAP_FUNCTION(proxy->GetIsolate(), proxy->GetIdentityHash(flag), Object);
+}
+
+
MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
Object* hash = this->hash();
if (!hash->IsSmi() && flag == ALLOW_CREATION) {
@@ -4857,30 +4885,27 @@ MaybeObject* JSObject::SetHiddenProperty(Name* key, Object* value) {
}
-void JSObject::DeleteHiddenProperty(Name* key) {
+void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
+ Isolate* isolate = object->GetIsolate();
ASSERT(key->IsUniqueName());
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- Object* proxy_parent = GetPrototype();
- // If the proxy is detached, return immediately.
- if (proxy_parent->IsNull()) return;
- ASSERT(proxy_parent->IsJSGlobalObject());
- JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
- return;
+
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return;
+ ASSERT(proto->IsJSGlobalObject());
+ return DeleteHiddenProperty(Handle<JSObject>::cast(proto), key);
}
- ASSERT(!IsJSGlobalProxy());
+
MaybeObject* hidden_lookup =
- GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
+ object->GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
Object* inline_value = hidden_lookup->ToObjectUnchecked();
// We never delete (inline-stored) identity hashes.
- ASSERT(key != GetHeap()->identity_hash_string());
+ ASSERT(*key != isolate->heap()->identity_hash_string());
if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
- ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
- MaybeObject* delete_result = hashtable->Put(key, GetHeap()->the_hole_value());
- USE(delete_result);
- ASSERT(!delete_result->IsFailure()); // Delete does not cause GC.
+ Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
+ PutIntoObjectHashTable(hashtable, key, isolate->factory()->the_hole_value());
}
@@ -4951,13 +4976,13 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
ASSERT_EQ(hashtable, new_table);
}
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_string(),
- hashtable,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK,
- FORCE_FIELD);
+ MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
+ GetHeap()->hidden_string(),
+ hashtable,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
return hashtable;
}
@@ -4984,13 +5009,13 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
}
}
}
- MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_string(),
- value,
- DONT_ENUM,
- kNonStrictMode,
- OMIT_EXTENSIBILITY_CHECK,
- FORCE_FIELD);
+ MaybeObject* store_result = SetLocalPropertyIgnoreAttributesTrampoline(
+ GetHeap()->hidden_string(),
+ value,
+ DONT_ENUM,
+ OPTIMAL_REPRESENTATION,
+ ALLOW_AS_CONSTANT,
+ OMIT_EXTENSIBILITY_CHECK);
if (store_result->IsFailure()) return store_result;
return this;
}
@@ -5021,8 +5046,8 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->deleter()->IsUndefined()) {
- v8::NamedPropertyDeleter deleter =
- v8::ToCData<v8::NamedPropertyDeleter>(interceptor->deleter());
+ v8::NamedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-delete", *object, *name));
PropertyCallbackArguments args(
@@ -5045,111 +5070,110 @@ Handle<Object> JSObject::DeletePropertyWithInterceptor(Handle<JSObject> object,
}
-MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> AccessorDelete(Handle<JSObject> object,
+ uint32_t index,
+ JSObject::DeleteMode mode) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->GetElementsAccessor()->Delete(*object,
+ index,
+ mode),
+ Object);
+}
+
+
+Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return heap->false_value();
- v8::IndexedPropertyDeleter deleter =
- v8::ToCData<v8::IndexedPropertyDeleter>(interceptor->deleter());
- Handle<JSObject> this_handle(this);
+
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ if (interceptor->deleter()->IsUndefined()) return factory->false_value();
+ v8::IndexedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ ApiIndexedPropertyAccess("interceptor-indexed-delete", *object, index));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
v8::Handle<v8::Boolean> result = args.Call(deleter, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (!result.IsEmpty()) {
ASSERT(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
- return *result_internal;
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return handle(*result_internal, isolate);
}
- MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
- *this_handle,
- index,
- NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> delete_result = AccessorDelete(object, index, NORMAL_DELETION);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return delete_result;
}
-Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
+Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
uint32_t index,
DeleteMode mode) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, mode),
- Object);
-}
-
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
-MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->false_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayIndexedAccess(*object, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->false_value();
}
- if (IsStringObjectWithCharacterAt(index)) {
+ if (object->IsStringObjectWithCharacterAt(index)) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> holder(this, isolate);
- Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { name, holder };
+ Handle<Object> name = factory->NewNumberFromUint(index);
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
+ factory->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return isolate->heap()->false_value();
+ return factory->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return factory->false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
+ return DeleteElement(Handle<JSObject>::cast(proto), index, mode);
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
-
Handle<Object> old_value;
bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && self->map()->is_observed()) {
- should_enqueue_change_record = self->HasLocalElement(index);
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
+ should_enqueue_change_record = object->HasLocalElement(index);
if (should_enqueue_change_record) {
- old_value = self->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(isolate->factory()->the_hole_value())
- : Object::GetElement(self, index);
+ old_value = object->GetLocalElementAccessorPair(index) != NULL
+ ? Handle<Object>::cast(factory->the_hole_value())
+ : Object::GetElement(isolate, object, index);
}
}
- MaybeObject* result;
// Skip interceptor if forcing deletion.
- if (self->HasIndexedInterceptor() && mode != FORCE_DELETION) {
- result = self->DeleteElementWithInterceptor(index);
+ Handle<Object> result;
+ if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
+ result = DeleteElementWithInterceptor(object, index);
} else {
- result = self->GetElementsAccessor()->Delete(*self, index, mode);
+ result = AccessorDelete(object, index, mode);
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (should_enqueue_change_record && !self->HasLocalElement(index)) {
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- EnqueueChangeRecord(self, "deleted", name, old_value);
+ if (should_enqueue_change_record && !object->HasLocalElement(index)) {
+ Handle<String> name = factory->Uint32ToString(index);
+ EnqueueChangeRecord(object, "deleted", name, old_value);
}
- return *hresult;
+ return result;
}
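The handlified DeleteElement above preserves the Object.observe protocol: capture the old value only for observed objects, perform the delete, then enqueue a "deleted" record only if the element is really gone afterwards. A toy model of that ordering (illustrative types; the real code also distinguishes accessor pairs and delete modes):

#include <cstdint>
#include <functional>
#include <map>
#include <string>

struct ObservedElements {
  std::map<uint32_t, int> elements;
  bool observed = false;
  std::function<void(const std::string&, uint32_t, int)> enqueue;
};

static bool DeleteElement(ObservedElements* obj, uint32_t index) {
  auto it = obj->elements.find(index);
  bool should_record = obj->observed && it != obj->elements.end();
  int old_value = should_record ? it->second : 0;
  if (it != obj->elements.end()) obj->elements.erase(it);
  if (should_record && obj->elements.count(index) == 0) {
    obj->enqueue("deleted", index, old_value);  // only after the delete took effect
  }
  return true;
}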
@@ -5922,7 +5946,8 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
uint32_t index,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -5980,9 +6005,9 @@ void JSObject::DefineElementAccessor(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
Handle<AccessorPair> accessors = isolate->factory()->NewAccessorPair();
accessors->SetComponents(*getter, *setter);
+ accessors->set_access_flags(access_control);
- CALL_HEAP_FUNCTION_VOID(
- isolate, object->SetElementCallback(index, *accessors, attributes));
+ SetElementCallback(object, index, accessors, attributes);
}
@@ -6011,11 +6036,13 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
// We could assert that the property is configurable here, but we would need
// to do a lookup, which seems to be a bit of overkill.
bool only_attribute_changes = getter->IsNull() && setter->IsNull();
if (object->HasFastProperties() && !only_attribute_changes &&
+ access_control == v8::DEFAULT &&
(object->map()->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors)) {
bool getterOk = getter->IsNull() ||
@@ -6027,10 +6054,9 @@ void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
Handle<AccessorPair> accessors = CreateAccessorPairFor(object, name);
accessors->SetComponents(*getter, *setter);
+ accessors->set_access_flags(access_control);
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->SetPropertyCallback(*name, *accessors, attributes));
+ SetPropertyCallback(object, name, accessors, attributes);
}
@@ -6048,82 +6074,75 @@ bool JSObject::CanSetCallback(Name* name) {
LookupCallbackProperty(name, &callback_result);
if (callback_result.IsFound()) {
Object* obj = callback_result.GetCallbackObject();
- if (obj->IsAccessorInfo() &&
- AccessorInfo::cast(obj)->prohibits_overwriting()) {
- return false;
+ if (obj->IsAccessorInfo()) {
+ return !AccessorInfo::cast(obj)->prohibits_overwriting();
+ }
+ if (obj->IsAccessorPair()) {
+ return !AccessorPair::cast(obj)->prohibits_overwriting();
}
}
-
return true;
}
-MaybeObject* JSObject::SetElementCallback(uint32_t index,
- Object* structure,
- PropertyAttributes attributes) {
+void JSObject::SetElementCallback(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> structure,
+ PropertyAttributes attributes) {
+ Heap* heap = object->GetHeap();
PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
// Normalize elements to make this operation simple.
- SeededNumberDictionary* dictionary;
- { MaybeObject* maybe_dictionary = NormalizeElements();
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
// Update the dictionary with the new CALLBACKS property.
- { MaybeObject* maybe_dictionary = dictionary->Set(index, structure, details);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
- }
-
+ dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
+ details);
dictionary->set_requires_slow_elements();
+
// Update the dictionary backing store on the object.
- if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ if (object->elements()->map() == heap->non_strict_arguments_elements_map()) {
// Also delete any parameter alias.
//
// TODO(kmillikin): when deleting the last parameter alias we could
// switch to a direct backing store without the parameter map. This
// would allow GC of the context.
- FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* parameter_map = FixedArray::cast(object->elements());
if (index < static_cast<uint32_t>(parameter_map->length()) - 2) {
- parameter_map->set(index + 2, GetHeap()->the_hole_value());
+ parameter_map->set(index + 2, heap->the_hole_value());
}
- parameter_map->set(1, dictionary);
+ parameter_map->set(1, *dictionary);
} else {
- set_elements(dictionary);
+ object->set_elements(*dictionary);
}
-
- return GetHeap()->undefined_value();
}
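SetElementCallback above first normalizes the backing store into a number dictionary, writes the CALLBACKS entry under the index, and flags the dictionary so fast-path element code stays disabled. A toy model of the resulting structure (illustrative; the real SeededNumberDictionary also stores PropertyDetails and handles the arguments parameter map):

#include <cstdint>
#include <map>

struct AccessorEntry { const void* structure; int attributes; };

struct ElementDictionary {
  std::map<uint32_t, AccessorEntry> entries;
  bool requires_slow_elements = false;
};

static void SetElementCallback(ElementDictionary* dict, uint32_t index,
                               const void* structure, int attributes) {
  dict->entries[index] = AccessorEntry{structure, attributes};
  dict->requires_slow_elements = true;  // keeps element access on the slow path
}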
-MaybeObject* JSObject::SetPropertyCallback(Name* name,
- Object* structure,
- PropertyAttributes attributes) {
+void JSObject::SetPropertyCallback(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> structure,
+ PropertyAttributes attributes) {
// Normalize object to make this operation simple.
- MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_ok->IsFailure()) return maybe_ok;
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// For the global object allocate a new map to invalidate the global inline
// caches which have a global property cell reference directly in the code.
- if (IsGlobalObject()) {
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (object->IsGlobalObject()) {
+ Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
ASSERT(new_map->is_dictionary_map());
+ object->set_map(*new_map);
- set_map(new_map);
// When running crankshaft, changing the map is not enough. We
// need to deoptimize all functions that rely on this global
// object.
- Deoptimizer::DeoptimizeGlobalObject(this);
+ Deoptimizer::DeoptimizeGlobalObject(*object);
}
// Update the dictionary with the new CALLBACKS property.
PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
- maybe_ok = SetNormalizedProperty(name, structure, details);
- if (maybe_ok->IsFailure()) return maybe_ok;
-
- return GetHeap()->undefined_value();
+ SetNormalizedProperty(object, name, structure, details);
}
@@ -6131,7 +6150,8 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ v8::AccessControl access_control) {
Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
if (object->IsAccessCheckNeeded() &&
@@ -6144,14 +6164,18 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsNull()) return;
ASSERT(proto->IsJSGlobalObject());
- DefineAccessor(
- Handle<JSObject>::cast(proto), name, getter, setter, attributes);
+ DefineAccessor(Handle<JSObject>::cast(proto),
+ name,
+ getter,
+ setter,
+ attributes,
+ access_control);
return;
}
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(*name)->TryFlatten();
@@ -6168,7 +6192,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
if (is_element) {
preexists = object->HasLocalElement(index);
if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(object, index);
+ old_value = Object::GetElement(isolate, object, index);
}
} else {
LookupResult lookup(isolate);
@@ -6181,9 +6205,11 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
}
if (is_element) {
- DefineElementAccessor(object, index, getter, setter, attributes);
+ DefineElementAccessor(
+ object, index, getter, setter, attributes, access_control);
} else {
- DefinePropertyAccessor(object, name, getter, setter, attributes);
+ DefinePropertyAccessor(
+ object, name, getter, setter, attributes, access_control);
}
if (is_observed) {
@@ -6312,22 +6338,25 @@ bool JSObject::DefineFastAccessor(Handle<JSObject> object,
}
-MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
- Isolate* isolate = GetIsolate();
- Name* name = Name::cast(info->name());
+Handle<Object> JSObject::SetAccessor(Handle<JSObject> object,
+ Handle<AccessorInfo> info) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<Name> name(Name::cast(info->name()));
+
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->undefined_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->DefineAccessor(info);
+ return SetAccessor(Handle<JSObject>::cast(proto), info);
}
// Make sure that the top context does not change when doing callbacks or
@@ -6335,18 +6364,18 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
AssertNoContextChange ncc;
// Try to flatten before operating on the string.
- if (name->IsString()) String::cast(name)->TryFlatten();
+ if (name->IsString()) FlattenString(Handle<String>::cast(name));
- if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
+ if (!object->CanSetCallback(*name)) return factory->undefined_value();
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
if (is_element) {
- if (IsJSArray()) return isolate->heap()->undefined_value();
+ if (object->IsJSArray()) return factory->undefined_value();
// Accessors overwrite previous callbacks (cf. with getters/setters).
- switch (GetElementsKind()) {
+ switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
@@ -6365,7 +6394,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
case EXTERNAL_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
- return isolate->heap()->undefined_value();
+ return factory->undefined_value();
case DICTIONARY_ELEMENTS:
break;
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -6373,25 +6402,21 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
break;
}
- MaybeObject* maybe_ok =
- SetElementCallback(index, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
+ SetElementCallback(object, index, info, info->property_attributes());
} else {
// Lookup the name.
LookupResult result(isolate);
- LocalLookup(name, &result, true);
+ object->LocalLookup(*name, &result, true);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
- return isolate->heap()->undefined_value();
+ return factory->undefined_value();
}
- MaybeObject* maybe_ok =
- SetPropertyCallback(name, info, info->property_attributes());
- if (maybe_ok->IsFailure()) return maybe_ok;
+ SetPropertyCallback(object, name, info, info->property_attributes());
}
- return this;
+ return object;
}
@@ -6400,7 +6425,7 @@ MaybeObject* JSObject::LookupAccessor(Name* name, AccessorComponent component) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -6500,6 +6525,15 @@ MaybeObject* Map::RawCopy(int instance_size) {
}
+Handle<Map> Map::CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->CopyNormalized(mode, sharing),
+ Map);
+}
+
+
MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing) {
int new_instance_size = instance_size();
@@ -6517,6 +6551,7 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
result->set_dictionary_map(true);
+ result->set_migration_target(false);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && result->is_shared()) {
@@ -6578,7 +6613,7 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors,
} else {
// Descriptor arrays grow by 50%.
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- old_size, old_size < 4 ? 1 : old_size / 2);
+ GetIsolate(), old_size, old_size < 4 ? 1 : old_size / 2);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -6643,7 +6678,7 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
set_transitions(transitions);
result->SetBackPointer(this);
- } else if (flag != OMIT_TRANSITION_KEEP_REPRESENTATIONS) {
+ } else {
descriptors->InitializeRepresentations(Representation::Tagged());
}
@@ -6828,7 +6863,8 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
}
DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(old_size, 1);
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(GetIsolate(), old_size, 1);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -6875,7 +6911,7 @@ MaybeObject* DescriptorArray::CopyUpToAddAttributes(
int size = enumeration_index;
DescriptorArray* descriptors;
- MaybeObject* maybe_descriptors = Allocate(size);
+ MaybeObject* maybe_descriptors = Allocate(GetIsolate(), size);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(descriptors);
@@ -6922,7 +6958,8 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
ASSERT_LT(insertion_index, new_size);
DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
+ MaybeObject* maybe_descriptors =
+ DescriptorArray::Allocate(GetIsolate(), new_size);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(new_descriptors);
@@ -7712,8 +7749,10 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
#endif
-MaybeObject* DescriptorArray::Allocate(int number_of_descriptors, int slack) {
- Heap* heap = Isolate::Current()->heap();
+MaybeObject* DescriptorArray::Allocate(Isolate* isolate,
+ int number_of_descriptors,
+ int slack) {
+ Heap* heap = isolate->heap();
// Do not use DescriptorArray::cast on incomplete object.
int size = number_of_descriptors + slack;
if (size == 0) return heap->empty_descriptor_array();
@@ -7771,6 +7810,8 @@ void DescriptorArray::CopyFrom(int dst_index,
MaybeObject* DescriptorArray::Merge(int verbatim,
int valid,
int new_size,
+ int modify_index,
+ StoreMode store_mode,
DescriptorArray* other) {
ASSERT(verbatim <= valid);
ASSERT(valid <= new_size);
@@ -7779,7 +7820,8 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
// Allocate a new descriptor array large enough to hold the required
// descriptors, with minimally the exact same size as this descriptor array.
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- new_size, Max(new_size, other->number_of_descriptors()) - new_size);
+ GetIsolate(), new_size,
+ Max(new_size, other->number_of_descriptors()) - new_size);
if (!maybe_descriptors->To(&result)) return maybe_descriptors;
ASSERT(result->length() > length() ||
result->NumberOfSlackDescriptors() > 0 ||
@@ -7794,7 +7836,7 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
int current_offset = 0;
for (descriptor = 0; descriptor < verbatim; descriptor++) {
if (GetDetails(descriptor).type() == FIELD) current_offset++;
- result->CopyFrom(descriptor, this, descriptor, witness);
+ result->CopyFrom(descriptor, other, descriptor, witness);
}
// |verbatim| -> |valid|
@@ -7804,6 +7846,7 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
PropertyDetails other_details = other->GetDetails(descriptor);
if (details.type() == FIELD || other_details.type() == FIELD ||
+ (store_mode == FORCE_FIELD && descriptor == modify_index) ||
(details.type() == CONSTANT &&
other_details.type() == CONSTANT &&
GetValue(descriptor) != other->GetValue(descriptor))) {
@@ -7822,7 +7865,8 @@ MaybeObject* DescriptorArray::Merge(int verbatim,
// |valid| -> |new_size|
for (; descriptor < new_size; descriptor++) {
PropertyDetails details = other->GetDetails(descriptor);
- if (details.type() == FIELD) {
+ if (details.type() == FIELD ||
+ (store_mode == FORCE_FIELD && descriptor == modify_index)) {
Name* key = other->GetKey(descriptor);
FieldDescriptor d(key,
current_offset++,
@@ -7937,19 +7981,21 @@ Object* AccessorPair::GetComponent(AccessorComponent component) {
}
-MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
+MaybeObject* DeoptimizationInputData::Allocate(Isolate* isolate,
+ int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
- return HEAP->AllocateFixedArray(LengthFor(deopt_entry_count),
- pretenure);
+ return isolate->heap()->AllocateFixedArray(LengthFor(deopt_entry_count),
+ pretenure);
}
-MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
+MaybeObject* DeoptimizationOutputData::Allocate(Isolate* isolate,
+ int number_of_deopt_points,
PretenureFlag pretenure) {
- if (number_of_deopt_points == 0) return HEAP->empty_fixed_array();
- return HEAP->AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
- pretenure);
+ if (number_of_deopt_points == 0) return isolate->heap()->empty_fixed_array();
+ return isolate->heap()->AllocateFixedArray(
+ LengthOfFixedArray(number_of_deopt_points), pretenure);
}
@@ -7966,8 +8012,34 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
#endif
+static bool IsIdentifier(UnicodeCache* cache, Name* name) {
+ // Checks whether the buffer contains an identifier (no escape).
+ if (!name->IsString()) return false;
+ String* string = String::cast(name);
+ if (string->length() == 0) return false;
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(string, &op);
+ if (!cache->IsIdentifierStart(stream.GetNext())) {
+ return false;
+ }
+ while (stream.HasMore()) {
+ if (!cache->IsIdentifierPart(stream.GetNext())) {
+ return false;
+ }
+ }
+ return true;
+}
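+// Illustrative: "foo" and "_bar$1" count as identifiers here, while ""
+// (empty), "1x" (digit start) and "a-b" ('-' is not an identifier part)
+// do not.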
+
+
+bool Name::IsCacheable(Isolate* isolate) {
+ return IsSymbol() ||
+ IsIdentifier(isolate->unicode_cache(), this) ||
+ this == isolate->heap()->hidden_string();
+}
+
+
bool String::LooksValid() {
- if (!Isolate::Current()->heap()->Contains(this)) return false;
+ if (!GetIsolate()->heap()->Contains(this)) return false;
return true;
}
@@ -8127,8 +8199,7 @@ const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
}
-void Relocatable::PostGarbageCollectionProcessing() {
- Isolate* isolate = Isolate::Current();
+void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
Relocatable* current = isolate->relocatable_top();
while (current != NULL) {
current->PostGarbageCollection();
@@ -8139,7 +8210,7 @@ void Relocatable::PostGarbageCollectionProcessing() {
// Reserve space for statics needing saving and restoring.
int Relocatable::ArchiveSpacePerThread() {
- return sizeof(Isolate::Current()->relocatable_top());
+ return sizeof(Relocatable*); // NOLINT
}
@@ -8165,8 +8236,7 @@ char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
}
-void Relocatable::Iterate(ObjectVisitor* v) {
- Isolate* isolate = Isolate::Current();
+void Relocatable::Iterate(Isolate* isolate, ObjectVisitor* v) {
Iterate(v, isolate->relocatable_top());
}
@@ -8942,6 +9012,7 @@ AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
// involves carefully checking the object immediately after the JSArray
// (if there is one) to see if it's an AllocationMemento.
if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
+ ASSERT(object->GetHeap()->InToSpace(object));
Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
object->Size();
if ((ptr_end + AllocationMemento::kSize) <=
@@ -8952,7 +9023,7 @@ AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) {
if (*possible_allocation_memento_map ==
object->GetHeap()->allocation_memento_map()) {
AllocationMemento* memento = AllocationMemento::cast(
- reinterpret_cast<Object*>(ptr_end + 1));
+ reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
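+ // (ptr_end is an untagged address; adding kHeapObjectTag, which is 1,
+ // forms a tagged HeapObject pointer. The old "+ 1" relied on that tag
+ // value only implicitly.)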
return memento;
}
}
@@ -9220,54 +9291,41 @@ void JSFunction::MarkForLazyRecompilation() {
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
+ ASSERT(!shared()->is_generator());
set_code_no_write_barrier(
GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
// No write barrier required, since the builtin is part of the root set.
}
-void JSFunction::MarkForParallelRecompilation() {
+void JSFunction::MarkForConcurrentRecompilation() {
ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- if (!FLAG_parallel_recompilation) {
- JSFunction::MarkForLazyRecompilation();
- return;
- }
- if (FLAG_trace_parallel_recompilation) {
+ ASSERT(!shared()->is_generator());
+ ASSERT(FLAG_concurrent_recompilation);
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
PrintName();
- PrintF(" for parallel recompilation.\n");
+ PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile));
- // No write barrier required, since the builtin is part of the root set.
-}
-
-
-void JSFunction::MarkForInstallingRecompiledCode() {
- // The debugger could have switched the builtin to lazy compile.
- // In that case, simply carry on. It will be dealt with later.
- ASSERT(!IsOptimized());
- ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_parallel_recompilation);
- set_code_no_write_barrier(
- GetIsolate()->builtins()->builtin(Builtins::kInstallRecompiledCode));
+ GetIsolate()->builtins()->builtin(Builtins::kConcurrentRecompile));
// No write barrier required, since the builtin is part of the root set.
}
void JSFunction::MarkInRecompileQueue() {
- // We can only arrive here via the parallel-recompilation builtin. If
+ // We can only arrive here via the concurrent-recompilation builtin. If
// break points were set, the code would point to the lazy-compile builtin.
ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
- ASSERT(IsMarkedForParallelRecompilation() && !IsOptimized());
+ ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_parallel_recompilation);
- if (FLAG_trace_parallel_recompilation) {
+ ASSERT(FLAG_concurrent_recompilation);
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queueing ");
PrintName();
- PrintF(" for parallel recompilation.\n");
+ PrintF(" for concurrent recompilation.\n");
}
set_code_no_write_barrier(
GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue));
@@ -9281,7 +9339,7 @@ static bool CompileLazyHelper(CompilationInfo* info,
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
ASSERT(!info->isolate()->has_pending_exception());
bool result = Compiler::CompileLazy(info);
- ASSERT(result != Isolate::Current()->has_pending_exception());
+ ASSERT(result != info->isolate()->has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) {
info->isolate()->clear_pending_exception();
}
@@ -9320,7 +9378,7 @@ MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
if (value->IsSmi()) {
// No optimized code map.
ASSERT_EQ(0, Smi::cast(value)->value());
- // Crate 3 entries per context {context, code, literals}.
+ // Create 3 entries per context {context, code, literals}.
MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
if (!maybe->To(&new_code_map)) return maybe;
new_code_map->set(kEntriesStart + 0, native_context);
@@ -9450,11 +9508,25 @@ bool JSFunction::CompileLazy(Handle<JSFunction> function,
}
+Handle<Code> JSFunction::CompileOsr(Handle<JSFunction> function,
+ BailoutId osr_ast_id,
+ ClearExceptionFlag flag) {
+ CompilationInfoWithZone info(function);
+ info.SetOptimizing(osr_ast_id);
+ if (CompileLazyHelper(&info, flag)) {
+ // TODO(titzer): don't install the OSR code.
+ // ASSERT(function->code() != *info.code());
+ return info.code();
+ } else {
+ return Handle<Code>::null();
+ }
+}
+
+
bool JSFunction::CompileOptimized(Handle<JSFunction> function,
- BailoutId osr_ast_id,
ClearExceptionFlag flag) {
CompilationInfoWithZone info(function);
- info.SetOptimizing(osr_ast_id);
+ info.SetOptimizing(BailoutId::None());
return CompileLazyHelper(&info, flag);
}
@@ -9480,21 +9552,13 @@ bool JSFunction::IsInlineable() {
void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), object->OptimizeAsPrototype());
-}
-
-
-MaybeObject* JSObject::OptimizeAsPrototype() {
- if (IsGlobalObject()) return this;
+ if (object->IsGlobalObject()) return;
// Make sure prototypes are fast objects and their maps have the bit set
// so they remain fast.
- if (!HasFastProperties()) {
- MaybeObject* new_proto = TransformToFastProperties(0);
- if (new_proto->IsFailure()) return new_proto;
- ASSERT(new_proto == this);
+ if (!object->HasFastProperties()) {
+ TransformToFastProperties(object, 0);
}
- return this;
}
@@ -9646,40 +9710,38 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
}
-bool JSFunction::PassesHydrogenFilter() {
+// The filter is a pattern that matches function names in this way:
+// "*" all; the default
+// "-" all but the top-level function
+// "-name" all but the function "name"
+// "" only the top-level function
+// "name" only the function "name"
+// "name*" only functions starting with "name"
+bool JSFunction::PassesFilter(const char* raw_filter) {
+ if (*raw_filter == '*') return true;
String* name = shared()->DebugName();
- // The filter string is a pattern that matches functions in this way:
- // "*" all; the default
- // "-" all but the top-level function
- // "-name" all but the function "name"
- // "" only the top-level function
- // "name" only the function "name"
- // "name*" only functions starting with "name"
- if (*FLAG_hydrogen_filter != '*') {
- Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
- if (filter.length() == 0) return name->length() == 0;
- if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
- if (filter[0] == '-' &&
- !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
- return true;
- }
- if (filter[filter.length() - 1] == '*' &&
- name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
- return true;
- }
- return false;
+ Vector<const char> filter = CStrVector(raw_filter);
+ if (filter.length() == 0) return name->length() == 0;
+ if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true;
+ if (filter[0] == '-' &&
+ !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) {
+ return true;
}
-
- return true;
+ if (filter[filter.length() - 1] == '*' &&
+ name->IsUtf8EqualTo(filter.SubVector(0, filter.length() - 1), true)) {
+ return true;
+ }
+ return false;
}
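+// Illustrative: with raw_filter "foo*" this passes "foo" and "foobar";
+// with "-foo" it passes everything except "foo"; with "" it passes only
+// the (empty-named) top-level function.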
-MaybeObject* Oddball::Initialize(const char* to_string,
+MaybeObject* Oddball::Initialize(Heap* heap,
+ const char* to_string,
Object* to_number,
byte kind) {
String* internalized_to_string;
{ MaybeObject* maybe_string =
- Isolate::Current()->heap()->InternalizeUtf8String(
+ heap->InternalizeUtf8String(
CStrVector(to_string));
if (!maybe_string->To(&internalized_to_string)) return maybe_string;
}
@@ -9804,7 +9866,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
}
-void SharedFunctionInfo::DisableOptimization(const char* reason) {
+void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
// is there because we flush non-optimized code thereby losing the
@@ -9813,16 +9875,20 @@ void SharedFunctionInfo::DisableOptimization(const char* reason) {
// non-optimizable if optimization is disabled for the shared
// function info.
set_optimization_disabled(true);
+ set_bailout_reason(reason);
// Code should be the lazy compilation stub or else unoptimized. If the
// latter, disable optimization for the code too.
ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
if (code()->kind() == Code::FUNCTION) {
code()->set_optimizable(false);
}
+ PROFILE(GetIsolate(),
+ LogExistingFunction(Handle<SharedFunctionInfo>(this),
+ Handle<Code>(code())));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
ShortPrint();
- PrintF(", reason: %s]\n", reason);
+ PrintF(", reason: %s]\n", GetBailoutReason(reason));
}
}
@@ -10316,7 +10382,7 @@ void Code::ClearInlineCaches() {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
- IC::Clear(info->pc());
+ IC::Clear(this->GetIsolate(), info->pc());
}
}
}
@@ -10340,6 +10406,18 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
}
+BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
+ DisallowHeapAllocation no_gc;
+ ASSERT(kind() == FUNCTION);
+ for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc);
+ !it.Done();
+ it.Next()) {
+ if (it.pc_offset() == pc_offset) return it.ast_id();
+ }
+ return BailoutId::None();
+}
+
+
bool Code::allowed_in_shared_map_code_cache() {
return is_keyed_load_stub() || is_keyed_store_stub() ||
(is_compare_ic_stub() &&
@@ -10400,7 +10478,7 @@ int Code::GetAge() {
void Code::GetCodeAgeAndParity(Code* code, Age* age,
MarkingParity* parity) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = code->GetIsolate();
Builtins* builtins = isolate->builtins();
Code* stub = NULL;
#define HANDLE_CODE_AGE(AGE) \
@@ -10635,7 +10713,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
- case Translation::ARGUMENTS_OBJECT: {
+ case Translation::DUPLICATED_OBJECT: {
+ int object_index = iterator.Next();
+ PrintF(out, "{object_index=%d}", object_index);
+ break;
+ }
+
+ case Translation::ARGUMENTS_OBJECT:
+ case Translation::CAPTURED_OBJECT: {
int args_length = iterator.Next();
PrintF(out, "{length=%d}", args_length);
break;
@@ -10793,18 +10878,18 @@ void Code::Disassemble(const char* name, FILE* out) {
// If there is no back edge table, the "table start" will be at or after
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
- Address back_edge_cursor = instruction_start() + offset;
- uint32_t table_length = Memory::uint32_at(back_edge_cursor);
- PrintF(out, "Back edges (size = %u)\n", table_length);
+ DisallowHeapAllocation no_gc;
+ FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc);
+
+ PrintF(out, "Back edges (size = %u)\n", back_edges.table_length());
PrintF(out, "ast_id pc_offset loop_depth\n");
- for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t ast_id = Memory::uint32_at(back_edge_cursor);
- uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
- uint32_t loop_depth = Memory::uint32_at(back_edge_cursor +
- 2 * kIntSize);
- PrintF(out, "%6u %9u %10u\n", ast_id, pc_offset, loop_depth);
- back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
+
+ for ( ; !back_edges.Done(); back_edges.Next()) {
+ PrintF(out, "%6d %9u %10u\n", back_edges.ast_id().ToInt(),
+ back_edges.pc_offset(),
+ back_edges.loop_depth());
}
+
PrintF(out, "\n");
}
#ifdef OBJECT_PRINT
@@ -10970,7 +11055,7 @@ static bool GetOldValue(Isolate* isolate,
ASSERT(attributes != ABSENT);
if (attributes == DONT_DELETE) return false;
old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
- ? Object::GetElement(object, index)
+ ? Object::GetElement(isolate, object, index)
: Handle<Object>::cast(isolate->factory()->the_hole_value()));
indices->Add(index);
return true;
@@ -10990,7 +11075,8 @@ static void EnqueueSpliceRecord(Handle<JSArray> object,
{ object, index_object, deleted, add_count_object };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_enqueue_splice()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_enqueue_splice()),
isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
&threw);
ASSERT(!threw);
@@ -11003,7 +11089,8 @@ static void BeginPerformSplice(Handle<JSArray> object) {
Handle<Object> args[] = { object };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_begin_perform_splice()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_begin_perform_splice()),
isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
&threw);
ASSERT(!threw);
@@ -11016,7 +11103,8 @@ static void EndPerformSplice(Handle<JSArray> object) {
Handle<Object> args[] = { object };
bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_end_perform_splice()),
+ Execution::Call(isolate,
+ Handle<JSFunction>(isolate->observers_end_perform_splice()),
isolate->factory()->undefined_value(), ARRAY_SIZE(args), args,
&threw);
ASSERT(!threw);
@@ -11236,11 +11324,9 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
int start = starts.at(group);
int end = starts.at(group + 1);
int number_of_entries = starts.number_of_entries();
- if (start < end && entries->object_at(end - 1) == *object) {
- // Do not append the compilation info if it is already in the array.
- // It is sufficient to just check only the last element because
- // we process embedded maps of an optimized code in one batch.
- return entries;
+ // Check for existing entry to avoid duplicates.
+ for (int i = start; i < end; i++) {
+ if (entries->object_at(i) == *object) return entries;
}
if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
Factory* factory = entries->GetIsolate()->factory();
@@ -11336,8 +11422,9 @@ void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
bool DependentCode::Contains(DependencyGroup group, Code* code) {
GroupStartIndexes starts(this);
- int number_of_entries = starts.number_of_entries();
- for (int i = 0; i < number_of_entries; i++) {
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ for (int i = start; i < end; i++) {
if (object_at(i) == code) return true;
}
return false;
@@ -11347,6 +11434,7 @@ bool DependentCode::Contains(DependencyGroup group, Code* code) {
void DependentCode::DeoptimizeDependentCodeGroup(
Isolate* isolate,
DependentCode::DependencyGroup group) {
+ ASSERT(AllowCodeDependencyChange::IsAllowed());
DisallowHeapAllocation no_allocation_scope;
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
@@ -11354,13 +11442,15 @@ void DependentCode::DeoptimizeDependentCodeGroup(
int code_entries = starts.number_of_entries();
if (start == end) return;
- // Collect all the code to deoptimize.
- Zone zone(isolate);
- ZoneList<Code*> codes(end - start, &zone);
+ // Mark all the code that needs to be deoptimized.
+ bool marked = false;
for (int i = start; i < end; i++) {
if (is_code_at(i)) {
Code* code = code_at(i);
- if (!code->marked_for_deoptimization()) codes.Add(code, &zone);
+ if (!code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ marked = true;
+ }
} else {
CompilationInfo* info = compilation_info_at(i);
info->AbortDueToDependencyChange();
@@ -11376,7 +11466,8 @@ void DependentCode::DeoptimizeDependentCodeGroup(
clear_at(i);
}
set_number_of_entries(group, 0);
- Deoptimizer::DeoptimizeCodeList(isolate, &codes);
+
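+ // (DeoptimizeMarkedCode walks the optimized code and deoptimizes the
+ // objects whose marked_for_deoptimization bit is set, which replaces
+ // the old per-group code-list approach above.)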
+ if (marked) Deoptimizer::DeoptimizeMarkedCode(isolate);
}
@@ -11531,16 +11622,18 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(this);
Handle<Object> value_handle(value, isolate);
if (!interceptor->setter()->IsUndefined()) {
- v8::IndexedPropertySetter setter =
- v8::ToCData<v8::IndexedPropertySetter>(interceptor->setter());
+ v8::IndexedPropertySetterCallback setter =
+ v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", this, index));
PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
@@ -11573,7 +11666,8 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
Handle<ExecutableAccessorInfo> data(
ExecutableAccessorInfo::cast(structure));
Object* fun_obj = data->getter();
- v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+ v8::AccessorGetterCallback call_fun =
+ v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
if (call_fun == NULL) return isolate->heap()->undefined_value();
HandleScope scope(isolate);
Handle<JSObject> self(JSObject::cast(receiver));
@@ -11638,7 +11732,8 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
Handle<ExecutableAccessorInfo> data(
ExecutableAccessorInfo::cast(structure));
Object* call_obj = data->setter();
- v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+ v8::AccessorSetterCallback call_fun =
+ v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
@@ -12092,18 +12187,17 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
}
-MaybeObject* JSReceiver::SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_proto) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->SetElementWithHandler(
- this, index, value, strict_mode);
- } else {
- return JSObject::cast(this)->SetElement(
- index, value, attributes, strict_mode, check_proto);
+Handle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ if (object->IsJSProxy()) {
+ return JSProxy::SetElementWithHandler(
+ Handle<JSProxy>::cast(object), object, index, value, strict_mode);
}
+ return JSObject::SetElement(
+ Handle<JSObject>::cast(object), index, value, attributes, strict_mode);
}
@@ -12128,7 +12222,8 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
if (object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ Handle<Object> number =
+ Execution::ToNumber(object->GetIsolate(), value, &has_exception);
if (has_exception) return Handle<Object>();
value = number;
}
@@ -12205,7 +12300,7 @@ MaybeObject* JSObject::SetElement(uint32_t index,
if (old_attributes != ABSENT) {
if (self->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(self, index);
+ old_value = Object::GetElement(isolate, self, index);
} else if (self->IsJSArray()) {
// Store old array length in case adding an element grows the array.
old_length_handle = handle(Handle<JSArray>::cast(self)->length(), isolate);
@@ -12247,7 +12342,7 @@ MaybeObject* JSObject::SetElement(uint32_t index,
} else if (old_value->IsTheHole()) {
EnqueueChangeRecord(self, "reconfigured", name, old_value);
} else {
- Handle<Object> new_value = Object::GetElement(self, index);
+ Handle<Object> new_value = Object::GetElement(isolate, self, index);
bool value_changed = !old_value->SameValue(*new_value);
if (old_attributes != new_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
@@ -12539,16 +12634,18 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
Handle<Object> this_handle(receiver, isolate);
Handle<JSObject> holder_handle(this, isolate);
if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-get", this, index));
PropertyCallbackArguments
@@ -12573,7 +12670,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
Object* pt = holder_handle->GetPrototype();
if (pt == heap->null_value()) return heap->undefined_value();
- return pt->GetElementWithReceiver(*this_handle, index);
+ return pt->GetElementWithReceiver(isolate, *this_handle, index);
}
@@ -12852,8 +12949,8 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
Handle<String> name_handle(String::cast(name));
if (!interceptor->getter()->IsUndefined()) {
- v8::NamedPropertyGetter getter =
- v8::ToCData<v8::NamedPropertyGetter>(interceptor->getter());
+ v8::NamedPropertyGetterCallback getter =
+ v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name));
PropertyCallbackArguments
@@ -13689,6 +13786,74 @@ MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
template<typename Shape, typename Key>
+uint32_t HashTable<Shape, Key>::EntryForProbe(Key key,
+ Object* k,
+ int probe,
+ uint32_t expected) {
+ uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
+ uint32_t capacity = Capacity();
+ uint32_t entry = FirstProbe(hash, capacity);
+ for (int i = 1; i < probe; i++) {
+ if (entry == expected) return expected;
+ entry = NextProbe(entry, i, capacity);
+ }
+ return entry;
+}
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::Swap(uint32_t entry1,
+ uint32_t entry2,
+ WriteBarrierMode mode) {
+ int index1 = EntryToIndex(entry1);
+ int index2 = EntryToIndex(entry2);
+ Object* temp[Shape::kEntrySize];
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ temp[j] = get(index1 + j);
+ }
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ set(index1 + j, get(index2 + j), mode);
+ }
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ set(index2 + j, temp[j], mode);
+ }
+}
+
+
+template<typename Shape, typename Key>
+void HashTable<Shape, Key>::Rehash(Key key) {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
+ uint32_t capacity = Capacity();
+ bool done = false;
+ for (int probe = 1; !done; probe++) {
+ // All elements at entries given by one of the first _probe_ probes
+ // are placed correctly. Other elements might need to be moved.
+ done = true;
+ for (uint32_t current = 0; current < capacity; current++) {
+ Object* current_key = get(EntryToIndex(current));
+ if (IsKey(current_key)) {
+ uint32_t target = EntryForProbe(key, current_key, probe, current);
+ if (current == target) continue;
+ Object* target_key = get(EntryToIndex(target));
+ if (!IsKey(target_key) ||
+ EntryForProbe(key, target_key, probe, target) != target) {
+ // Put the current element into the correct position.
+ Swap(current, target, mode);
+ // The other element will be processed on the next iteration.
+ current--;
+ } else {
+ // The place for the current element is occupied. Leave the element
+ // for the next probe.
+ done = false;
+ }
+ }
+ }
+ }
+}
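+// Sketch of the pass invariant: after round |probe|, every element that
+// can reach its proper slot within |probe| probes is in place. Swap()
+// moves a displaced element straight to its target and |current--|
+// revisits the evicted occupant, so each round shrinks the set of
+// misplaced elements and the outer loop terminates.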
+
+
+template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
@@ -14352,8 +14517,8 @@ static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
}
-Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject> global,
+Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
+ Handle<JSGlobalObject> global,
Handle<Name> name) {
ASSERT(!global->HasFastProperties());
int entry = global->property_dictionary()->FindEntry(*name);
@@ -15218,7 +15383,7 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor(
// Allocate the instance descriptor.
DescriptorArray* descriptors;
MaybeObject* maybe_descriptors =
- DescriptorArray::Allocate(instance_descriptor_length);
+ DescriptorArray::Allocate(GetIsolate(), instance_descriptor_length);
if (!maybe_descriptors->To(&descriptors)) {
return maybe_descriptors;
}
@@ -15325,6 +15490,7 @@ MaybeObject* ObjectHashSet::Add(Object* key) {
int hash;
{ MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
+ ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
int entry = FindEntry(key);
@@ -15386,6 +15552,7 @@ MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
int hash;
{ MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
if (maybe_hash->IsFailure()) return maybe_hash;
+ ASSERT(key->GetHash(OMIT_CREATION) == maybe_hash);
hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
}
int entry = FindEntry(key);
@@ -15509,7 +15676,7 @@ void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
int code_position,
Handle<Object> break_point_object) {
Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
- Isolate::Current());
+ debug_info->GetIsolate());
if (break_point_info->IsUndefined()) return;
BreakPointInfo::ClearBreakPoint(
Handle<BreakPointInfo>::cast(break_point_info),
@@ -15522,7 +15689,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
int source_position,
int statement_position,
Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = debug_info->GetIsolate();
Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
isolate);
if (!break_point_info->IsUndefined()) {
@@ -15636,7 +15803,7 @@ int DebugInfo::GetBreakPointInfoIndex(int code_position) {
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = break_point_info->GetIsolate();
// If there are no break points just ignore.
if (break_point_info->break_point_objects()->IsUndefined()) return;
// If there is a single break point clear it if it is the same.
@@ -15964,4 +16131,15 @@ void PropertyCell::AddDependentCode(Handle<Code> code) {
}
+const char* GetBailoutReason(BailoutReason reason) {
+ ASSERT(reason < kLastErrorMessage);
+#define ERROR_MESSAGES_TEXTS(C, T) T,
+ static const char* error_messages_[] = {
+ ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)
+ };
+#undef ERROR_MESSAGES_TEXTS
+ return error_messages_[reason];
+}
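+// e.g. GetBailoutReason(kNoReason) yields "no reason": the enum value
+// directly indexes the text table generated from ERROR_MESSAGES_LIST.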
+
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/objects.h b/chromium/v8/src/objects.h
index d370c32a1a4..12087eb00a6 100644
--- a/chromium/v8/src/objects.h
+++ b/chromium/v8/src/objects.h
@@ -232,6 +232,13 @@ static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
+// Indicates whether a value can be loaded as a constant.
+enum StoreMode {
+ ALLOW_AS_CONSTANT,
+ FORCE_FIELD
+};
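+// FORCE_FIELD is used e.g. by DescriptorArray::Merge to turn the
+// descriptor at |modify_index| into a FIELD even when both merged
+// arrays agree on a CONSTANT.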
+
+
// PropertyNormalizationMode is used to specify whether to keep
// inobject properties when normalizing properties of a JSObject.
enum PropertyNormalizationMode {
@@ -258,7 +265,6 @@ enum CreationFlag {
// Indicates whether transitions can be added to a source map or not.
enum TransitionFlag {
INSERT_TRANSITION,
- OMIT_TRANSITION_KEEP_REPRESENTATIONS,
OMIT_TRANSITION
};
@@ -333,6 +339,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(CONS_STRING_TYPE) \
V(CONS_ASCII_STRING_TYPE) \
V(SLICED_STRING_TYPE) \
+ V(SLICED_ASCII_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
V(EXTERNAL_ASCII_STRING_TYPE) \
V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
@@ -416,6 +423,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(JS_TYPED_ARRAY_TYPE) \
V(JS_DATA_VIEW_TYPE) \
V(JS_PROXY_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_MAP_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_REGEXP_TYPE) \
@@ -783,7 +792,6 @@ enum InstanceType {
// Pseudo-types
FIRST_TYPE = 0x0,
LAST_TYPE = JS_FUNCTION_TYPE,
- INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NAME_TYPE = FIRST_TYPE,
LAST_NAME_TYPE = SYMBOL_TYPE,
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
@@ -1046,7 +1054,300 @@ class MaybeObject BASE_EMBEDDED {
V(AccessCheckNeeded) \
V(Cell) \
V(PropertyCell) \
- V(ObjectHashTable) \
+ V(ObjectHashTable)
+
+
+#define ERROR_MESSAGES_LIST(V) \
+ V(kNoReason, "no reason") \
+ \
+ V(k32BitValueInRegisterIsNotZeroExtended, \
+ "32 bit value in register is not zero-extended") \
+ V(kAlignmentMarkerExpected, "alignment marker expected") \
+ V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
+ V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
+ V(kArgumentsObjectValueInATestContext, \
+ "arguments object value in a test context") \
+ V(kArrayBoilerplateCreationFailed, "array boilerplate creation failed") \
+ V(kArrayIndexConstantValueTooBig, "array index constant value too big") \
+ V(kAssignmentToArguments, "assignment to arguments") \
+ V(kAssignmentToLetVariableBeforeInitialization, \
+ "assignment to let variable before initialization") \
+ V(kAssignmentToLOOKUPVariable, "assignment to LOOKUP variable") \
+ V(kAssignmentToParameterFunctionUsesArgumentsObject, \
+ "assignment to parameter, function uses arguments object") \
+ V(kAssignmentToParameterInArgumentsObject, \
+ "assignment to parameter in arguments object") \
+ V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
+ V(kBadValueContextForArgumentsObjectValue, \
+ "bad value context for arguments object value") \
+ V(kBadValueContextForArgumentsValue, \
+ "bad value context for arguments value") \
+ V(kBailedOutDueToDependencyChange, "bailed out due to dependency change") \
+ V(kBailoutWasNotPrepared, "bailout was not prepared") \
+ V(kBinaryStubGenerateFloatingPointCode, \
+ "BinaryStub_GenerateFloatingPointCode") \
+ V(kBothRegistersWereSmisInSelectNonSmi, \
+ "Both registers were smis in SelectNonSmi") \
+ V(kCallToAJavaScriptRuntimeFunction, \
+ "call to a JavaScript runtime function") \
+ V(kCannotTranslatePositionInChangedArea, \
+ "Cannot translate position in changed area") \
+ V(kCodeGenerationFailed, "code generation failed") \
+ V(kCodeObjectNotProperlyPatched, "code object not properly patched") \
+ V(kCompoundAssignmentToLookupSlot, "compound assignment to lookup slot") \
+ V(kContextAllocatedArguments, "context-allocated arguments") \
+ V(kDebuggerIsActive, "debugger is active") \
+ V(kDebuggerStatement, "DebuggerStatement") \
+ V(kDeclarationInCatchContext, "Declaration in catch context") \
+ V(kDeclarationInWithContext, "Declaration in with context") \
+ V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
+ V(kDeleteWithGlobalVariable, "delete with global variable") \
+ V(kDeleteWithNonGlobalVariable, "delete with non-global variable") \
+ V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
+ V(kDontDeleteCellsCannotContainTheHole, \
+ "DontDelete cells can't contain the hole") \
+ V(kDoPushArgumentNotImplementedForDoubleType, \
+ "DoPushArgument not implemented for double type") \
+ V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
+ "EmitLoadRegister: Unsupported double immediate") \
+ V(kEval, "eval") \
+ V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
+ V(kExpectedAlignmentMarker, "expected alignment marker") \
+ V(kExpectedAllocationSiteInCell, \
+ "Expected AllocationSite in property cell") \
+ V(kExpectedPropertyCellInRegisterA2, \
+ "Expected property cell in register a2") \
+ V(kExpectedPropertyCellInRegisterEbx, \
+ "Expected property cell in register ebx") \
+ V(kExpectedPropertyCellInRegisterRbx, \
+ "Expected property cell in register rbx") \
+ V(kExpectingAlignmentForCopyBytes, \
+ "Expecting alignment for CopyBytes") \
+ V(kExportDeclaration, "Export declaration") \
+ V(kExternalStringExpectedButNotFound, \
+ "external string expected, but not found") \
+ V(kFailedBailedOutLastTime, "failed/bailed out last time") \
+ V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
+ V(kForInStatementOptimizationIsDisabled, \
+ "ForInStatement optimization is disabled") \
+ V(kForInStatementWithNonLocalEachVariable, \
+ "ForInStatement with non-local each variable") \
+ V(kForOfStatement, "ForOfStatement") \
+ V(kFrameIsExpectedToBeAligned, "frame is expected to be aligned") \
+ V(kFunctionCallsEval, "function calls eval") \
+ V(kFunctionIsAGenerator, "function is a generator") \
+ V(kFunctionWithIllegalRedeclaration, "function with illegal redeclaration") \
+ V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
+ V(kGeneratorFailedToResume, "Generator failed to resume") \
+ V(kGenerator, "generator") \
+ V(kGlobalFunctionsMustHaveInitialMap, \
+ "Global functions must have initial map") \
+ V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
+ V(kImportDeclaration, "Import declaration") \
+ V(kImproperObjectOnPrototypeChainForStore, \
+ "improper object on prototype chain for store") \
+ V(kIndexIsNegative, "Index is negative") \
+ V(kIndexIsTooLarge, "Index is too large") \
+ V(kInlinedRuntimeFunctionClassOf, "inlined runtime function: ClassOf") \
+ V(kInlinedRuntimeFunctionFastAsciiArrayJoin, \
+ "inlined runtime function: FastAsciiArrayJoin") \
+ V(kInlinedRuntimeFunctionGeneratorNext, \
+ "inlined runtime function: GeneratorNext") \
+ V(kInlinedRuntimeFunctionGeneratorThrow, \
+ "inlined runtime function: GeneratorThrow") \
+ V(kInlinedRuntimeFunctionGetFromCache, \
+ "inlined runtime function: GetFromCache") \
+ V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
+ "inlined runtime function: IsNonNegativeSmi") \
+ V(kInlinedRuntimeFunctionIsRegExpEquivalent, \
+ "inlined runtime function: IsRegExpEquivalent") \
+ V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
+ "inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
+ V(kInliningBailedOut, "inlining bailed out") \
+ V(kInputGPRIsExpectedToHaveUpper32Cleared, \
+ "input GPR is expected to have upper32 cleared") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
+ "InstanceofStub unexpected call site cache (check)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
+ "InstanceofStub unexpected call site cache (cmp 1)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheCmp2, \
+ "InstanceofStub unexpected call site cache (cmp 2)") \
+ V(kInstanceofStubUnexpectedCallSiteCacheMov, \
+ "InstanceofStub unexpected call site cache (mov)") \
+ V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
+ "Integer32ToSmiField writing to non-smi location") \
+ V(kInvalidCaptureReferenced, "Invalid capture referenced") \
+ V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
+ "Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
+ V(kInvalidLeftHandSideInAssignment, "invalid left-hand side in assignment") \
+ V(kInvalidLhsInCompoundAssignment, "invalid lhs in compound assignment") \
+ V(kInvalidLhsInCountOperation, "invalid lhs in count operation") \
+ V(kInvalidMinLength, "Invalid min_length") \
+ V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
+ "JSGlobalObject::native_context should be a native context") \
+ V(kJSGlobalProxyContextShouldNotBeNull, \
+ "JSGlobalProxy::context() should not be null") \
+ V(kJSObjectWithFastElementsMapHasSlowElements, \
+ "JSObject with fast elements map has slow elements") \
+ V(kLetBindingReInitialization, "Let binding re-initialization") \
+ V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnArm, \
+ "LiveEdit frame dropping is not supported on arm") \
+ V(kLiveEditFrameDroppingIsNotSupportedOnMips, \
+ "LiveEdit frame dropping is not supported on mips") \
+ V(kLiveEdit, "LiveEdit") \
+ V(kLookupVariableInCountOperation, \
+ "lookup variable in count operation") \
+ V(kMapIsNoLongerInEax, "Map is no longer in eax") \
+ V(kModuleDeclaration, "Module declaration") \
+ V(kModuleLiteral, "Module literal") \
+ V(kModulePath, "Module path") \
+ V(kModuleStatement, "Module statement") \
+ V(kModuleVariable, "Module variable") \
+ V(kModuleUrl, "Module url") \
+ V(kNoCasesLeft, "no cases left") \
+ V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \
+ "No empty arrays here in EmitFastAsciiArrayJoin") \
+ V(kNonInitializerAssignmentToConst, \
+ "non-initializer assignment to const") \
+ V(kNonSmiIndex, "Non-smi index") \
+ V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
+ V(kNonSmiValue, "Non-smi value") \
+ V(kNotEnoughVirtualRegistersForValues, \
+ "not enough virtual registers for values") \
+ V(kNotEnoughSpillSlotsForOsr, \
+ "not enough spill slots for OSR") \
+ V(kNotEnoughVirtualRegistersRegalloc, \
+ "not enough virtual registers (regalloc)") \
+ V(kObjectFoundInSmiOnlyArray, "object found in smi-only array") \
+ V(kObjectLiteralWithComplexProperty, \
+ "Object literal with complex property") \
+ V(kOddballInStringTableIsNotUndefinedOrTheHole, \
+ "oddball in string table is not undefined or the hole") \
+ V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
+ V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
+ V(kOperandIsASmi, "Operand is a smi") \
+ V(kOperandIsNotAName, "Operand is not a name") \
+ V(kOperandIsNotANumber, "Operand is not a number") \
+ V(kOperandIsNotASmi, "Operand is not a smi") \
+ V(kOperandIsNotAString, "Operand is not a string") \
+ V(kOperandIsNotSmi, "Operand is not smi") \
+ V(kOperandNotANumber, "Operand not a number") \
+ V(kOptimizedTooManyTimes, "optimized too many times") \
+ V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
+ "Out of virtual registers while trying to allocate temp register") \
+ V(kParseScopeError, "parse/scope error") \
+ V(kPossibleDirectCallToEval, "possible direct call to eval") \
+ V(kPropertyAllocationCountFailed, "Property allocation count failed") \
+ V(kReceivedInvalidReturnAddress, "Received invalid return address") \
+ V(kReferenceToAVariableWhichRequiresDynamicLookup, \
+ "reference to a variable which requires dynamic lookup") \
+ V(kReferenceToGlobalLexicalVariable, \
+ "reference to global lexical variable") \
+ V(kReferenceToUninitializedVariable, "reference to uninitialized variable") \
+ V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
+ V(kRegisterWasClobbered, "register was clobbered") \
+ V(kScopedBlock, "ScopedBlock") \
+ V(kSharedFunctionInfoLiteral, "Shared function info literal") \
+ V(kSmiAdditionOverflow, "Smi addition overflow") \
+ V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
+ V(kStackFrameTypesMustMatch, "stack frame types must match") \
+ V(kSwitchStatementMixedOrNonLiteralSwitchLabels, \
+ "SwitchStatement: mixed or non-literal switch labels") \
+ V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses") \
+ V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
+ V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
+ V(kTheInstructionToPatchShouldBeALoadFromPc, \
+ "The instruction to patch should be a load from pc") \
+ V(kTheInstructionToPatchShouldBeALui, \
+ "The instruction to patch should be a lui") \
+ V(kTheInstructionToPatchShouldBeAnOri, \
+ "The instruction to patch should be an ori") \
+ V(kTooManyParametersLocals, "too many parameters/locals") \
+ V(kTooManyParameters, "too many parameters") \
+ V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
+ V(kToOperandIsDoubleRegisterUnimplemented, \
+ "ToOperand IsDoubleRegister unimplemented") \
+ V(kToOperandUnsupportedDoubleImmediate, \
+ "ToOperand Unsupported double immediate") \
+ V(kTryCatchStatement, "TryCatchStatement") \
+ V(kTryFinallyStatement, "TryFinallyStatement") \
+ V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
+ V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
+ V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
+ V(kUndoAllocationOfNonAllocatedMemory, \
+ "Undo allocation of non allocated memory") \
+ V(kUnexpectedAllocationTop, "Unexpected allocation top") \
+ V(kUnexpectedElementsKindInArrayConstructor, \
+ "Unexpected ElementsKind in array constructor") \
+ V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
+ "Unexpected fallthrough from CharCodeAt slow case") \
+ V(kUnexpectedFallthroughFromCharFromCodeSlowCase, \
+ "Unexpected fallthrough from CharFromCode slow case") \
+ V(kUnexpectedFallThroughFromStringComparison, \
+ "Unexpected fall-through from string comparison") \
+ V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode, \
+ "Unexpected fall-through in BinaryStub_GenerateFloatingPointCode") \
+ V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
+ "Unexpected fallthrough to CharCodeAt slow case") \
+ V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
+ "Unexpected fallthrough to CharFromCode slow case") \
+ V(kUnexpectedFPUStackDepthAfterInstruction, \
+ "Unexpected FPU stack depth after instruction") \
+ V(kUnexpectedInitialMapForArrayFunction1, \
+ "Unexpected initial map for Array function (1)") \
+ V(kUnexpectedInitialMapForArrayFunction2, \
+ "Unexpected initial map for Array function (2)") \
+ V(kUnexpectedInitialMapForArrayFunction, \
+ "Unexpected initial map for Array function") \
+ V(kUnexpectedInitialMapForInternalArrayFunction, \
+ "Unexpected initial map for InternalArray function") \
+ V(kUnexpectedLevelAfterReturnFromApiCall, \
+ "Unexpected level after return from api call") \
+ V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
+ "Unexpected number of pre-allocated property fields") \
+ V(kUnexpectedStringFunction, "Unexpected String function") \
+ V(kUnexpectedStringType, "Unexpected string type") \
+ V(kUnexpectedStringWrapperInstanceSize, \
+ "Unexpected string wrapper instance size") \
+ V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
+ "Unexpected type for RegExp data, FixedArray expected") \
+ V(kUnexpectedUnusedPropertiesOfStringWrapper, \
+ "Unexpected unused properties of string wrapper") \
+ V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
+ V(kUnknown, "unknown") \
+ V(kUnsupportedConstCompoundAssignment, \
+ "unsupported const compound assignment") \
+ V(kUnsupportedCountOperationWithConst, \
+ "unsupported count operation with const") \
+ V(kUnsupportedDoubleImmediate, "unsupported double immediate") \
+ V(kUnsupportedLetCompoundAssignment, "unsupported let compound assignment") \
+ V(kUnsupportedLookupSlotInDeclaration, \
+ "unsupported lookup slot in declaration") \
+ V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
+ V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
+ V(kUnsupportedPhiUseOfConstVariable, \
+ "Unsupported phi use of const variable") \
+ V(kUnsupportedTaggedImmediate, "unsupported tagged immediate") \
+ V(kVariableResolvedToWithContext, "Variable resolved to with context") \
+ V(kWeShouldNotHaveAnEmptyLexicalContext, \
+ "we should not have an empty lexical context") \
+ V(kWithStatement, "WithStatement") \
+ V(kWrongAddressOrValuePassedToRecordWrite, \
+ "Wrong address or value passed to RecordWrite") \
+ V(kYield, "Yield")
+
+
+#define ERROR_MESSAGES_CONSTANTS(C, T) C,
+enum BailoutReason {
+ ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS)
+ kLastErrorMessage
+};
+#undef ERROR_MESSAGES_CONSTANTS
+
+
+const char* GetBailoutReason(BailoutReason reason);
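+// ERROR_MESSAGES_LIST is an X-macro: ERROR_MESSAGES_CONSTANTS (above)
+// keeps each V()'s first argument to build the enum, while
+// ERROR_MESSAGES_TEXTS in objects.cc keeps the second to build the
+// matching string table, so the two stay in sync by construction.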
// Object is the abstract superclass for all classes in the
@@ -1148,8 +1449,8 @@ class Object : public MaybeObject {
inline bool HasSpecificClassOf(String* name);
- MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9.
- bool BooleanValue(); // ECMA-262 9.2.
+ MUST_USE_RESULT MaybeObject* ToObject(Isolate* isolate); // ECMA-262 9.9.
+ bool BooleanValue(); // ECMA-262 9.2.
// Convert to a JSObject if needed.
// native_context is used when creating wrapper object.
@@ -1171,7 +1472,8 @@ class Object : public MaybeObject {
Name* key,
PropertyAttributes* attributes);
- static Handle<Object> GetProperty(Handle<Object> object, Handle<Name> key);
+ static Handle<Object> GetProperty(Handle<Object> object,
+ Handle<Name> key);
static Handle<Object> GetProperty(Handle<Object> object,
Handle<Object> receiver,
LookupResult* result,
@@ -1193,11 +1495,15 @@ class Object : public MaybeObject {
MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
JSReceiver* getter);
- static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
- MUST_USE_RESULT inline MaybeObject* GetElement(uint32_t index);
+ static Handle<Object> GetElement(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t index);
+ MUST_USE_RESULT inline MaybeObject* GetElement(Isolate* isolate,
+ uint32_t index);
// For use when we know that no exception can be thrown.
- inline Object* GetElementNoExceptionThrown(uint32_t index);
- MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Object* receiver,
+ inline Object* GetElementNoExceptionThrown(Isolate* isolate, uint32_t index);
+ MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Isolate* isolate,
+ Object* receiver,
uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
@@ -1229,10 +1535,7 @@ class Object : public MaybeObject {
inline void VerifyApiCallResultType();
// Prints this object without details.
- inline void ShortPrint() {
- ShortPrint(stdout);
- }
- void ShortPrint(FILE* out);
+ void ShortPrint(FILE* out = stdout);
// Prints this object without details to a message accumulator.
void ShortPrint(StringStream* accumulator);
@@ -1271,10 +1574,7 @@ class Smi: public Object {
static inline Smi* cast(Object* object);
// Dispatched behavior.
- inline void SmiPrint() {
- SmiPrint(stdout);
- }
- void SmiPrint(FILE* out);
+ void SmiPrint(FILE* out = stdout);
void SmiPrint(StringStream* accumulator);
DECLARE_VERIFIER(Smi)
@@ -1345,10 +1645,7 @@ class Failure: public MaybeObject {
static inline Failure* cast(MaybeObject* object);
// Dispatched behavior.
- inline void FailurePrint() {
- FailurePrint(stdout);
- }
- void FailurePrint(FILE* out);
+ void FailurePrint(FILE* out = stdout);
void FailurePrint(StringStream* accumulator);
DECLARE_VERIFIER(Failure)
@@ -1429,9 +1726,7 @@ class HeapObject: public Object {
// The Heap the object was allocated in. Used also to access Isolate.
inline Heap* GetHeap();
- // Convenience method to get current isolate. This method can be
- // accessed only when its result is the same as
- // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
+ // Convenience method to get current isolate.
inline Isolate* GetIsolate();
// Converts an address to a HeapObject pointer.
@@ -1463,6 +1758,13 @@ class HeapObject: public Object {
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
+ // Adds the |code| object related to |name| to the code cache of this map. If
+ // this map is a dictionary map that is shared, the map copied and installed
+ // onto the object.
+ static void UpdateMapCodeCache(Handle<HeapObject> object,
+ Handle<Name> name,
+ Handle<Code> code);
+
// Casting.
static inline HeapObject* cast(Object* obj);
@@ -1477,12 +1779,9 @@ class HeapObject: public Object {
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void HeapObjectPrint() {
- HeapObjectPrint(stdout);
- }
- void HeapObjectPrint(FILE* out);
void PrintHeader(FILE* out, const char* id);
#endif
+ DECLARE_PRINTER(HeapObject)
DECLARE_VERIFIER(HeapObject)
#ifdef VERIFY_HEAP
inline void VerifyObjectField(int offset);
@@ -1566,10 +1865,7 @@ class HeapNumber: public HeapObject {
// Dispatched behavior.
bool HeapNumberBooleanValue();
- inline void HeapNumberPrint() {
- HeapNumberPrint(stdout);
- }
- void HeapNumberPrint(FILE* out);
+ void HeapNumberPrint(FILE* out = stdout);
void HeapNumberPrint(StringStream* accumulator);
DECLARE_VERIFIER(HeapNumber)
@@ -1644,12 +1940,6 @@ class JSReceiver: public HeapObject {
CERTAINLY_NOT_STORE_FROM_KEYED
};
- // Indicates whether a value can be loaded as a constant.
- enum StoreMode {
- ALLOW_AS_CONSTANT,
- FORCE_FIELD
- };
-
// Internal properties (e.g. the hidden properties dictionary) might
// be added even though the receiver is non-extensible.
enum ExtensibilityCheck {
@@ -1665,6 +1955,11 @@ class JSReceiver: public HeapObject {
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
+ static Handle<Object> SetElement(Handle<JSReceiver> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
MUST_USE_RESULT static MaybeObject* SetPropertyOrFail(
Handle<JSReceiver> object,
@@ -1696,15 +1991,7 @@ class JSReceiver: public HeapObject {
DeleteMode mode = NORMAL_DELETION);
static Handle<Object> DeleteElement(Handle<JSReceiver> object,
uint32_t index,
- DeleteMode mode);
-
- // Set the index'th array element.
- // Can cause GC, or return failure if GC is required.
- MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool check_prototype);
+ DeleteMode mode = NORMAL_DELETION);
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
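The recurring theme in these JSReceiver/JSObject hunks is handlification: raw-pointer instance methods returning MaybeObject become static methods taking Handle<>s, since a handle is an indirection slot a moving GC can update. A toy sketch of the idea, assuming a hypothetical moving GC (this is not V8's HandleScope machinery):

struct Object { int value; };

// Toy handle: dereferences go through a slot that a (hypothetical) moving
// GC would rewrite, so the reference survives allocation.
template <typename T>
class Handle {
 public:
  explicit Handle(T** location) : location_(location) {}
  T* operator->() const { return *location_; }  // re-read the slot each time
 private:
  T** location_;
};

// With a raw Object*, an allocation in here could move the object and leave
// the pointer dangling; with a Handle, the slot is kept up to date.
void SetValue(Handle<Object> object, int v) {
  // ... an allocation that may trigger GC could happen here ...
  object->value = v;
}

int main() {
  Object heap_object = {0};
  Object* slot = &heap_object;  // stand-in for a handle-scope slot
  SetValue(Handle<Object>(&slot), 42);
  return heap_object.value == 42 ? 0 : 1;
}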
@@ -1877,7 +2164,6 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- ExtensibilityCheck extensibility_check,
StoreMode mode = ALLOW_AS_CONSTANT);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
@@ -1886,7 +2172,8 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
@@ -1910,12 +2197,13 @@ class JSObject: public JSReceiver {
inline MUST_USE_RESULT MaybeObject* TryMigrateInstance();
// Can cause GC.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
+ MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline(
Name* key,
Object* value,
PropertyAttributes attributes,
ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
// Retrieve a value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
@@ -1944,7 +2232,6 @@ class JSObject: public JSReceiver {
PropertyDetails details);
static void OptimizeAsPrototype(Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* OptimizeAsPrototype();
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
@@ -1970,11 +2257,13 @@ class JSObject: public JSReceiver {
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ v8::AccessControl access_control = v8::DEFAULT);
MaybeObject* LookupAccessor(Name* name, AccessorComponent component);
- MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
+ static Handle<Object> SetAccessor(Handle<JSObject> object,
+ Handle<AccessorInfo> info);
// Used from Object::GetProperty().
MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
@@ -2025,23 +2314,18 @@ class JSObject: public JSReceiver {
Object* GetHiddenProperty(Name* key);
// Deletes a hidden property. Deleting a non-existing property is
// considered successful.
- void DeleteHiddenProperty(Name* key);
+ static void DeleteHiddenProperty(Handle<JSObject> object,
+ Handle<Name> key);
// Returns true if the object has a property with the hidden string as name.
bool HasHiddenProperties();
- static int GetIdentityHash(Handle<JSObject> obj);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
- MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
-
- static Handle<Object> DeleteElement(Handle<JSObject> obj,
- uint32_t index,
- DeleteMode mode = NORMAL_DELETION);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+ static int GetIdentityHash(Handle<JSObject> object);
+ static void SetIdentityHash(Handle<JSObject> object, Smi* hash);
inline void ValidateElements();
// Makes sure that this object can contain HeapObject as elements.
- MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
+ static inline void EnsureCanContainHeapObjectElements(Handle<JSObject> obj);
// Makes sure that this object can contain the specified elements.
MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
@@ -2217,7 +2501,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* AddConstantProperty(
Name* name,
Object* constant,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ TransitionFlag flag);
MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
Name* name,
@@ -2240,25 +2525,11 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
- // Replaces an existing transition with a transition to a map with a FIELD.
- MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
- int transition_index,
- Name* name,
- Object* new_value,
- PropertyAttributes attributes);
-
- // Converts a descriptor of any other type to a real field, backed by the
- // properties array.
- MUST_USE_RESULT MaybeObject* ConvertDescriptorToField(
- Name* name,
- Object* new_value,
- PropertyAttributes attributes,
- TransitionFlag flag = OMIT_TRANSITION);
-
MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
int modify_index,
- Representation new_representation);
+ Representation new_representation,
+ StoreMode store_mode);
// Add a property to a fast-case object.
MUST_USE_RESULT MaybeObject* AddFastProperty(
@@ -2266,7 +2537,8 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ValueType value_type = OPTIMAL_REPRESENTATION);
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ TransitionFlag flag = INSERT_TRANSITION);
// Add a property to a slow-case object.
MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name,
@@ -2282,7 +2554,8 @@ class JSObject: public JSReceiver {
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ TransitionFlag flag = INSERT_TRANSITION);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2303,12 +2576,6 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* NormalizeElements();
- static void UpdateMapCodeCache(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Code> code);
-
- MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(Name* name, Code* code);
-
// Transform slow named properties to fast variants.
// Returns failure if allocation failed.
static void TransformToFastProperties(Handle<JSObject> object,
@@ -2371,25 +2638,17 @@ class JSObject: public JSReceiver {
DECLARE_PRINTER(JSObject)
DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
- inline void PrintProperties() {
- PrintProperties(stdout);
- }
- void PrintProperties(FILE* out);
-
- inline void PrintElements() {
- PrintElements(stdout);
- }
- void PrintElements(FILE* out);
- inline void PrintTransitions() {
- PrintTransitions(stdout);
- }
- void PrintTransitions(FILE* out);
+ void PrintProperties(FILE* out = stdout);
+ void PrintElements(FILE* out = stdout);
+ void PrintTransitions(FILE* out = stdout);
#endif
void PrintElementsTransition(
FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
ElementsKind to_kind, FixedArrayBase* to_elements);
+ void PrintInstanceMigration(FILE* file, Map* original_map, Map* new_map);
+
#ifdef DEBUG
// Structure for collecting spill information about JSObjects.
class SpillInformation {
@@ -2416,7 +2675,8 @@ class JSObject: public JSReceiver {
// Maximal number of fast properties for the JSObject. Used to
// restrict the number of map transitions to avoid an explosion in
// the number of maps for objects used as dictionaries.
- inline bool TooManyFastProperties(int properties, StoreFromKeyed store_mode);
+ inline bool TooManyFastProperties(
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
@@ -2437,7 +2697,10 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
+ // Note that Heap::MaxRegularSpaceAllocationSize() puts a limit on
+ // permissible values (see the ASSERT in heap.cc).
static const int kInitialMaxFastElementArray = 100000;
+
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 64;
static const int kMaxInstanceSize = 255 * kPointerSize;
@@ -2471,6 +2734,15 @@ class JSObject: public JSReceiver {
friend class DictionaryElementsAccessor;
friend class JSReceiver;
+ // TODO(mstarzinger): Soon to be handlified.
+ MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
+ Name* key,
+ Object* value,
+ PropertyAttributes attributes,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
+
MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
Object* structure,
uint32_t index,
@@ -2529,11 +2801,11 @@ class JSObject: public JSReceiver {
Handle<Name> name,
DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
-
- MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
- MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
- DeleteMode mode);
+ static Handle<Object> DeleteElement(Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode);
+ static Handle<Object> DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index);
bool ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
@@ -2546,26 +2818,28 @@ class JSObject: public JSReceiver {
void GetElementsCapacityAndUsage(int* capacity, int* used);
bool CanSetCallback(Name* name);
- MUST_USE_RESULT MaybeObject* SetElementCallback(
- uint32_t index,
- Object* structure,
- PropertyAttributes attributes);
- MUST_USE_RESULT MaybeObject* SetPropertyCallback(
- Name* name,
- Object* structure,
- PropertyAttributes attributes);
+ static void SetElementCallback(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> structure,
+ PropertyAttributes attributes);
+ static void SetPropertyCallback(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> structure,
+ PropertyAttributes attributes);
static void DefineElementAccessor(Handle<JSObject> object,
uint32_t index,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ v8::AccessControl access_control);
static Handle<AccessorPair> CreateAccessorPairFor(Handle<JSObject> object,
Handle<Name> name);
static void DefinePropertyAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
- PropertyAttributes attributes);
+ PropertyAttributes attributes,
+ v8::AccessControl access_control);
// Try to define a single accessor paying attention to map transitions.
// Returns false if this was not possible and we have to use the slow case.
@@ -2591,6 +2865,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT MaybeObject* SetHiddenPropertiesHashTable(
Object* value);
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2632,11 +2908,7 @@ class FixedArray: public FixedArrayBase {
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
- // TODO(isolates): duplicate.
- inline void set_undefined(Heap* heap, int index);
inline void set_null(int index);
- // TODO(isolates): duplicate.
- inline void set_null(Heap* heap, int index);
inline void set_the_hole(int index);
inline Object** GetFirstElementAddress();
@@ -2906,6 +3178,8 @@ class DescriptorArray: public FixedArray {
MUST_USE_RESULT MaybeObject* Merge(int verbatim,
int valid,
int new_size,
+ int modify_index,
+ StoreMode store_mode,
DescriptorArray* other);
bool IsMoreGeneralThan(int verbatim,
@@ -2933,7 +3207,8 @@ class DescriptorArray: public FixedArray {
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_descriptors,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int number_of_descriptors,
int slack = 0);
// Casting.
@@ -2967,10 +3242,7 @@ class DescriptorArray: public FixedArray {
#ifdef OBJECT_PRINT
// Print all the descriptors.
- inline void PrintDescriptors() {
- PrintDescriptors(stdout);
- }
- void PrintDescriptors(FILE* out);
+ void PrintDescriptors(FILE* out = stdout);
#endif
#ifdef DEBUG
@@ -3208,6 +3480,9 @@ class HashTable: public FixedArray {
inline int FindEntry(Key key);
int FindEntry(Isolate* isolate, Key key);
+ // Rehashes the table in-place.
+ void Rehash(Key key);
+
protected:
// Find the entry at which to insert element with the given key that
// has the given hash value.
@@ -3254,6 +3529,13 @@ class HashTable: public FixedArray {
return (last + number) & (size - 1);
}
+ // Returns _expected_ if one of the entries given by the first _probe_
+ // probes is equal to _expected_. Otherwise, returns the entry given by
+ // the probe number _probe_.
+ uint32_t EntryForProbe(Key key, Object* k, int probe, uint32_t expected);
+
+ void Swap(uint32_t entry1, uint32_t entry2, WriteBarrierMode mode);
+
// Rehashes this hash-table into the new table.
MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
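The probe step visible in the context above relies on table sizes being powers of two, so the bitwise AND is an exact substitute for a modulo. A small sanity-check sketch:

#include <cassert>
#include <cstdint>

// With size a power of two, (x & (size - 1)) == (x % size).
uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t size) {
  return (last + number) & (size - 1);
}

int main() {
  const uint32_t size = 8;
  for (uint32_t last = 0; last < size; last++) {
    assert(NextProbe(last, 3, size) == (last + 3) % size);
  }
  return 0;
}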
@@ -3463,10 +3745,7 @@ class Dictionary: public HashTable<Shape, Key> {
MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
#ifdef OBJECT_PRINT
- inline void Print() {
- Print(stdout);
- }
- void Print(FILE* out);
+ void Print(FILE* out = stdout);
#endif
// Returns the key (slow).
Object* SlowReverseLookup(Object* value);
@@ -4373,7 +4652,8 @@ class DeoptimizationInputData: public FixedArray {
}
// Allocates a DeoptimizationInputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int deopt_entry_count,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int deopt_entry_count,
PretenureFlag pretenure);
// Casting.
@@ -4419,7 +4699,8 @@ class DeoptimizationOutputData: public FixedArray {
}
// Allocates a DeoptimizationOutputData.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_deopt_points,
+ MUST_USE_RESULT static MaybeObject* Allocate(Isolate* isolate,
+ int number_of_deopt_points,
PretenureFlag pretenure);
// Casting.
@@ -4501,7 +4782,6 @@ class Code: public HeapObject {
V(KEYED_CALL_IC) \
V(STORE_IC) \
V(KEYED_STORE_IC) \
- V(UNARY_OP_IC) \
V(BINARY_OP_IC) \
V(COMPARE_IC) \
V(COMPARE_NIL_IC) \
@@ -4549,10 +4829,7 @@ class Code: public HeapObject {
static const char* ICState2String(InlineCacheState state);
static const char* StubType2String(StubType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
- inline void Disassemble(const char* name) {
- Disassemble(name, stdout);
- }
- void Disassemble(const char* name, FILE* out);
+ void Disassemble(const char* name, FILE* out = stdout);
#endif // ENABLE_DISASSEMBLER
// [instruction_size]: Size of the native instructions
@@ -4569,21 +4846,19 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [type_feedback_info]: Struct containing type feedback information for
- // unoptimized code. Optimized code can temporarily store the head of
- // the list of code to be deoptimized during mark-compact GC.
- // STUBs can use this slot to store arbitrary information as a Smi.
- // Will contain either a TypeFeedbackInfo object, or JSFunction object,
- // or undefined, or a Smi.
+ // [type_feedback_info]: This field stores various things, depending on the
+ // kind of the code object.
+ // FUNCTION => type feedback information.
+ // STUB => various things, e.g. a SMI
+ // OPTIMIZED_FUNCTION => the next_code_link for optimized code list.
DECL_ACCESSORS(type_feedback_info, Object)
inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
inline int stub_info();
inline void set_stub_info(int info);
- // Used during GC to code a list of code objects to deoptimize.
- inline Object* code_to_deoptimize_link();
- inline void set_code_to_deoptimize_link(Object* value);
- inline Object** code_to_deoptimize_link_slot();
+ // [next_code_link]: Link for lists of optimized or deoptimized code.
+ // Note that storage for this field is overlapped with type_feedback_info.
+ DECL_ACCESSORS(next_code_link, Object)
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field does not have to be traced during garbage collection since
@@ -4620,8 +4895,7 @@ class Code: public HeapObject {
// TODO(danno): This is a bit of a hack right now since there are still
// clients of this API that pass "extra" values in for argc. These clients
// should be retrofitted to use ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
- kind == UNARY_OP_IC;
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
}
inline StubType type(); // Only valid for monomorphic IC stubs.
@@ -4629,14 +4903,13 @@ class Code: public HeapObject {
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
- inline bool is_debug_break();
+ inline bool is_debug_stub();
inline bool is_load_stub() { return kind() == LOAD_IC; }
inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
inline bool is_store_stub() { return kind() == STORE_IC; }
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
- inline bool is_unary_op_stub() { return kind() == UNARY_OP_IC; }
inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
@@ -4710,10 +4983,6 @@ class Code: public HeapObject {
inline CheckType check_type();
inline void set_check_type(CheckType value);
- // [type-recording unary op type]: For kind UNARY_OP_IC.
- inline byte unary_op_type();
- inline void set_unary_op_type(byte value);
-
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
@@ -4869,6 +5138,8 @@ class Code: public HeapObject {
void ClearInlineCaches();
void ClearTypeFeedbackCells(Heap* heap);
+ BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
+
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
kNoAge = 0,
@@ -4908,6 +5179,7 @@ class Code: public HeapObject {
kHandlerTableOffset + kPointerSize;
static const int kTypeFeedbackInfoOffset =
kDeoptimizationDataOffset + kPointerSize;
+ static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset; // Shared.
static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
static const int kICAgeOffset =
kGCMetadataOffset + kPointerSize;
@@ -4952,9 +5224,6 @@ class Code: public HeapObject {
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
static const int kStackSlotsBitCount = 24;
- static const int kUnaryOpTypeFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kUnaryOpTypeBitCount = 3;
static const int kHasFunctionCacheFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kHasFunctionCacheBitCount = 1;
@@ -4963,15 +5232,12 @@ class Code: public HeapObject {
static const int kMarkedForDeoptimizationBitCount = 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
- STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
kMarkedForDeoptimizationBitCount <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
- class UnaryOpTypeField: public BitField<int,
- kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
class MarkedForDeoptimizationField: public BitField<bool,
@@ -5176,8 +5442,8 @@ class Map: public HeapObject {
inline void set_bit_field2(byte value);
// Bit field 3.
- inline int bit_field3();
- inline void set_bit_field3(int value);
+ inline uint32_t bit_field3();
+ inline void set_bit_field3(uint32_t bits);
class EnumLengthBits: public BitField<int, 0, 11> {};
class NumberOfOwnDescriptorsBits: public BitField<int, 11, 11> {};
@@ -5189,6 +5455,7 @@ class Map: public HeapObject {
class Deprecated: public BitField<bool, 27, 1> {};
class IsFrozen: public BitField<bool, 28, 1> {};
class IsUnstable: public BitField<bool, 29, 1> {};
+ class IsMigrationTarget: public BitField<bool, 30, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5347,11 +5614,30 @@ class Map: public HeapObject {
static Handle<Map> GeneralizeRepresentation(
Handle<Map> map,
int modify_index,
- Representation new_representation);
+ Representation new_representation,
+ StoreMode store_mode);
MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
int modify_index,
- Representation representation);
- MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations();
+ Representation representation,
+ StoreMode store_mode);
+ MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations(
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason);
+
+ void PrintGeneralization(FILE* file,
+ const char* reason,
+ int modify_index,
+ int split,
+ int descriptors,
+ bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation);
+
+ // Returns the constructor name (the name, possibly inferred, of the
+ // function that was used to instantiate the object).
+ String* constructor_name();
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
@@ -5486,7 +5772,6 @@ class Map: public HeapObject {
set_bit_field3(EnumLengthBits::update(bit_field3(), length));
}
- inline bool CanTrackAllocationSite();
inline bool owns_descriptors();
inline void set_owns_descriptors(bool is_shared);
inline bool is_observed();
@@ -5495,6 +5780,8 @@ class Map: public HeapObject {
inline bool is_frozen();
inline void mark_unstable();
inline bool is_stable();
+ inline void set_migration_target(bool value);
+ inline bool is_migration_target();
inline void deprecate();
inline bool is_deprecated();
inline bool CanBeDeprecated();
@@ -5533,6 +5820,9 @@ class Map: public HeapObject {
TransitionFlag flag);
MUST_USE_RESULT MaybeObject* CopyForObserved();
+ static Handle<Map> CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing);
MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
NormalizedMapSharingMode sharing);
@@ -5636,12 +5926,15 @@ class Map: public HeapObject {
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
+ bool IsJSObjectMap() {
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+ }
+
// Fires when the layout of an object with a leaf map changes.
// This includes adding transitions to the leaf map or changing
// the descriptor array.
inline void NotifyLeafMapLayoutChange();
- inline bool CanOmitPrototypeChecks();
inline bool CanOmitMapChecks();
void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
@@ -5658,7 +5951,7 @@ class Map: public HeapObject {
#ifdef VERIFY_HEAP
void SharedMapVerify();
- void VerifyOmittedPrototypeChecks();
+ void VerifyOmittedMapChecks();
#endif
inline int visitor_id();
@@ -6305,7 +6598,9 @@ class SharedFunctionInfo: public HeapObject {
// Disable (further) attempted optimization of all functions sharing this
// shared function info.
- void DisableOptimization(const char* reason);
+ void DisableOptimization(BailoutReason reason);
+
+ inline BailoutReason DisableOptimizationReason();
// Lookup the bailout ID and ASSERT that it exists in the non-optimized
// code, returns whether it asserted (i.e., always true if assertions are
@@ -6336,6 +6631,21 @@ class SharedFunctionInfo: public HeapObject {
inline void set_counters(int value);
inline int counters();
+ // Stores opt_count and bailout_reason as bit-fields.
+ inline void set_opt_count_and_bailout_reason(int value);
+ inline int opt_count_and_bailout_reason();
+
+ void set_bailout_reason(BailoutReason reason) {
+ set_opt_count_and_bailout_reason(
+ DisabledOptimizationReasonBits::update(opt_count_and_bailout_reason(),
+ reason));
+ }
+
+ void set_dont_optimize_reason(BailoutReason reason) {
+ set_bailout_reason(reason);
+ set_dont_optimize(reason != kNoReason);
+ }
+
// Source size of this function.
int SourceSize();
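set_bailout_reason() above packs two values into a single integer through V8's BitField template; the 22/8 split mirrors the OptCountBits and DisabledOptimizationReasonBits declarations added further down. A toy reimplementation of the encode/decode/update arithmetic (the real template lives in utils.h; this is only a sketch of the idea):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  // Assumes the value fits in |size| bits.
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint32_t update(uint32_t field, T value) {
    return (field & ~kMask) | encode(value);
  }
};

typedef BitField<int, 0, 22> OptCountBits;                    // bits 0..21
typedef BitField<int, 22, 8> DisabledOptimizationReasonBits;  // bits 22..29

int main() {
  uint32_t packed = 0;
  packed = OptCountBits::update(packed, 7);
  packed = DisabledOptimizationReasonBits::update(packed, 3);
  assert(OptCountBits::decode(packed) == 7);
  assert(DisabledOptimizationReasonBits::decode(packed) == 3);
  return 0;
}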
@@ -6402,8 +6712,10 @@ class SharedFunctionInfo: public HeapObject {
kEndPositionOffset + kPointerSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kPointerSize;
- static const int kOptCountOffset = kCompilerHintsOffset + kPointerSize;
- static const int kCountersOffset = kOptCountOffset + kPointerSize;
+ static const int kOptCountAndBailoutReasonOffset =
+ kCompilerHintsOffset + kPointerSize;
+ static const int kCountersOffset =
+ kOptCountAndBailoutReasonOffset + kPointerSize;
// Total size.
static const int kSize = kCountersOffset + kPointerSize;
@@ -6437,9 +6749,11 @@ class SharedFunctionInfo: public HeapObject {
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
- static const int kOptCountOffset = kCompilerHintsOffset + kIntSize;
+ static const int kOptCountAndBailoutReasonOffset =
+ kCompilerHintsOffset + kIntSize;
- static const int kCountersOffset = kOptCountOffset + kIntSize;
+ static const int kCountersOffset =
+ kOptCountAndBailoutReasonOffset + kIntSize;
// Total size.
static const int kSize = kCountersOffset + kIntSize;
@@ -6498,6 +6812,9 @@ class SharedFunctionInfo: public HeapObject {
class OptReenableTriesBits: public BitField<int, 4, 18> {};
class ICAgeBits: public BitField<int, 22, 8> {};
+ class OptCountBits: public BitField<int, 0, 22> {};
+ class DisabledOptimizationReasonBits: public BitField<int, 22, 8> {};
+
private:
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
@@ -6682,8 +6999,7 @@ class JSFunction: public JSObject {
// Mark this function for lazy recompilation. The function will be
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
- void MarkForParallelRecompilation();
- void MarkForInstallingRecompiledCode();
+ void MarkForConcurrentRecompilation();
void MarkInRecompileQueue();
// Helpers to compile this function. Returns true on success, false on
@@ -6692,18 +7008,18 @@ class JSFunction: public JSObject {
ClearExceptionFlag flag);
static bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag);
+ static Handle<Code> CompileOsr(Handle<JSFunction> function,
+ BailoutId osr_ast_id,
+ ClearExceptionFlag flag);
static bool CompileOptimized(Handle<JSFunction> function,
- BailoutId osr_ast_id,
ClearExceptionFlag flag);
// Tells whether or not the function is already marked for lazy
// recompilation.
inline bool IsMarkedForLazyRecompilation();
- inline bool IsMarkedForParallelRecompilation();
- inline bool IsMarkedForInstallingRecompiledCode();
+ inline bool IsMarkedForConcurrentRecompilation();
- // Tells whether or not the function is on the parallel
- // recompilation queue.
+ // Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInRecompileQueue();
// Check whether or not this function is inlineable.
@@ -6768,15 +7084,14 @@ class JSFunction: public JSObject {
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
- // [next_function_link]: Field for linking functions. This list is treated as
- // a weak list by the GC.
+ // [next_function_link]: Links functions into various lists, e.g. the list
+ // of optimized functions hanging off the native_context. The CodeFlusher
+ // uses this link to chain together flushing candidates. Treated weakly
+ // by the garbage collector.
DECL_ACCESSORS(next_function_link, Object)
// Prints the name of the function using PrintF.
- inline void PrintName() {
- PrintName(stdout);
- }
- void PrintName(FILE* out);
+ void PrintName(FILE* out = stdout);
// Casting.
static inline JSFunction* cast(Object* obj);
@@ -6795,7 +7110,8 @@ class JSFunction: public JSObject {
// Retrieve the native context from a function's literal array.
static Context* NativeContextFromLiterals(FixedArray* literals);
- bool PassesHydrogenFilter();
+ // Used for flags such as --hydrogen-filter.
+ bool PassesFilter(const char* raw_filter);
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
@@ -6885,10 +7201,6 @@ class GlobalObject: public JSObject {
return answer;
}
- // Ensure that the global object has a cell for the given property name.
- static Handle<PropertyCell> EnsurePropertyCell(Handle<GlobalObject> global,
- Handle<Name> name);
-
// Casting.
static inline GlobalObject* cast(Object* obj);
@@ -6910,6 +7222,10 @@ class JSGlobalObject: public GlobalObject {
// Casting.
static inline JSGlobalObject* cast(Object* obj);
+ // Ensure that the global object has a cell for the given property name.
+ static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
+ Handle<Name> name);
+
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalObject)
DECLARE_VERIFIER(JSGlobalObject)
@@ -7554,6 +7870,7 @@ class AllocationSite: public Struct {
static inline AllocationSiteMode GetMode(
ElementsKind boilerplate_elements_kind);
static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
+ static inline bool CanTrack(InstanceType type);
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
static const int kWeakNextOffset = kTransitionInfoOffset + kPointerSize;
@@ -7750,6 +8067,8 @@ class Name: public HeapObject {
// Casting.
static inline Name* cast(Object* obj);
+ bool IsCacheable(Isolate* isolate);
+
DECLARE_PRINTER(Name)
// Layout description.
@@ -8019,13 +8338,9 @@ class String: public Name {
// Dispatched behavior.
void StringShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void StringPrint() {
- StringPrint(stdout);
- }
- void StringPrint(FILE* out);
-
char* ToAsciiArray();
#endif
+ DECLARE_PRINTER(String)
DECLARE_VERIFIER(String)
inline bool IsFlat();
@@ -8477,13 +8792,14 @@ class Relocatable BASE_EMBEDDED {
virtual void IterateInstance(ObjectVisitor* v) { }
virtual void PostGarbageCollection() { }
- static void PostGarbageCollectionProcessing();
+ static void PostGarbageCollectionProcessing(Isolate* isolate);
static int ArchiveSpacePerThread();
static char* ArchiveState(Isolate* isolate, char* to);
static char* RestoreState(Isolate* isolate, char* from);
- static void Iterate(ObjectVisitor* v);
+ static void Iterate(Isolate* isolate, ObjectVisitor* v);
static void Iterate(ObjectVisitor* v, Relocatable* top);
static char* Iterate(ObjectVisitor* v, char* t);
+
private:
Isolate* isolate_;
Relocatable* prev_;
@@ -8619,7 +8935,8 @@ class Oddball: public HeapObject {
DECLARE_VERIFIER(Oddball)
// Initialize the fields.
- MUST_USE_RESULT MaybeObject* Initialize(const char* to_string,
+ MUST_USE_RESULT MaybeObject* Initialize(Heap* heap,
+ const char* to_string,
Object* to_number,
byte kind);
@@ -8769,11 +9086,6 @@ class JSProxy: public JSReceiver {
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetElementWithHandler(
- JSReceiver* receiver,
- uint32_t index,
- Object* value,
- StrictModeFlag strict_mode);
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
@@ -8794,10 +9106,8 @@ class JSProxy: public JSReceiver {
JSReceiver* receiver,
uint32_t index);
- MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-
- // Turn this into an (empty) JSObject.
- void Fix();
+ // Turn the proxy into an (empty) JSObject.
+ static void Fix(Handle<JSProxy> proxy);
// Initializes the body after the handler slot.
inline void InitializeBody(int object_size, Object* value);
@@ -8832,13 +9142,23 @@ class JSProxy: public JSReceiver {
private:
friend class JSReceiver;
- static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> object,
+ static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
+
+ static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name,
DeleteMode mode);
- static Handle<Object> DeleteElementWithHandler(Handle<JSProxy> object,
+ static Handle<Object> DeleteElementWithHandler(Handle<JSProxy> proxy,
uint32_t index,
DeleteMode mode);
+ MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+ static Handle<Object> GetIdentityHash(Handle<JSProxy> proxy,
+ CreationFlag flag);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -9239,6 +9559,11 @@ class AccessorInfo: public Struct {
// Dispatched behavior.
DECLARE_VERIFIER(AccessorInfo)
+ // Append all descriptors to the array that are not already there.
+ // Return number added.
+ static int AppendUnique(Handle<Object> descriptors,
+ Handle<FixedArray> array,
+ int valid_descriptors);
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
@@ -9402,10 +9727,18 @@ class ExecutableAccessorInfo: public AccessorInfo {
// * undefined: considered an accessor by the spec, too, strangely enough
// * the hole: an accessor which has not been set
// * a pointer to a map: a transition used to ensure map sharing
+// access_flags provides the ability to override access checks when an
+// access check fails.
class AccessorPair: public Struct {
public:
DECL_ACCESSORS(getter, Object)
DECL_ACCESSORS(setter, Object)
+ DECL_ACCESSORS(access_flags, Smi)
+
+ inline void set_access_flags(v8::AccessControl access_control);
+ inline bool all_can_read();
+ inline bool all_can_write();
+ inline bool prohibits_overwriting();
static inline AccessorPair* cast(Object* obj);
@@ -9442,9 +9775,14 @@ class AccessorPair: public Struct {
static const int kGetterOffset = HeapObject::kHeaderSize;
static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kSize = kSetterOffset + kPointerSize;
+ static const int kAccessFlagsOffset = kSetterOffset + kPointerSize;
+ static const int kSize = kAccessFlagsOffset + kPointerSize;
private:
+ static const int kAllCanReadBit = 0;
+ static const int kAllCanWriteBit = 1;
+ static const int kProhibitsOverwritingBit = 2;
+
// Strangely enough, in addition to functions and harmony proxies, the spec
// requires us to consider undefined as a kind of accessor, too:
// var obj = {};
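The three bit constants above encode a v8::AccessControl value into the new access_flags Smi. v8.h does define ALL_CAN_READ, ALL_CAN_WRITE and PROHIBITS_OVERWRITING with the bit values below; the encoder itself is only a guess at what set_access_flags() does:

#include <cassert>

enum AccessControl {  // mirrors the v8.h enum
  DEFAULT = 0,
  ALL_CAN_READ = 1,
  ALL_CAN_WRITE = 1 << 1,
  PROHIBITS_OVERWRITING = 1 << 2
};

const int kAllCanReadBit = 0;
const int kAllCanWriteBit = 1;
const int kProhibitsOverwritingBit = 2;

// Sketch of the translation from API-level flags to the stored bits.
int EncodeAccessFlags(int access_control) {
  int flags = 0;
  if (access_control & ALL_CAN_READ) flags |= 1 << kAllCanReadBit;
  if (access_control & ALL_CAN_WRITE) flags |= 1 << kAllCanWriteBit;
  if (access_control & PROHIBITS_OVERWRITING)
    flags |= 1 << kProhibitsOverwritingBit;
  return flags;
}

int main() {
  int flags = EncodeAccessFlags(ALL_CAN_READ | PROHIBITS_OVERWRITING);
  assert(flags & (1 << kAllCanReadBit));
  assert(!(flags & (1 << kAllCanWriteBit)));
  assert(flags & (1 << kProhibitsOverwritingBit));
  return 0;
}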
@@ -9532,12 +9870,15 @@ class TemplateInfo: public Struct {
public:
DECL_ACCESSORS(tag, Object)
DECL_ACCESSORS(property_list, Object)
+ DECL_ACCESSORS(property_accessors, Object)
DECLARE_VERIFIER(TemplateInfo)
- static const int kTagOffset = HeapObject::kHeaderSize;
+ static const int kTagOffset = HeapObject::kHeaderSize;
static const int kPropertyListOffset = kTagOffset + kPointerSize;
- static const int kHeaderSize = kPropertyListOffset + kPointerSize;
+ static const int kPropertyAccessorsOffset =
+ kPropertyListOffset + kPointerSize;
+ static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
@@ -9548,7 +9889,6 @@ class FunctionTemplateInfo: public TemplateInfo {
public:
DECL_ACCESSORS(serial_number, Object)
DECL_ACCESSORS(call_code, Object)
- DECL_ACCESSORS(property_accessors, Object)
DECL_ACCESSORS(prototype_template, Object)
DECL_ACCESSORS(parent_template, Object)
DECL_ACCESSORS(named_property_handler, Object)
@@ -9570,6 +9910,8 @@ class FunctionTemplateInfo: public TemplateInfo {
// requires access check.
DECL_BOOLEAN_ACCESSORS(needs_access_check)
DECL_BOOLEAN_ACCESSORS(read_only_prototype)
+ DECL_BOOLEAN_ACCESSORS(remove_prototype)
+ DECL_BOOLEAN_ACCESSORS(do_not_cache)
static inline FunctionTemplateInfo* cast(Object* obj);
@@ -9579,9 +9921,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
- static const int kPropertyAccessorsOffset = kCallCodeOffset + kPointerSize;
static const int kPrototypeTemplateOffset =
- kPropertyAccessorsOffset + kPointerSize;
+ kCallCodeOffset + kPointerSize;
static const int kParentTemplateOffset =
kPrototypeTemplateOffset + kPointerSize;
static const int kNamedPropertyHandlerOffset =
@@ -9605,6 +9946,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kUndetectableBit = 1;
static const int kNeedsAccessCheckBit = 2;
static const int kReadOnlyPrototypeBit = 3;
+ static const int kRemovePrototypeBit = 4;
+ static const int kDoNotCacheBit = 5;
DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
};
@@ -9787,6 +10130,7 @@ class BreakPointInfo: public Struct {
V(kHandleScope, "handlescope", "(Handle scope)") \
V(kBuiltins, "builtins", "(Builtins)") \
V(kGlobalHandles, "globalhandles", "(Global handles)") \
+ V(kEternalHandles, "eternalhandles", "(Eternal handles)") \
V(kThreadManager, "threadmanager", "(Thread manager)") \
V(kExtensions, "Extensions", "(Extensions)")
diff --git a/chromium/v8/src/optimizing-compiler-thread.cc b/chromium/v8/src/optimizing-compiler-thread.cc
index 21ef2371071..085143d9983 100644
--- a/chromium/v8/src/optimizing-compiler-thread.cc
+++ b/chromium/v8/src/optimizing-compiler-thread.cc
@@ -39,7 +39,7 @@ namespace internal {
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
- { ScopedLock lock(thread_id_mutex_);
+ { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
thread_id_ = ThreadId::Current().ToInteger();
}
#endif
@@ -48,33 +48,46 @@ void OptimizingCompilerThread::Run() {
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- int64_t epoch = 0;
- if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
+ ElapsedTimer total_timer;
+ if (FLAG_trace_concurrent_recompilation) total_timer.Start();
while (true) {
- input_queue_semaphore_->Wait();
+ input_queue_semaphore_.Wait();
Logger::TimerEventScope timer(
- isolate_, Logger::TimerEventScope::v8_recompile_parallel);
+ isolate_, Logger::TimerEventScope::v8_recompile_concurrent);
- if (FLAG_parallel_recompilation_delay != 0) {
- OS::Sleep(FLAG_parallel_recompilation_delay);
+ if (FLAG_concurrent_recompilation_delay != 0) {
+ OS::Sleep(FLAG_concurrent_recompilation_delay);
}
- if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_total_ = OS::Ticks() - epoch;
- }
- return;
+ switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+ case CONTINUE:
+ break;
+ case STOP:
+ if (FLAG_trace_concurrent_recompilation) {
+ time_spent_total_ = total_timer.Elapsed();
+ }
+ stop_semaphore_.Signal();
+ return;
+ case FLUSH:
+ // The main thread is blocked, waiting for the stop semaphore.
+ { AllowHandleDereference allow_handle_dereference;
+ FlushInputQueue(true);
+ }
+ Release_Store(&queue_length_, static_cast<AtomicWord>(0));
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+ stop_semaphore_.Signal();
+ // Return to start of consumer loop.
+ continue;
}
- int64_t compiling_start = 0;
- if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
+ ElapsedTimer compiling_timer;
+ if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
CompileNext();
- if (FLAG_trace_parallel_recompilation) {
- time_spent_compiling_ += OS::Ticks() - compiling_start;
+ if (FLAG_trace_concurrent_recompilation) {
+ time_spent_compiling_ += compiling_timer.Elapsed();
}
}
}
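The tri-state stop_thread_ flag above turns the old boolean stop protocol into a small state machine: the main thread publishes STOP or FLUSH, wakes the worker through the input-queue semaphore, and blocks on the stop semaphore until the worker acknowledges. A toy model of the handshake, with C++20 std::semaphore and std::atomic standing in for V8's primitives:

#include <atomic>
#include <cstdio>
#include <semaphore>
#include <thread>

enum StopFlag { CONTINUE, STOP, FLUSH };

std::atomic<StopFlag> stop_thread{CONTINUE};
std::counting_semaphore<> input_queue_semaphore{0};
std::binary_semaphore stop_semaphore{0};

void Run() {
  while (true) {
    input_queue_semaphore.acquire();  // one signal per task or command
    switch (stop_thread.load(std::memory_order_acquire)) {
      case CONTINUE:
        std::puts("compile one queued task");
        break;
      case STOP:
        stop_semaphore.release();     // acknowledge, then exit
        return;
      case FLUSH:
        std::puts("drop all queued tasks");
        stop_thread.store(CONTINUE, std::memory_order_release);
        stop_semaphore.release();     // unblock the waiting main thread
        continue;                     // back to the consumer loop
    }
  }
}

int main() {
  std::thread worker(Run);
  // Flush: publish the flag, wake the worker, wait for acknowledgment.
  stop_thread.store(FLUSH, std::memory_order_release);
  input_queue_semaphore.release();
  stop_semaphore.acquire();
  // Stop: the same handshake, after which the worker exits.
  stop_thread.store(STOP, std::memory_order_release);
  input_queue_semaphore.release();
  stop_semaphore.acquire();
  worker.join();
  return 0;
}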
@@ -82,7 +95,9 @@ void OptimizingCompilerThread::Run() {
void OptimizingCompilerThread::CompileNext() {
OptimizingCompiler* optimizing_compiler = NULL;
- input_queue_.Dequeue(&optimizing_compiler);
+ bool result = input_queue_.Dequeue(&optimizing_compiler);
+ USE(result);
+ ASSERT(result);
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
// The function may have already been optimized by OSR. Simply continue.
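The result/USE(result)/ASSERT(result) triple above is the standard idiom for a check that exists only in debug builds: USE keeps release builds, where ASSERT compiles away, free of unused-variable warnings. A standalone equivalent built on assert/NDEBUG:

#include <cassert>

template <typename T>
inline void USE(T) {}  // "uses" its argument, generating no code

int main() {
  bool result = (1 + 1 == 2);  // stand-in for input_queue_.Dequeue(...)
  USE(result);                 // keeps -DNDEBUG builds warning-free
  assert(result);              // checked only in debug builds
  return 0;
}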
@@ -93,44 +108,87 @@ void OptimizingCompilerThread::CompileNext() {
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- ScopedLock mark_and_queue(install_mutex_);
- { Heap::RelocationLock relocation_lock(isolate_->heap());
- AllowHandleDereference ahd;
- optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+ if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
+ ASSERT(FLAG_concurrent_osr);
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ osr_candidates_.RemoveElement(optimizing_compiler);
+ ready_for_osr_.Add(optimizing_compiler);
+ } else {
+ output_queue_.Enqueue(optimizing_compiler);
+ isolate_->stack_guard()->RequestInstallCode();
+ }
+}
+
+
+void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
+ OptimizingCompiler* optimizing_compiler;
+ // The optimizing compiler is allocated in the CompilationInfo's zone.
+ while (input_queue_.Dequeue(&optimizing_compiler)) {
+ // This should not block, since we have one signal on the input queue
+ // semaphore corresponding to each element in the input queue.
+ input_queue_semaphore_.Wait();
+ CompilationInfo* info = optimizing_compiler->info();
+ if (restore_function_code) {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
+ }
+ delete info;
}
- output_queue_.Enqueue(optimizing_compiler);
+}
+
+
+void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+ OptimizingCompiler* optimizing_compiler;
+ // The optimizing compiler is allocated in the CompilationInfo's zone.
+ while (output_queue_.Dequeue(&optimizing_compiler)) {
+ CompilationInfo* info = optimizing_compiler->info();
+ if (restore_function_code) {
+ Handle<JSFunction> function = info->closure();
+ function->ReplaceCode(function->shared()->code());
+ }
+ delete info;
+ }
+
+ osr_candidates_.Clear();
+ RemoveStaleOSRCandidates(0);
+}
+
+
+void OptimizingCompilerThread::Flush() {
+ ASSERT(!IsOptimizerThread());
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
+ FlushOutputQueue(true);
}
void OptimizingCompilerThread::Stop() {
ASSERT(!IsOptimizerThread());
- Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- input_queue_semaphore_->Signal();
- stop_semaphore_->Wait();
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
- if (FLAG_parallel_recompilation_delay != 0) {
+ if (FLAG_concurrent_recompilation_delay != 0) {
// Barrier when loading queue length is not necessary since the write
// happens in CompileNext on the same thread.
+ // This is used only for testing.
while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
InstallOptimizedFunctions();
} else {
- OptimizingCompiler* optimizing_compiler;
- // The optimizing compiler is allocated in the CompilationInfo's zone.
- while (input_queue_.Dequeue(&optimizing_compiler)) {
- delete optimizing_compiler->info();
- }
- while (output_queue_.Dequeue(&optimizing_compiler)) {
- delete optimizing_compiler->info();
- }
+ FlushInputQueue(false);
+ FlushOutputQueue(false);
}
- if (FLAG_trace_parallel_recompilation) {
- double compile_time = static_cast<double>(time_spent_compiling_);
- double total_time = static_cast<double>(time_spent_total_);
- double percentage = (compile_time * 100) / total_time;
+ if (FLAG_trace_concurrent_recompilation) {
+ double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
}
+ if (FLAG_trace_osr && FLAG_concurrent_osr) {
+ PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
+ }
+
Join();
}
@@ -140,12 +198,13 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
OptimizingCompiler* compiler;
while (true) {
- { // Memory barrier to ensure marked functions are queued.
- ScopedLock marked_and_queued(install_mutex_);
- if (!output_queue_.Dequeue(&compiler)) return;
- }
+ if (!output_queue_.Dequeue(&compiler)) return;
Compiler::InstallOptimizedCode(compiler);
}
+
+ // Remove the oldest OSR candidates that are ready so that we
+ // only have a limited number of them waiting.
+ if (FLAG_concurrent_osr) RemoveStaleOSRCandidates();
}
@@ -154,16 +213,82 @@ void OptimizingCompilerThread::QueueForOptimization(
ASSERT(IsQueueAvailable());
ASSERT(!IsOptimizerThread());
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
- optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+ if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
+ optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+ } else {
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ osr_candidates_.Add(optimizing_compiler);
+ osr_attempts_++;
+ }
input_queue_.Enqueue(optimizing_compiler);
- input_queue_semaphore_->Signal();
+ input_queue_semaphore_.Signal();
+}
+
+
+OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
+ Handle<JSFunction> function, uint32_t osr_pc_offset) {
+ ASSERT(!IsOptimizerThread());
+ OptimizingCompiler* result = NULL;
+ { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ for (int i = 0; i < ready_for_osr_.length(); i++) {
+ if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ osr_hits_++;
+ result = ready_for_osr_.Remove(i);
+ break;
+ }
+ }
+ }
+ RemoveStaleOSRCandidates();
+ return result;
+}
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
+ uint32_t osr_pc_offset) {
+ ASSERT(!IsOptimizerThread());
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ for (int i = 0; i < osr_candidates_.length(); i++) {
+ if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
+ ASSERT(!IsOptimizerThread());
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ for (int i = 0; i < osr_candidates_.length(); i++) {
+ if (*osr_candidates_[i]->info()->closure() == function) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
+ ASSERT(!IsOptimizerThread());
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ while (ready_for_osr_.length() > limit) {
+ OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
+ CompilationInfo* throw_away = compiler->info();
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - Discarded ");
+ throw_away->closure()->PrintName();
+ PrintF(", AST id %d]\n",
+ throw_away->osr_ast_id().ToInt());
+ }
+ delete throw_away;
+ }
}
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_parallel_recompilation) return false;
- ScopedLock lock(thread_id_mutex_);
+ if (!FLAG_concurrent_recompilation) return false;
+ LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
diff --git a/chromium/v8/src/optimizing-compiler-thread.h b/chromium/v8/src/optimizing-compiler-thread.h
index 275ceb40b71..d1ed6a2c59f 100644
--- a/chromium/v8/src/optimizing-compiler-thread.h
+++ b/chromium/v8/src/optimizing-compiler-thread.h
@@ -30,7 +30,10 @@
#include "atomicops.h"
#include "flags.h"
+#include "list.h"
#include "platform.h"
+#include "platform/mutex.h"
+#include "platform/time.h"
#include "unbound-queue-inl.h"
namespace v8 {
@@ -46,23 +49,29 @@ class OptimizingCompilerThread : public Thread {
Thread("OptimizingCompilerThread"),
#ifdef DEBUG
thread_id_(0),
- thread_id_mutex_(OS::CreateMutex()),
#endif
isolate_(isolate),
- stop_semaphore_(OS::CreateSemaphore(0)),
- input_queue_semaphore_(OS::CreateSemaphore(0)),
- install_mutex_(OS::CreateMutex()),
- time_spent_compiling_(0),
- time_spent_total_(0) {
- NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ stop_semaphore_(0),
+ input_queue_semaphore_(0),
+ osr_candidates_(2),
+ ready_for_osr_(2),
+ osr_hits_(0),
+ osr_attempts_(0) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
+ ~OptimizingCompilerThread() {}
void Run();
void Stop();
- void CompileNext();
+ void Flush();
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
+ OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function,
+ uint32_t osr_pc_offset);
+ bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
+
+ bool IsQueuedForOSR(JSFunction* function);
inline bool IsQueueAvailable() {
// We don't need a barrier since we have a data dependency right
@@ -75,38 +84,52 @@ class OptimizingCompilerThread : public Thread {
// only one thread can run inside an Isolate at one time, a direct read
// doesn't introduce a race -- queue_length_ may have decreased in the
// meantime, but not increased.
- return (current_length < FLAG_parallel_recompilation_queue_length);
+ return (current_length < FLAG_concurrent_recompilation_queue_length);
}
#ifdef DEBUG
bool IsOptimizerThread();
#endif
- ~OptimizingCompilerThread() {
- delete install_mutex_;
- delete input_queue_semaphore_;
- delete stop_semaphore_;
-#ifdef DEBUG
- delete thread_id_mutex_;
-#endif
- }
-
private:
+ enum StopFlag { CONTINUE, STOP, FLUSH };
+
+ // Remove the oldest OSR candidates that are ready so that we
+ // only have |limit| left waiting.
+ void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
+
+ void FlushInputQueue(bool restore_function_code);
+ void FlushOutputQueue(bool restore_function_code);
+ void CompileNext();
+
#ifdef DEBUG
int thread_id_;
- Mutex* thread_id_mutex_;
+ Mutex thread_id_mutex_;
#endif
Isolate* isolate_;
- Semaphore* stop_semaphore_;
- Semaphore* input_queue_semaphore_;
+ Semaphore stop_semaphore_;
+ Semaphore input_queue_semaphore_;
+
+ // Queue of incoming recompilation tasks (including OSR).
UnboundQueue<OptimizingCompiler*> input_queue_;
+ // Queue of recompilation tasks ready to be installed (excluding OSR).
UnboundQueue<OptimizingCompiler*> output_queue_;
- Mutex* install_mutex_;
+ // List of all OSR related recompilation tasks (both incoming and ready ones).
+ List<OptimizingCompiler*> osr_candidates_;
+ // List of recompilation tasks ready for OSR.
+ List<OptimizingCompiler*> ready_for_osr_;
+
volatile AtomicWord stop_thread_;
volatile Atomic32 queue_length_;
- int64_t time_spent_compiling_;
- int64_t time_spent_total_;
+ TimeDelta time_spent_compiling_;
+ TimeDelta time_spent_total_;
+
+ Mutex osr_list_mutex_;
+ int osr_hits_;
+ int osr_attempts_;
+
+ static const int kReadyForOSRLimit = 4;
};
} } // namespace v8::internal
diff --git a/chromium/v8/src/parser.cc b/chromium/v8/src/parser.cc
index df568ef1bbb..05ae11e4291 100644
--- a/chromium/v8/src/parser.cc
+++ b/chromium/v8/src/parser.cc
@@ -542,6 +542,7 @@ Parser::Parser(CompilationInfo* info)
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
top_scope_(NULL),
+ original_scope_(NULL),
current_function_state_(NULL),
target_stack_(NULL),
extension_(info->extension()),
@@ -568,10 +569,13 @@ Parser::Parser(CompilationInfo* info)
FunctionLiteral* Parser::ParseProgram() {
- HistogramTimerScope timer(isolate()->counters()->parse());
+ HistogramTimerScope timer_scope(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
@@ -592,7 +596,7 @@ FunctionLiteral* Parser::ParseProgram() {
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
if (info()->is_eval()) {
PrintF("[parsing eval");
} else if (info()->script()->name()->IsString()) {
@@ -622,6 +626,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
if (!info->context().is_null()) {
scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
}
+ original_scope_ = scope;
if (info->is_eval()) {
if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
scope = NewScope(scope, EVAL_SCOPE);
@@ -682,6 +687,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral::kNotParenthesized,
FunctionLiteral::kNotGenerator);
result->set_ast_properties(factory()->visitor()->ast_properties());
+ result->set_dont_optimize_reason(
+ factory()->visitor()->dont_optimize_reason());
} else if (stack_overflow_) {
isolate()->StackOverflow();
}
@@ -695,10 +702,13 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
FunctionLiteral* Parser::ParseLazy() {
- HistogramTimerScope timer(isolate()->counters()->parse_lazy());
+ HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
// Initialize parser state.
@@ -718,7 +728,7 @@ FunctionLiteral* Parser::ParseLazy() {
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
}
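The OS::Ticks() arithmetic is replaced throughout the parser by ElapsedTimer, a wrapper over a monotonic clock added in platform/time.h elsewhere in this update. The same pattern expressed with std::chrono, as a sketch rather than V8's actual classes:

#include <chrono>
#include <cstdio>

int main() {
  auto start = std::chrono::steady_clock::now();  // cf. ElapsedTimer::Start()
  for (volatile int i = 0; i < 1000000; i++) {}   // stand-in for parsing work
  std::chrono::duration<double, std::milli> elapsed =
      std::chrono::steady_clock::now() - start;   // cf. Elapsed()
  std::printf("[parsing took %0.3f ms]\n", elapsed.count());
  return 0;
}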
@@ -749,6 +759,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
+ original_scope_ = scope;
FunctionState function_state(this, scope, isolate());
ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
@@ -3197,6 +3208,20 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
factory()->NewNumberLiteral(1),
position);
}
+ // The same idea for '-foo' => 'foo*(-1)'.
+ if (op == Token::SUB) {
+ return factory()->NewBinaryOperation(Token::MUL,
+ expression,
+ factory()->NewNumberLiteral(-1),
+ position);
+ }
+ // ...and one more time for '~foo' => 'foo^(~0)'.
+ if (op == Token::BIT_NOT) {
+ return factory()->NewBinaryOperation(Token::BIT_XOR,
+ expression,
+ factory()->NewNumberLiteral(~0),
+ position);
+ }
return factory()->NewUnaryOperation(op, expression, position);
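These rewrites let unary '-' and '~' reuse the binary-op machinery (the UNARY_OP_IC removal in objects.h above is the other half of the change). A quick check, in C++, of the integer and double identities the rewrite relies on under JS ToInt32/number semantics:

#include <cassert>
#include <cstdint>

int main() {
  // '~foo' => 'foo ^ (~0)': ~0 is all ones, and XOR with all ones flips
  // every bit, i.e. bitwise NOT of the ToInt32 value.
  int32_t x = 0x1234ABCD;
  assert((x ^ ~0) == ~x);
  // '-foo' => 'foo * (-1)': negation and multiplication by -1 agree on
  // doubles, including signed zero (0 * -1 and -0 are both -0).
  double d = 2.5;
  assert(d * -1.0 == -d);
  return 0;
}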
@@ -3721,8 +3746,9 @@ bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
}
-Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
- Factory* factory = Isolate::Current()->factory();
+Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
+ Expression* expression) {
+ Factory* factory = isolate->factory();
ASSERT(IsCompileTimeValue(expression));
Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
ObjectLiteral* object_literal = expression->AsObjectLiteral();
@@ -3761,7 +3787,7 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
return expression->AsLiteral()->value();
}
if (CompileTimeValue::IsCompileTimeValue(expression)) {
- return CompileTimeValue::GetValue(expression);
+ return CompileTimeValue::GetValue(isolate(), expression);
}
return isolate()->factory()->uninitialized_value();
}
@@ -4265,10 +4291,38 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Function declarations are function scoped in normal mode, so they are
// hoisted. In harmony block scoping mode they are block scoped, so they
// are not hoisted.
+ //
+  // One tricky case is function declarations in a local sloppy-mode eval:
+ // their declaration is hoisted, but they still see the local scope. E.g.,
+ //
+ // function() {
+ // var x = 0
+ // try { throw 1 } catch (x) { eval("function g() { return x }") }
+ // return g()
+ // }
+ //
+ // needs to return 1. To distinguish such cases, we need to detect
+ // (1) whether a function stems from a sloppy eval, and
+ // (2) whether it actually hoists across the eval.
+  // Unfortunately, we do not represent sloppy eval scopes, so we do not have
+  // either piece of information available directly, especially not when
+  // lazily compiling a function like 'g'. We hence rely on the following
+  // invariants:
+ // - (1) is the case iff the innermost scope of the deserialized scope chain
+ // under which we compile is _not_ a declaration scope. This holds because
+ // in all normal cases, function declarations are fully hoisted to a
+ // declaration scope and compiled relative to that.
+ // - (2) is the case iff the current declaration scope is still the original
+ // one relative to the deserialized scope chain. Otherwise we must be
+ // compiling a function in an inner declaration scope in the eval, e.g. a
+ // nested function, and hoisting works normally relative to that.
+ Scope* declaration_scope = top_scope_->DeclarationScope();
+ Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope =
- (function_type == FunctionLiteral::DECLARATION && !is_extended_mode())
- ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
- : NewScope(top_scope_, FUNCTION_SCOPE);
+ function_type == FunctionLiteral::DECLARATION && !is_extended_mode() &&
+ (original_scope_ == original_declaration_scope ||
+ declaration_scope != original_declaration_scope)
+ ? NewScope(declaration_scope, FUNCTION_SCOPE)
+ : NewScope(top_scope_, FUNCTION_SCOPE);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
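
The scope-selection condition above packs both invariants into one expression. A sketch of the same predicate with the invariants named explicitly (plain booleans standing in for the real Scope comparisons):

    // Hoist a declaration to the enclosing declaration scope unless we are
    // compiling inside a sloppy eval (invariant 1: the original scope chain
    // does not end in a declaration scope) and no inner declaration scope
    // has been entered since (invariant 2).
    bool HoistToDeclarationScope(bool is_declaration, bool extended_mode,
                                 bool original_is_declaration_scope,
                                 bool entered_inner_declaration_scope) {
      return is_declaration && !extended_mode &&
             (original_is_declaration_scope ||
              entered_inner_declaration_scope);
    }
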
@@ -4282,6 +4336,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
? FunctionLiteral::kIsGenerator
: FunctionLiteral::kNotGenerator;
AstProperties ast_properties;
+ BailoutReason dont_optimize_reason = kNoReason;
// Parse function body.
{ FunctionState function_state(this, scope, isolate());
top_scope_->SetScopeName(function_name);
@@ -4541,6 +4596,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CHECK_OK);
}
ast_properties = *factory()->visitor()->ast_properties();
+ dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
}
if (is_extended_mode()) {
@@ -4562,6 +4618,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
generator);
function_literal->set_function_token_position(function_token_position);
function_literal->set_ast_properties(&ast_properties);
+ function_literal->set_dont_optimize_reason(dont_optimize_reason);
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
@@ -4972,7 +5029,7 @@ RegExpParser::RegExpParser(FlatStringReader* in,
Handle<String>* error,
bool multiline,
Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
zone_(zone),
error_(error),
captures_(NULL),
@@ -5844,9 +5901,9 @@ int ScriptDataImpl::ReadNumber(byte** source) {
// Create a Scanner for the preparser to use as input, and preparse the source.
-ScriptDataImpl* PreParserApi::PreParse(Utf16CharacterStream* source) {
+ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate,
+ Utf16CharacterStream* source) {
CompleteParserRecorder recorder;
- Isolate* isolate = Isolate::Current();
HistogramTimerScope timer(isolate->counters()->pre_parse());
Scanner scanner(isolate->unicode_cache());
intptr_t stack_limit = isolate->stack_guard()->real_climit();
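
This hunk is one instance of a pattern applied throughout the patch: Isolate::Current() lookups in thread-local storage are replaced by an Isolate* threaded through the call. A minimal sketch of the two styles (hypothetical names, not the real V8 API):

    #include <cstdio>

    struct Isolate { int id; };

    namespace {
    thread_local Isolate g_current{1};
    Isolate* CurrentIsolate() { return &g_current; }  // hidden TLS lookup
    }  // namespace

    // Old style: the isolate dependency is invisible in the signature and
    // costs a TLS read at every call.
    void PreParseOld(const char* source) {
      Isolate* isolate = CurrentIsolate();
      std::printf("old: isolate %d parses '%s'\n", isolate->id, source);
    }

    // New style, as in the patch: the caller passes the isolate explicitly.
    void PreParseNew(Isolate* isolate, const char* source) {
      std::printf("new: isolate %d parses '%s'\n", isolate->id, source);
    }

    int main() {
      PreParseOld("x = 1");
      PreParseNew(CurrentIsolate(), "x = 1");
      return 0;
    }
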
diff --git a/chromium/v8/src/parser.h b/chromium/v8/src/parser.h
index 68a74b78a9a..783626ad190 100644
--- a/chromium/v8/src/parser.h
+++ b/chromium/v8/src/parser.h
@@ -170,7 +170,8 @@ class PreParserApi {
// This interface is here instead of in preparser.h because it instantiates a
// preparser recorder object that is suited to the parser's purposes. Also,
// the preparser doesn't know about ScriptDataImpl.
- static ScriptDataImpl* PreParse(Utf16CharacterStream* source);
+ static ScriptDataImpl* PreParse(Isolate* isolate,
+ Utf16CharacterStream* source);
};
@@ -855,6 +856,7 @@ class Parser BASE_EMBEDDED {
Scanner scanner_;
preparser::PreParser* reusable_preparser_;
Scope* top_scope_;
+ Scope* original_scope_; // for ES5 function declarations in sloppy eval
FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
v8::Extension* extension_;
@@ -893,7 +895,7 @@ class CompileTimeValue: public AllStatic {
static bool IsCompileTimeValue(Expression* expression);
// Get the value as a compile time value.
- static Handle<FixedArray> GetValue(Expression* expression);
+ static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
// Get the type of a compile time value returned by GetValue().
static LiteralType GetLiteralType(Handle<FixedArray> value);
diff --git a/chromium/v8/src/platform-cygwin.cc b/chromium/v8/src/platform-cygwin.cc
index 4c7b0175923..4d3b1e313e6 100644
--- a/chromium/v8/src/platform-cygwin.cc
+++ b/chromium/v8/src/platform-cygwin.cc
@@ -52,9 +52,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -76,31 +73,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
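
The deleted block implemented a conservative heap-membership test: each allocation widened a global [lowest, highest) envelope, and anything outside the envelope was provably not V8's. A simplified sketch of the technique (without the mutex the original held):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    static uintptr_t lowest_ever = UINTPTR_MAX;  // shrinks toward low addresses
    static uintptr_t highest_ever = 0;           // grows toward high addresses

    void UpdateAllocatedSpaceLimits(void* address, size_t size) {
      uintptr_t a = reinterpret_cast<uintptr_t>(address);
      lowest_ever = std::min(lowest_ever, a);
      highest_ever = std::max(highest_ever, a + size);
    }

    // Conservative: may answer "inside" for gaps between allocations, but
    // never answers "outside" for a live allocation.
    bool IsOutsideAllocatedSpace(void* address) {
      uintptr_t a = reinterpret_cast<uintptr_t>(address);
      return a < lowest_ever || a >= highest_ever;
    }
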
@@ -108,11 +80,10 @@ void* OS::Allocate(const size_t requested,
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -170,7 +141,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -181,7 +152,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -265,8 +235,9 @@ static void* GetRandomAddr() {
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
- uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
- | kAllocationRandomAddressMin;
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
return reinterpret_cast<void *>(address);
}
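
Only the randomness source changes here; the address arithmetic stays the same. A worked example of that arithmetic, assuming kPageSizeBits is 12 (4 KiB pages):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPageSizeBits = 12;          // assumed: 4 KiB pages
      const uintptr_t kMin = 0x04000000;     // kAllocationRandomAddressMin
      const uintptr_t kMax = 0x3FFF0000;     // kAllocationRandomAddressMax
      uintptr_t random = 0x1234;             // stand-in for NextInt()
      uintptr_t address = (random << kPageSizeBits) | kMin;
      address &= kMax;                       // keep the hint below the max
      std::printf("hint: %#zx\n", static_cast<size_t>(address));
      return 0;
    }
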
@@ -365,8 +336,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
return true;
}
@@ -375,7 +344,7 @@ bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
+ PAGE_NOACCESS)) {
return false;
}
return true;
@@ -397,87 +366,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class CygwinSemaphore : public Semaphore {
- public:
- explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void CygwinSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool CygwinSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new CygwinSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
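
The semaphore class removed above (and its siblings removed from the other platform files below) used two standard POSIX idioms: retry sem_wait() on EINTR, and turn a relative timeout into the absolute deadline sem_timedwait() expects. A sketch of both, using clock_gettime() in place of the original gettimeofday()/timeradd() pair:

    #include <cerrno>
    #include <semaphore.h>
    #include <time.h>

    // Blocking wait: a signal can interrupt sem_wait(), so loop on EINTR.
    void WaitForever(sem_t* sem) {
      while (sem_wait(sem) == -1 && errno == EINTR) {
        // Spurious wakeup caused by a signal; try again.
      }
    }

    // Timed wait: sem_timedwait() takes an absolute CLOCK_REALTIME deadline.
    bool WaitMicros(sem_t* sem, long timeout_us) {
      timespec ts;
      clock_gettime(CLOCK_REALTIME, &ts);
      ts.tv_sec += timeout_us / 1000000;
      ts.tv_nsec += (timeout_us % 1000000) * 1000;  // micros -> nanos
      if (ts.tv_nsec >= 1000000000) {               // carry into seconds
        ts.tv_sec += 1;
        ts.tv_nsec -= 1000000000;
      }
      while (true) {
        if (sem_timedwait(sem, &ts) == 0) return true;  // got the semaphore
        if (errno == ETIMEDOUT) return false;           // deadline passed
        // Otherwise EINTR: retry with the same absolute deadline.
      }
    }
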
diff --git a/chromium/v8/src/platform-freebsd.cc b/chromium/v8/src/platform-freebsd.cc
index e0917fa567a..d81827805a4 100644
--- a/chromium/v8/src/platform-freebsd.cc
+++ b/chromium/v8/src/platform-freebsd.cc
@@ -63,9 +63,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -84,31 +81,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool executable) {
@@ -117,11 +89,10 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -184,7 +155,7 @@ static unsigned StringToLong(char* buffer) {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
@@ -218,7 +189,7 @@ void OS::LogSharedLibraryAddresses() {
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+    LOG(isolate, SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
}
@@ -345,8 +316,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -371,78 +340,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class FreeBSDSemaphore : public Semaphore {
- public:
- explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void FreeBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-bool FreeBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new FreeBSDSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/platform-linux.cc b/chromium/v8/src/platform-linux.cc
index 5c252bbf884..b8b96025e1f 100644
--- a/chromium/v8/src/platform-linux.cc
+++ b/chromium/v8/src/platform-linux.cc
@@ -76,143 +76,7 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
#ifdef __arm__
-static bool CPUInfoContainsString(const char * search_string) {
- const char* file_name = "/proc/cpuinfo";
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r"))) {
- OS::PrintError("Failed to open /proc/cpuinfo\n");
- return false;
- }
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- // Simple detection of VFP at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to ARM (mid 2009), no similar
- // facility is universally available on the ARM architectures,
- // so it's up to individual OSes to provide such.
- switch (feature) {
- case VFP3:
- search_string = "vfpv3";
- break;
- case NEON:
- search_string = "neon";
- break;
- case ARMv7:
- search_string = "ARMv7";
- break;
- case SUDIV:
- search_string = "idiva";
- break;
- case VFP32DREGS:
- // This case is handled specially below.
- break;
- default:
- UNREACHABLE();
- }
-
- if (feature == VFP32DREGS) {
- return ArmCpuHasFeature(VFP3) && !CPUInfoContainsString("d16");
- }
-
- if (CPUInfoContainsString(search_string)) {
- return true;
- }
-
- if (feature == VFP3) {
- // Some old kernels will report vfp not vfpv3. Here we make a last attempt
- // to detect vfpv3 by checking for vfp *and* neon, since neon is only
- // available on architectures with vfpv3.
- // Checking neon on its own is not enough as it is possible to have neon
- // without vfp.
- if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
- return true;
- }
- }
-
- return false;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- static bool use_cached_value = false;
- static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
- if (use_cached_value) {
- return cached_value;
- }
- if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
- cached_value = ARM_IMPLEMENTER;
- } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
- cached_value = QUALCOMM_IMPLEMENTER;
- } else {
- cached_value = UNKNOWN_IMPLEMENTER;
- }
- use_cached_value = true;
- return cached_value;
-}
-
-
-CpuPart OS::GetCpuPart(CpuImplementer implementer) {
- static bool use_cached_value = false;
- static CpuPart cached_value = CPU_UNKNOWN;
- if (use_cached_value) {
- return cached_value;
- }
- if (implementer == ARM_IMPLEMENTER) {
- if (CPUInfoContainsString("CPU part\t: 0xc0f")) {
- cached_value = CORTEX_A15;
- } else if (CPUInfoContainsString("CPU part\t: 0xc0c")) {
- cached_value = CORTEX_A12;
- } else if (CPUInfoContainsString("CPU part\t: 0xc09")) {
- cached_value = CORTEX_A9;
- } else if (CPUInfoContainsString("CPU part\t: 0xc08")) {
- cached_value = CORTEX_A8;
- } else if (CPUInfoContainsString("CPU part\t: 0xc07")) {
- cached_value = CORTEX_A7;
- } else if (CPUInfoContainsString("CPU part\t: 0xc05")) {
- cached_value = CORTEX_A5;
- } else {
- cached_value = CPU_UNKNOWN;
- }
- } else {
- cached_value = CPU_UNKNOWN;
- }
- use_cached_value = true;
- return cached_value;
-}
-
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
@@ -239,7 +103,8 @@ bool OS::ArmUsingHardFloat() {
#else
#if defined(__ARM_PCS_VFP)
return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+ !defined(__VFP_FP__)
return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for." \
@@ -254,60 +119,6 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-#ifdef __mips__
-bool OS::MipsCpuHasFeature(CpuFeature feature) {
- const char* search_string = NULL;
- const char* file_name = "/proc/cpuinfo";
- // Simple detection of FPU at runtime for Linux.
- // It is based on /proc/cpuinfo, which reveals hardware configuration
- // to user-space applications. According to MIPS (early 2010), no similar
- // facility is universally available on the MIPS architectures,
- // so it's up to individual OSes to provide such.
- //
- // This is written as a straight shot one pass parser
- // and not using STL string and ifstream because,
- // on Linux, it's reading from a (non-mmap-able)
- // character special device.
-
- switch (feature) {
- case FPU:
- search_string = "FPU";
- break;
- default:
- UNREACHABLE();
- }
-
- FILE* f = NULL;
- const char* what = search_string;
-
- if (NULL == (f = fopen(file_name, "r"))) {
- OS::PrintError("Failed to open /proc/cpuinfo\n");
- return false;
- }
-
- int k;
- while (EOF != (k = fgetc(f))) {
- if (k == *what) {
- ++what;
- while ((*what != '\0') && (*what == fgetc(f))) {
- ++what;
- }
- if (*what == '\0') {
- fclose(f);
- return true;
- } else {
- what = search_string;
- }
- }
- }
- fclose(f);
-
- // Did not find string in the proc file.
- return false;
-}
-#endif // def __mips__
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -326,31 +137,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -364,7 +150,6 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -435,7 +220,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -446,7 +231,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -658,7 +442,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
return false;
}
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -682,88 +465,4 @@ bool VirtualMemory::HasLazyCommits() {
return true;
}
-
-class LinuxSemaphore : public Semaphore {
- public:
- explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void LinuxSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool LinuxSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result > 0) {
- // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
- errno = result;
- result = -1;
- }
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new LinuxSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/platform-macos.cc b/chromium/v8/src/platform-macos.cc
index 6135cd13740..67cc96f9379 100644
--- a/chromium/v8/src/platform-macos.cc
+++ b/chromium/v8/src/platform-macos.cc
@@ -79,34 +79,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory analysis
@@ -131,7 +103,6 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -202,7 +173,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
@@ -221,7 +192,7 @@ void OS::LogSharedLibraryAddresses() {
if (code_ptr == NULL) continue;
const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- LOG(Isolate::Current(),
+ LOG(isolate,
SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
}
@@ -366,8 +337,6 @@ bool VirtualMemory::CommitRegion(void* address,
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(address, size);
return true;
}
@@ -391,65 +360,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class MacOSSemaphore : public Semaphore {
- public:
- explicit MacOSSemaphore(int count) {
- int r;
- r = semaphore_create(mach_task_self(),
- &semaphore_,
- SYNC_POLICY_FIFO,
- count);
- ASSERT(r == KERN_SUCCESS);
- }
-
- ~MacOSSemaphore() {
- int r;
- r = semaphore_destroy(mach_task_self(), semaphore_);
- ASSERT(r == KERN_SUCCESS);
- }
-
- void Wait() {
- int r;
- do {
- r = semaphore_wait(semaphore_);
- ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
- } while (r == KERN_ABORTED);
- }
-
- bool Wait(int timeout);
-
- void Signal() { semaphore_signal(semaphore_); }
-
- private:
- semaphore_t semaphore_;
-};
-
-
-bool MacOSSemaphore::Wait(int timeout) {
- mach_timespec_t ts;
- ts.tv_sec = timeout / 1000000;
- ts.tv_nsec = (timeout % 1000000) * 1000;
- return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new MacOSSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/platform-nullos.cc b/chromium/v8/src/platform-nullos.cc
deleted file mode 100644
index dd5a3ddb32f..00000000000
--- a/chromium/v8/src/platform-nullos.cc
+++ /dev/null
@@ -1,573 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for NULLOS goes here
-
-// Minimal include to get access to abort, fprintf and friends for bootstrapping
-// messages.
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "platform.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Give V8 the opportunity to override the default ceil behaviour.
-double ceiling(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Give V8 the opportunity to override the default fmod behavior.
-double modulo(double x, double y) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_sin(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_cos(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_tan(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-double fast_log(double x) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Initialize OS class early in the V8 startup.
-void OS::SetUp() {
- // Seed the random number generator.
- UNIMPLEMENTED();
-}
-
-
-void OS::PostSetUp() {
- UNIMPLEMENTED();
-}
-
-
-void OS::TearDown() {
- UNIMPLEMENTED();
-}
-
-
-// Returns the accumulated user time for thread.
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
- UNIMPLEMENTED();
- *secs = 0;
- *usecs = 0;
- return 0;
-}
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns ticks in microsecond resolution.
-int64_t OS::Ticks() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time) {
- UNIMPLEMENTED();
- return "<none>";
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given time.
-double OS::DaylightSavingsOffset(double time) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::GetLastError() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
- UNIMPLEMENTED();
-}
-
-
-// Print (debug) message to console.
-void OS::VPrint(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
- va_list args;
- va_start(args, format);
- VFPrint(out, format, args);
- va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
- vfprintf(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
- // Minimalistic implementation for bootstrapping.
- va_list args;
- va_start(args, format);
- VPrintError(format, args);
- va_end(args);
-}
-
-
-// Print error message to console.
-void OS::VPrintError(const char* format, va_list args) {
- // Minimalistic implementation for bootstrapping.
- vfprintf(stderr, format, args);
-}
-
-
-int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0;
-}
-
-
-double OS::nan_value() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-CpuImplementer OS::GetCpuImplementer() {
- UNIMPLEMENTED();
-}
-
-
-CpuPart OS::GetCpuPart(CpuImplementer implementer) {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
- UNIMPLEMENTED();
-}
-
-
-bool OS::ArmUsingHardFloat() {
- UNIMPLEMENTED();
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-size_t OS::AllocateAlignment() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::Free(void* buf, const size_t length) {
- // TODO(1240712): potential system call return value which is ignored here.
- UNIMPLEMENTED();
-}
-
-
-void OS::Guard(void* address, const size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Sleep(int milliseconds) {
- UNIMPLEMENTED();
-}
-
-
-int OS::NumberOfCores() {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-void OS::Abort() {
- // Minimalistic implementation for bootstrapping.
- abort();
-}
-
-
-void OS::DebugBreak() {
- UNIMPLEMENTED();
-}
-
-
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
- void* initial) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void OS::LogSharedLibraryAddresses() {
- UNIMPLEMENTED();
-}
-
-
-void OS::SignalCodeMovingGC() {
- UNIMPLEMENTED();
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-VirtualMemory::VirtualMemory() {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::VirtualMemory(size_t size) {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- UNIMPLEMENTED();
-}
-
-
-VirtualMemory::~VirtualMemory() {
- UNIMPLEMENTED();
-}
-
-
-bool VirtualMemory::IsReserved() {
- UNIMPLEMENTED();
- return false;
-}
-
-
-void VirtualMemory::Reset() {
- UNIMPLEMENTED();
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
- UNIMPLEMENTED();
- return false;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-
- void* pd_data_;
-};
-
-
-Thread::Thread(const Options& options)
- : data_(new PlatformData()),
- stack_size_(options.stack_size),
- start_semaphore_(NULL) {
- set_name(options.name);
- UNIMPLEMENTED();
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData()),
- stack_size_(0) {
- set_name(name);
- UNIMPLEMENTED();
-}
-
-
-Thread::~Thread() {
- delete data_;
- UNIMPLEMENTED();
-}
-
-
-void Thread::set_name(const char* name) {
- strncpy(name_, name, sizeof(name_));
- name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
- UNIMPLEMENTED();
-}
-
-
-void Thread::Join() {
- UNIMPLEMENTED();
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
- UNIMPLEMENTED();
- return static_cast<LocalStorageKey>(0);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- UNIMPLEMENTED();
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- UNIMPLEMENTED();
-}
-
-
-void Thread::YieldCPU() {
- UNIMPLEMENTED();
-}
-
-
-class NullMutex : public Mutex {
- public:
- NullMutex() : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullMutex() {
- UNIMPLEMENTED();
- }
-
- virtual int Lock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- virtual int Unlock() {
- UNIMPLEMENTED();
- return 0;
- }
-
- private:
- void* data_;
-};
-
-
-Mutex* OS::CreateMutex() {
- UNIMPLEMENTED();
- return new NullMutex();
-}
-
-
-class NullSemaphore : public Semaphore {
- public:
- explicit NullSemaphore(int count) : data_(NULL) {
- UNIMPLEMENTED();
- }
-
- virtual ~NullSemaphore() {
- UNIMPLEMENTED();
- }
-
- virtual void Wait() {
- UNIMPLEMENTED();
- }
-
- virtual void Signal() {
- UNIMPLEMENTED();
- }
- private:
- void* data_;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- UNIMPLEMENTED();
- return new NullSemaphore(count);
-}
-
-
-class ProfileSampler::PlatformData : public Malloced {
- public:
- PlatformData() {
- UNIMPLEMENTED();
- }
-};
-
-
-ProfileSampler::ProfileSampler(int interval) {
- UNIMPLEMENTED();
- // Shared setup follows.
- data_ = new PlatformData();
- interval_ = interval;
- active_ = false;
-}
-
-
-ProfileSampler::~ProfileSampler() {
- UNIMPLEMENTED();
- // Shared tear down follows.
- delete data_;
-}
-
-
-void ProfileSampler::Start() {
- UNIMPLEMENTED();
-}
-
-
-void ProfileSampler::Stop() {
- UNIMPLEMENTED();
-}
-
-
-} } // namespace v8::internal
diff --git a/chromium/v8/src/platform-openbsd.cc b/chromium/v8/src/platform-openbsd.cc
index e59160109f0..30a484f4b30 100644
--- a/chromium/v8/src/platform-openbsd.cc
+++ b/chromium/v8/src/platform-openbsd.cc
@@ -61,9 +61,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -82,31 +79,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -120,7 +92,6 @@ void* OS::Allocate(const size_t requested,
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -178,7 +149,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -189,7 +160,6 @@ void OS::LogSharedLibraryAddresses() {
const int kLibNameLen = FILENAME_MAX + 1;
char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
- i::Isolate* isolate = ISOLATE;
// This loop will terminate once the scanning hits an EOF.
while (true) {
uintptr_t start, end;
@@ -402,8 +372,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -428,87 +396,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class OpenBSDSemaphore : public Semaphore {
- public:
- explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void OpenBSDSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-bool OpenBSDSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-
- int to = ts.tv_sec;
-
- while (true) {
- int result = sem_trywait(&sem_);
- if (result == 0) return true; // Successfully got semaphore.
- if (!to) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- usleep(ts.tv_nsec / 1000);
- to--;
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new OpenBSDSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator. We preserve microsecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/platform-posix.cc b/chromium/v8/src/platform-posix.cc
index 13b819bd1e7..fe27eaf71f4 100644
--- a/chromium/v8/src/platform-posix.cc
+++ b/chromium/v8/src/platform-posix.cc
@@ -69,6 +69,7 @@
#include "v8.h"
#include "codegen.h"
+#include "isolate-inl.h"
#include "platform.h"
namespace v8 {
@@ -79,11 +80,11 @@ static const pthread_t kNoThread = (pthread_t) 0;
uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if defined(__APPLE__)
+#if V8_OS_MACOSX
// Mac OS X requires all these to install so we can assume they are present.
// These constants are defined by the CPUid instructions.
const uint64_t one = 1;
- return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
+ return (one << SSE2) | (one << CMOV);
#else
return 0; // Nothing special about the other systems.
#endif
@@ -152,7 +153,7 @@ void OS::ProtectCode(void* address, const size_t size) {
void OS::Guard(void* address, const size_t size) {
#if defined(__CYGWIN__)
DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
#else
mprotect(address, size, PROT_NONE);
#endif
@@ -171,17 +172,14 @@ void* OS::GetRandomMmapAddr() {
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
+ uintptr_t raw_addr;
+ isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
#if V8_TARGET_ARCH_X64
- uint64_t rnd1 = V8::RandomPrivate(isolate);
- uint64_t rnd2 = V8::RandomPrivate(isolate);
- uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
- uint32_t raw_addr = V8::RandomPrivate(isolate);
-
raw_addr &= 0x3ffff000;
# ifdef __sun
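
On x64 the randomized hint has to land inside the canonical 48-bit address space, so the fully random word is masked down to 46 page-aligned bits. A sketch of the fill-and-mask step (std::rand() stands in for the isolate's random_number_generator()):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      uint64_t raw_addr = 0;
      // Stand-in for NextBytes(&raw_addr, sizeof(raw_addr)).
      for (size_t i = 0; i < sizeof(raw_addr); ++i)
        raw_addr = (raw_addr << 8) | static_cast<uint64_t>(std::rand() & 0xff);
      raw_addr &= UINT64_C(0x3ffffffff000);  // 46 bits, 4 KiB aligned
      std::printf("hint: %#llx\n", static_cast<unsigned long long>(raw_addr));
      return 0;
    }
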
@@ -219,11 +217,6 @@ void OS::Sleep(int milliseconds) {
}
-int OS::NumberOfCores() {
- return sysconf(_SC_NPROCESSORS_ONLN);
-}
-
-
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
if (FLAG_break_on_abort) {
@@ -318,19 +311,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
double OS::TimeCurrentMillis() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0) return 0.0;
- return (static_cast<double>(tv.tv_sec) * 1000) +
- (static_cast<double>(tv.tv_usec) / 1000);
-}
-
-
-int64_t OS::Ticks() {
- // gettimeofday has microsecond resolution.
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0)
- return 0;
- return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
+ return Time::Now().ToJsTime();
}
@@ -756,244 +737,4 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
}
-class POSIXMutex : public Mutex {
- public:
- POSIXMutex() {
- pthread_mutexattr_t attr;
- memset(&attr, 0, sizeof(attr));
- int result = pthread_mutexattr_init(&attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_destroy(&attr);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~POSIXMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
-
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new POSIXMutex();
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX socket support.
-//
-
-class POSIXSocket : public Socket {
- public:
- explicit POSIXSocket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (IsValid()) {
- // Allow rapid reuse.
- static const int kOn = 1;
- int ret = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- &kOn, sizeof(kOn));
- ASSERT(ret == 0);
- USE(ret);
- }
- }
- explicit POSIXSocket(int socket): socket_(socket) { }
- virtual ~POSIXSocket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
-  // Data Transmission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != -1; }
-
- private:
- int socket_;
-};
-
-
-bool POSIXSocket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- BitCast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool POSIXSocket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* POSIXSocket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- int socket;
- do {
- socket = accept(socket_, NULL, NULL);
- } while (socket == -1 && errno == EINTR);
-
- if (socket == -1) {
- return NULL;
- } else {
- return new POSIXSocket(socket);
- }
-}
-
-
-bool POSIXSocket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- do {
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
- } while (status == -1 && errno == EINTR);
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool POSIXSocket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SHUT_RDWR);
- close(socket_);
- socket_ = -1;
- return status == 0;
- }
- return true;
-}
-
-
-int POSIXSocket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else if (errno != EINTR) {
- return 0;
- }
- }
- return written;
-}
-
-
-int POSIXSocket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status;
- do {
- status = recv(socket_, data, len, 0);
- } while (status == -1 && errno == EINTR);
- return (status < 0) ? 0 : status;
-}
-
-
-bool POSIXSocket::SetReuseAddress(bool reuse_address) {
- int on = reuse_address ? 1 : 0;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
- return status == 0;
-}
-
-
-bool Socket::SetUp() {
- // Nothing to do on POSIX.
- return true;
-}
-
-
-int Socket::LastError() {
- return errno;
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new POSIXSocket();
-}
-
-
} } // namespace v8::internal
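
The removed POSIXSocket::Send() is a textbook full-write loop: send() may accept fewer bytes than requested, so the remainder is resent and EINTR is retried. A self-contained sketch (it returns -1 on hard errors, whereas the original returned 0):

    #include <cerrno>
    #include <sys/socket.h>
    #include <sys/types.h>

    int SendAll(int fd, const char* data, int len) {
      int written = 0;
      while (written < len) {
        ssize_t n = send(fd, data + written, len - written, 0);
        if (n > 0) {
          written += static_cast<int>(n);  // partial write: send the rest
        } else if (n == 0) {
          break;                           // no progress; report what went out
        } else if (errno != EINTR) {
          return -1;                       // hard error
        }
        // Otherwise EINTR: interrupted by a signal, retry the same chunk.
      }
      return written;
    }
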
diff --git a/chromium/v8/src/platform-solaris.cc b/chromium/v8/src/platform-solaris.cc
index b1d88af2939..f082af12540 100644
--- a/chromium/v8/src/platform-solaris.cc
+++ b/chromium/v8/src/platform-solaris.cc
@@ -81,9 +81,6 @@ namespace v8 {
namespace internal {
-static Mutex* limit_mutex = NULL;
-
-
const char* OS::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -99,31 +96,6 @@ double OS::LocalTimeOffset() {
}
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
- return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
@@ -132,11 +104,10 @@ void* OS::Allocate(const size_t requested,
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
@@ -194,7 +165,7 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
}
@@ -366,8 +337,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
kMmapFdOffset)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, size);
return true;
}
@@ -392,100 +361,4 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-
-class SolarisSemaphore : public Semaphore {
- public:
- explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
- virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
-
- virtual void Wait();
- virtual bool Wait(int timeout);
- virtual void Signal() { sem_post(&sem_); }
- private:
- sem_t sem_;
-};
-
-
-void SolarisSemaphore::Wait() {
- while (true) {
- int result = sem_wait(&sem_);
- if (result == 0) return; // Successfully got semaphore.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
- (ts)->tv_sec = (tv)->tv_sec; \
- (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-} while (false)
-#endif
-
-
-#ifndef timeradd
-#define timeradd(a, b, result) \
- do { \
- (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
- (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
- if ((result)->tv_usec >= 1000000) { \
- ++(result)->tv_sec; \
- (result)->tv_usec -= 1000000; \
- } \
- } while (0)
-#endif
-
-
-bool SolarisSemaphore::Wait(int timeout) {
- const long kOneSecondMicros = 1000000; // NOLINT
-
- // Split timeout into second and nanosecond parts.
- struct timeval delta;
- delta.tv_usec = timeout % kOneSecondMicros;
- delta.tv_sec = timeout / kOneSecondMicros;
-
- struct timeval current_time;
- // Get the current time.
- if (gettimeofday(&current_time, NULL) == -1) {
- return false;
- }
-
- // Calculate time for end of timeout.
- struct timeval end_time;
- timeradd(&current_time, &delta, &end_time);
-
- struct timespec ts;
- TIMEVAL_TO_TIMESPEC(&end_time, &ts);
- // Wait for semaphore signalled or timeout.
- while (true) {
- int result = sem_timedwait(&sem_, &ts);
- if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
- CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
- }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new SolarisSemaphore(count);
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly will cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/platform-win32.cc b/chromium/v8/src/platform-win32.cc
index 292c24a3dac..ea4f7ea11f4 100644
--- a/chromium/v8/src/platform-win32.cc
+++ b/chromium/v8/src/platform-win32.cc
@@ -38,12 +38,12 @@
#endif // MINGW_HAS_SECURE_API
#endif // __MINGW32__
-#define V8_WIN32_HEADERS_FULL
#include "win32-headers.h"
#include "v8.h"
#include "codegen.h"
+#include "isolate-inl.h"
#include "platform.h"
#include "simulator.h"
#include "vm-state-inl.h"
@@ -125,13 +125,6 @@ int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
#endif // __MINGW32__
-// Generate a pseudo-random number in the range 0-2^31-1. Usually
-// defined in stdlib.h. Missing in both Microsoft Visual Studio C++ and MinGW.
-int random() {
- return rand();
-}
-
-
namespace v8 {
namespace internal {
@@ -145,8 +138,6 @@ double ceiling(double x) {
}
-static Mutex* limit_mutex = NULL;
-
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
@@ -246,19 +237,15 @@ void MathSetup() {
// timestamps are represented as doubles in milliseconds since 00:00:00 UTC,
// January 1, 1970.
-class Time {
+class Win32Time {
public:
// Constructors.
- Time();
- explicit Time(double jstime);
- Time(int year, int mon, int day, int hour, int min, int sec);
+ explicit Win32Time(double jstime);
+ Win32Time(int year, int mon, int day, int hour, int min, int sec);
// Convert timestamp to JavaScript representation.
double ToJSTime();
- // Set timestamp to current time.
- void SetToCurrentTime();
-
// Returns the local timezone offset in milliseconds east of UTC. This is
// the number of milliseconds you must add to UTC to get local time, i.e.
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
@@ -300,10 +287,6 @@ class Time {
// Return whether or not daylight savings time is in effect at this time.
bool InDST();
- // Return the difference (in milliseconds) between this timestamp and
- // another timestamp.
- int64_t Diff(Time* other);
-
// Accessor for FILETIME representation.
FILETIME& ft() { return time_.ft_; }
@@ -325,26 +308,20 @@ class Time {
// Static variables.
-bool Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Time::tzinfo_;
-char Time::std_tz_name_[kTzNameSize];
-char Time::dst_tz_name_[kTzNameSize];
-
-
-// Initialize timestamp to start of epoch.
-Time::Time() {
- t() = 0;
-}
+bool Win32Time::tz_initialized_ = false;
+TIME_ZONE_INFORMATION Win32Time::tzinfo_;
+char Win32Time::std_tz_name_[kTzNameSize];
+char Win32Time::dst_tz_name_[kTzNameSize];
// Initialize timestamp from a JavaScript timestamp.
-Time::Time(double jstime) {
+Win32Time::Win32Time(double jstime) {
t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
}
// Initialize timestamp from date/time components.
-Time::Time(int year, int mon, int day, int hour, int min, int sec) {
+Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
SYSTEMTIME st;
st.wYear = year;
st.wMonth = mon;
@@ -358,14 +335,14 @@ Time::Time(int year, int mon, int day, int hour, int min, int sec) {
// Convert timestamp to JavaScript timestamp.
-double Time::ToJSTime() {
+double Win32Time::ToJSTime() {
return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
}
// Guess the name of the timezone from the bias.
// The guess is very biased towards the northern hemisphere.
-const char* Time::GuessTimezoneNameFromBias(int bias) {
+const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
static const int kHour = 60;
switch (-bias) {
case -9*kHour: return "Alaska";
@@ -390,7 +367,7 @@ const char* Time::GuessTimezoneNameFromBias(int bias) {
// Initialize timezone information. The timezone information is obtained from
// windows. If we cannot get the timezone information we fall back to CET.
// Please notice that this code is not thread-safe.
-void Time::TzSet() {
+void Win32Time::TzSet() {
// Just return if timezone information has already been initialized.
if (tz_initialized_) return;
@@ -439,78 +416,16 @@ void Time::TzSet() {
}
-// Return the difference in milliseconds between this and another timestamp.
-int64_t Time::Diff(Time* other) {
- return (t() - other->t()) / kTimeScaler;
-}
-
-
-// Set timestamp to current time.
-void Time::SetToCurrentTime() {
- // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
- // Because we're fast, we like fast timers which have at least a
- // 1ms resolution.
- //
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for v8 wants fast
- // timers, it can use timeBeginPeriod to increase the resolution.
- //
- // Using timeGetTime() has a drawback because it is a 32-bit value
- // and hence rolls over every ~49 days.
- //
- // To use the clock, we use GetSystemTimeAsFileTime as our base;
- // and then use timeGetTime to extrapolate current time from the
- // start time. To deal with rollovers, we resync the clock
- // any time more than kMaxClockElapsedTime has passed or
- // whenever timeGetTime creates a rollover.
-
- static bool initialized = false;
- static TimeStamp init_time;
- static DWORD init_ticks;
- static const int64_t kHundredNanosecondsPerSecond = 10000000;
- static const int64_t kMaxClockElapsedTime =
- 60*kHundredNanosecondsPerSecond; // 1 minute
-
- // If we are uninitialized, we need to resync the clock.
- bool needs_resync = !initialized;
-
- // Get the current time.
- TimeStamp time_now;
- GetSystemTimeAsFileTime(&time_now.ft_);
- DWORD ticks_now = timeGetTime();
-
- // Check if we need to resync due to clock rollover.
- needs_resync |= ticks_now < init_ticks;
-
- // Check if we need to resync due to elapsed time.
- needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
- // Check if we need to resync due to backwards time change.
- needs_resync |= time_now.t_ < init_time.t_;
-
- // Resync the clock if necessary.
- if (needs_resync) {
- GetSystemTimeAsFileTime(&init_time.ft_);
- init_ticks = ticks_now = timeGetTime();
- initialized = true;
- }
-
- // Finally, compute the actual time. Why is this so hard.
- DWORD elapsed = ticks_now - init_ticks;
- this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
-
-
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
-int64_t Time::LocalOffset() {
+int64_t Win32Time::LocalOffset() {
// Initialize timezone information, if needed.
TzSet();
- Time rounded_to_second(*this);
+ Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
1000 * kTimeScaler;
// Convert to local time using POSIX localtime function.
@@ -541,7 +456,7 @@ int64_t Time::LocalOffset() {
// Return whether or not daylight savings time is in effect at this time.
-bool Time::InDST() {
+bool Win32Time::InDST() {
// Initialize timezone information, if needed.
TzSet();
@@ -565,14 +480,14 @@ bool Time::InDST() {
// Return the daylight savings time offset for this time.
-int64_t Time::DaylightSavingsOffset() {
+int64_t Win32Time::DaylightSavingsOffset() {
return InDST() ? 60 * kMsPerMinute : 0;
}
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
-char* Time::LocalTimezone() {
+char* Win32Time::LocalTimezone() {
// Return the standard or DST time zone name based on whether daylight
// saving is in effect at the given time.
return InDST() ? dst_tz_name_ : std_tz_name_;
@@ -614,22 +529,14 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
double OS::TimeCurrentMillis() {
- Time t;
- t.SetToCurrentTime();
- return t.ToJSTime();
-}
-
-
-// Returns the tickcounter based on timeGetTime.
-int64_t OS::Ticks() {
- return timeGetTime() * 1000; // Convert to microseconds.
+ return Time::Now().ToJsTime();
}
// Returns a string identifying the current timezone taking into
// account daylight saving.
const char* OS::LocalTimezone(double time) {
- return Time(time).LocalTimezone();
+ return Win32Time(time).LocalTimezone();
}
@@ -637,7 +544,7 @@ const char* OS::LocalTimezone(double time) {
// taking daylight savings time into account.
double OS::LocalTimeOffset() {
// Use current time, rounded to the millisecond.
- Time t(TimeCurrentMillis());
+ Win32Time t(TimeCurrentMillis());
// Time::LocalOffset includes any daylight savings offset, so subtract it.
return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
}
@@ -646,7 +553,7 @@ double OS::LocalTimeOffset() {
// Returns the daylight savings offset in milliseconds for the given
// time.
double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Time(time).DaylightSavingsOffset();
+ int64_t offset = Win32Time(time).DaylightSavingsOffset();
return static_cast<double>(offset);
}
@@ -835,35 +742,6 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap. The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
- ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
-
- lowest_ever_allocated = Min(lowest_ever_allocated, address);
- highest_ever_allocated =
- Max(highest_ever_allocated,
- reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* pointer) {
- if (pointer < lowest_ever_allocated || pointer >= highest_ever_allocated)
- return true;
- // Ask the Windows API
- if (IsBadWritePtr(pointer, 1))
- return true;
- return false;
-}
-
// Get the system's page size used by VirtualAlloc() or the next power
// of two. The reason for always returning a power of two is that the
@@ -910,8 +788,9 @@ void* OS::GetRandomMmapAddr() {
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
- uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
- | kAllocationRandomAddressMin;
+ uintptr_t address =
+ (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
+ kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
return reinterpret_cast<void *>(address);
}
@@ -950,14 +829,13 @@ void* OS::Allocate(const size_t requested,
prot);
if (mbase == NULL) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
+ LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
return NULL;
}
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
return mbase;
}
@@ -982,7 +860,7 @@ void OS::ProtectCode(void* address, const size_t size) {
void OS::Guard(void* address, const size_t size) {
DWORD oldprotect;
- VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
+ VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
@@ -991,13 +869,6 @@ void OS::Sleep(int milliseconds) {
}
-int OS::NumberOfCores() {
- SYSTEM_INFO info;
- GetSystemInfo(&info);
- return info.dwNumberOfProcessors;
-}
-
-
void OS::Abort() {
if (IsDebuggerPresent() || FLAG_break_on_abort) {
DebugBreak();
@@ -1010,6 +881,9 @@ void OS::Abort() {
void OS::DebugBreak() {
#ifdef _MSC_VER
+ // To avoid depending on the Visual Studio runtime, the following code
+ // can be used instead:
+ //   __asm { int 3 }
__debugbreak();
#else
::DebugBreak();
@@ -1255,7 +1129,7 @@ TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
// Load the symbols for generating stack traces.
-static bool LoadSymbols(HANDLE process_handle) {
+static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
static bool symbols_loaded = false;
if (symbols_loaded) return true;
@@ -1304,7 +1178,7 @@ static bool LoadSymbols(HANDLE process_handle) {
if (err != ERROR_MOD_NOT_FOUND &&
err != ERROR_INVALID_HANDLE) return false;
}
- LOG(i::Isolate::Current(),
+ LOG(isolate,
SharedLibraryEvent(
module_entry.szExePath,
reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
@@ -1319,14 +1193,14 @@ static bool LoadSymbols(HANDLE process_handle) {
}
-void OS::LogSharedLibraryAddresses() {
+void OS::LogSharedLibraryAddresses(Isolate* isolate) {
// SharedLibraryEvents are logged when loading symbol information.
// Only the shared libraries loaded at the time of the call to
// LogSharedLibraryAddresses are logged. DLLs loaded after
// initialization are not accounted for.
if (!LoadDbgHelpAndTlHelp32()) return;
HANDLE process_handle = GetCurrentProcess();
- LoadSymbols(process_handle);
+ LoadSymbols(isolate, process_handle);
}
@@ -1352,7 +1226,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
HANDLE thread_handle = GetCurrentThread();
// Read the symbols.
- if (!LoadSymbols(process_handle)) return kStackWalkError;
+ if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError;
// Capture current context.
CONTEXT context;
@@ -1458,7 +1332,7 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
#pragma warning(pop)
#else // __MINGW32__
-void OS::LogSharedLibraryAddresses() { }
+void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
void OS::SignalCodeMovingGC() { }
int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
#endif // __MINGW32__
@@ -1562,7 +1436,7 @@ bool VirtualMemory::Guard(void* address) {
if (NULL == VirtualAlloc(address,
OS::CommitPageSize(),
MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
+ PAGE_NOACCESS)) {
return false;
}
return true;
@@ -1579,8 +1453,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
return false;
}
-
- UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
return true;
}
@@ -1703,297 +1575,4 @@ void Thread::YieldCPU() {
Sleep(0);
}
-
-// ----------------------------------------------------------------------------
-// Win32 mutex support.
-//
-// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
-// faster than Win32 Mutex objects because they are implemented using user mode
-// atomic instructions. Therefore we only do ring transitions if there is lock
-// contention.
-
-class Win32Mutex : public Mutex {
- public:
- Win32Mutex() { InitializeCriticalSection(&cs_); }
-
- virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
-
- virtual int Lock() {
- EnterCriticalSection(&cs_);
- return 0;
- }
-
- virtual int Unlock() {
- LeaveCriticalSection(&cs_);
- return 0;
- }
-
-
- virtual bool TryLock() {
- // Returns non-zero if the critical section was entered successfully.
- return TryEnterCriticalSection(&cs_);
- }
-
- private:
- CRITICAL_SECTION cs_; // Critical section used for mutex
-};
-
-
-Mutex* OS::CreateMutex() {
- return new Win32Mutex();
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 semaphore support.
-//
-// On Win32 semaphores are implemented using Win32 Semaphore objects. The
-// semaphores are anonymous. Also, the semaphores are initialized to have
-// no upper limit on count.
-
-
-class Win32Semaphore : public Semaphore {
- public:
- explicit Win32Semaphore(int count) {
- sem = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
- }
-
- ~Win32Semaphore() {
- CloseHandle(sem);
- }
-
- void Wait() {
- WaitForSingleObject(sem, INFINITE);
- }
-
- bool Wait(int timeout) {
- // The Windows API takes the timeout in milliseconds; ours is in microseconds.
- DWORD millis_timeout = timeout / 1000;
- return WaitForSingleObject(sem, millis_timeout) != WAIT_TIMEOUT;
- }
-
- void Signal() {
- LONG dummy;
- ReleaseSemaphore(sem, 1, &dummy);
- }
-
- private:
- HANDLE sem;
-};
-
-
-Semaphore* OS::CreateSemaphore(int count) {
- return new Win32Semaphore(count);
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 socket support.
-//
-
-class Win32Socket : public Socket {
- public:
- explicit Win32Socket() {
- // Create the socket.
- socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- }
- explicit Win32Socket(SOCKET socket): socket_(socket) { }
- virtual ~Win32Socket() { Shutdown(); }
-
- // Server initialization.
- bool Bind(const int port);
- bool Listen(int backlog) const;
- Socket* Accept() const;
-
- // Client initialization.
- bool Connect(const char* host, const char* port);
-
- // Shutdown socket for both read and write.
- bool Shutdown();
-
- // Data Transmission
- int Send(const char* data, int len) const;
- int Receive(char* data, int len) const;
-
- bool SetReuseAddress(bool reuse_address);
-
- bool IsValid() const { return socket_ != INVALID_SOCKET; }
-
- private:
- SOCKET socket_;
-};
-
-
-bool Win32Socket::Bind(const int port) {
- if (!IsValid()) {
- return false;
- }
-
- sockaddr_in addr;
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr.sin_port = htons(port);
- int status = bind(socket_,
- reinterpret_cast<struct sockaddr *>(&addr),
- sizeof(addr));
- return status == 0;
-}
-
-
-bool Win32Socket::Listen(int backlog) const {
- if (!IsValid()) {
- return false;
- }
-
- int status = listen(socket_, backlog);
- return status == 0;
-}
-
-
-Socket* Win32Socket::Accept() const {
- if (!IsValid()) {
- return NULL;
- }
-
- SOCKET socket = accept(socket_, NULL, NULL);
- if (socket == INVALID_SOCKET) {
- return NULL;
- } else {
- return new Win32Socket(socket);
- }
-}
-
-
-bool Win32Socket::Connect(const char* host, const char* port) {
- if (!IsValid()) {
- return false;
- }
-
- // Lookup host and port.
- struct addrinfo *result = NULL;
- struct addrinfo hints;
- memset(&hints, 0, sizeof(addrinfo));
- hints.ai_family = AF_INET;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_protocol = IPPROTO_TCP;
- int status = getaddrinfo(host, port, &hints, &result);
- if (status != 0) {
- return false;
- }
-
- // Connect.
- status = connect(socket_,
- result->ai_addr,
- static_cast<int>(result->ai_addrlen));
- freeaddrinfo(result);
- return status == 0;
-}
-
-
-bool Win32Socket::Shutdown() {
- if (IsValid()) {
- // Shutdown socket for both read and write.
- int status = shutdown(socket_, SD_BOTH);
- closesocket(socket_);
- socket_ = INVALID_SOCKET;
- return status == SOCKET_ERROR;
- }
- return true;
-}
-
-
-int Win32Socket::Send(const char* data, int len) const {
- if (len <= 0) return 0;
- int written = 0;
- while (written < len) {
- int status = send(socket_, data + written, len - written, 0);
- if (status == 0) {
- break;
- } else if (status > 0) {
- written += status;
- } else {
- return 0;
- }
- }
- return written;
-}
-
-
-int Win32Socket::Receive(char* data, int len) const {
- if (len <= 0) return 0;
- int status = recv(socket_, data, len, 0);
- return (status == SOCKET_ERROR) ? 0 : status;
-}
-
-
-bool Win32Socket::SetReuseAddress(bool reuse_address) {
- BOOL on = reuse_address ? true : false;
- int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
- reinterpret_cast<char*>(&on), sizeof(on));
- return status == SOCKET_ERROR;
-}
-
-
-bool Socket::SetUp() {
- // Initialize Winsock32
- int err;
- WSADATA winsock_data;
- WORD version_requested = MAKEWORD(1, 0);
- err = WSAStartup(version_requested, &winsock_data);
- if (err != 0) {
- PrintF("Unable to initialize Winsock, err = %d\n", Socket::LastError());
- }
-
- return err == 0;
-}
-
-
-int Socket::LastError() {
- return WSAGetLastError();
-}
-
-
-uint16_t Socket::HToN(uint16_t value) {
- return htons(value);
-}
-
-
-uint16_t Socket::NToH(uint16_t value) {
- return ntohs(value);
-}
-
-
-uint32_t Socket::HToN(uint32_t value) {
- return htonl(value);
-}
-
-
-uint32_t Socket::NToH(uint32_t value) {
- return ntohl(value);
-}
-
-
-Socket* OS::CreateSocket() {
- return new Win32Socket();
-}
-
-
-void OS::SetUp() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
- srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
-}
-
-
-void OS::TearDown() {
- delete limit_mutex;
-}
-
-
} } // namespace v8::internal
diff --git a/chromium/v8/src/platform.h b/chromium/v8/src/platform.h
index 8b27c19a65f..ee8fb92910b 100644
--- a/chromium/v8/src/platform.h
+++ b/chromium/v8/src/platform.h
@@ -44,6 +44,13 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
+#include <cstdarg>
+
+#include "platform/mutex.h"
+#include "platform/semaphore.h"
+#include "utils.h"
+#include "v8globals.h"
+
#ifdef __sun
# ifndef signbit
namespace std {
@@ -52,22 +59,8 @@ int signbit(double x);
# endif
#endif
-// GCC specific stuff
-#ifdef __GNUC__
-
-// Needed for va_list on at least MinGW and Android.
-#include <stdarg.h>
-
-#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
-
-#endif // __GNUC__
-
-
-// Windows specific stuff.
-#ifdef WIN32
-
// Microsoft Visual C++ specific stuff.
-#ifdef _MSC_VER
+#if V8_CC_MSVC
#include "win32-headers.h"
#include "win32-math.h"
@@ -76,7 +69,7 @@ int strncasecmp(const char* s1, const char* s2, int n);
inline int lrint(double flt) {
int intgr;
-#if defined(V8_TARGET_ARCH_IA32)
+#if V8_TARGET_ARCH_IA32
__asm {
fld flt
fistp intgr
@@ -91,25 +84,11 @@ inline int lrint(double flt) {
return intgr;
}
-#endif // _MSC_VER
-
-#ifndef __CYGWIN__
-// Random is missing on both Visual Studio and MinGW.
-int random();
-#endif
-
-#endif // WIN32
-
-#include "lazy-instance.h"
-#include "utils.h"
-#include "v8globals.h"
+#endif // V8_CC_MSVC
namespace v8 {
namespace internal {
-class Semaphore;
-class Mutex;
-
double ceiling(double x);
double modulo(double x, double y);
@@ -124,9 +103,6 @@ double fast_sqrt(double input);
// on demand.
void lazily_initialize_fast_exp();
-// Forward declarations.
-class Socket;
-
// ----------------------------------------------------------------------------
// Fast TLS support
@@ -190,26 +166,16 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
class OS {
public:
- // Initializes the platform OS support. Called once at VM startup.
- static void SetUp();
-
// Initializes the platform OS support that depends on CPU features. This is
// called after CPU initialization.
static void PostSetUp();
- // Clean up platform-OS-related things. Called once at VM shutdown.
- static void TearDown();
-
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
// micro-second resolution.
static int GetUserTime(uint32_t* secs, uint32_t* usecs);
- // Get a tick counter normalized to one tick per microsecond.
- // Used for calculating time intervals.
- static int64_t Ticks();
-
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
@@ -277,18 +243,9 @@ class OS {
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
- // Returns an indication of whether a pointer is in a space that
- // has been allocated by Allocate(). This method may conservatively
- // always return false, but giving more accurate information may
- // improve the robustness of the stack dump code in the presence of
- // heap corruption.
- static bool IsOutsideAllocatedSpace(void* pointer);
-
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
- static int NumberOfCores();
-
// Abort the current process.
static void Abort();
@@ -309,18 +266,6 @@ class OS {
static int StackWalk(Vector<StackFrame> frames);
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
- // Factory method for creating platform dependent Semaphore.
- // Please use delete to reclaim the storage for the returned Semaphore.
- static Semaphore* CreateSemaphore(int count);
-
- // Factory method for creating platform dependent Socket.
- // Please use delete to reclaim the storage for the returned Socket.
- static Socket* CreateSocket();
-
class MemoryMappedFile {
public:
static MemoryMappedFile* open(const char* name);
@@ -342,7 +287,7 @@ class OS {
// Support for the profiler. Can do nothing, in which case ticks
// occurring in shared libraries will not be properly accounted for.
- static void LogSharedLibraryAddresses();
+ static void LogSharedLibraryAddresses(Isolate* isolate);
// Support for the profiler. Notifies the external profiling
// process that a code moving garbage collection starts. Can do
@@ -365,22 +310,10 @@ class OS {
// Returns the double constant NAN
static double nan_value();
- // Support runtime detection of Cpu implementer
- static CpuImplementer GetCpuImplementer();
-
- // Support runtime detection of Cpu implementer
- static CpuPart GetCpuPart(CpuImplementer implementer);
-
- // Support runtime detection of VFP3 on ARM CPUs.
- static bool ArmCpuHasFeature(CpuFeature feature);
-
// Support runtime detection of whether the hard float option of the
// EABI is used.
static bool ArmUsingHardFloat();
- // Support runtime detection of FPU on MIPS CPUs.
- static bool MipsCpuHasFeature(CpuFeature feature);
-
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
@@ -547,59 +480,6 @@ class VirtualMemory {
// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
-// count reaches zero, threads waiting for the semaphore block until the
-// count becomes non-zero.
-
-class Semaphore {
- public:
- virtual ~Semaphore() {}
-
- // Suspends the calling thread until the semaphore counter is non-zero
- // and then decrements the semaphore counter.
- virtual void Wait() = 0;
-
- // Suspends the calling thread until the counter is non-zero or the timeout
- // expires. If it does, the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned. The timeout value is specified in microseconds.
- virtual bool Wait(int timeout) = 0;
-
- // Increments the semaphore counter.
- virtual void Signal() = 0;
-};
-
-template <int InitialValue>
-struct CreateSemaphoreTrait {
- static Semaphore* Create() {
- return OS::CreateSemaphore(InitialValue);
- }
-};
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// // The following semaphore starts at 0.
-// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-// void my_function() {
-// // Do something with my_semaphore.Pointer().
-// }
-//
-template <int InitialValue>
-struct LazySemaphore {
- typedef typename LazyDynamicInstance<
- Semaphore, CreateSemaphoreTrait<InitialValue>,
- ThreadSafeInitOnceTrait>::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-
-// ----------------------------------------------------------------------------
// Thread
//
// Thread objects are used for creating and running threads. When the start()
@@ -641,7 +521,7 @@ class Thread {
// Start new thread and wait until Run() method is called on the new thread.
void StartSynchronously() {
- start_semaphore_ = OS::CreateSemaphore(0);
+ start_semaphore_ = new Semaphore(0);
Start();
start_semaphore_->Wait();
delete start_semaphore_;
@@ -714,113 +594,6 @@ class Thread {
DISALLOW_COPY_AND_ASSIGN(Thread);
};
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
- // Locks the given mutex. If the mutex is currently unlocked, it becomes
- // locked and owned by the calling thread, and immediately. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- virtual bool TryLock() = 0;
-};
-
-struct CreateMutexTrait {
- static Mutex* Create() {
- return OS::CreateMutex();
- }
-};
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// ScopedLock my_lock(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef LazyDynamicInstance<
- Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and
-// unlocking of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- ASSERT(mutex_ != NULL);
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
-
-
-// ----------------------------------------------------------------------------
-// Socket
-//
-
-class Socket {
- public:
- virtual ~Socket() {}
-
- // Server initialization.
- virtual bool Bind(const int port) = 0;
- virtual bool Listen(int backlog) const = 0;
- virtual Socket* Accept() const = 0;
-
- // Client initialization.
- virtual bool Connect(const char* host, const char* port) = 0;
-
- // Shutdown socket for both read and write. This causes blocking Send and
- // Receive calls to exit. After Shutdown the Socket object cannot be used for
- // any communication.
- virtual bool Shutdown() = 0;
-
- // Data Transmission
- // Return 0 on failure.
- virtual int Send(const char* data, int len) const = 0;
- virtual int Receive(char* data, int len) const = 0;
-
- // Set the value of the SO_REUSEADDR socket option.
- virtual bool SetReuseAddress(bool reuse_address) = 0;
-
- virtual bool IsValid() const = 0;
-
- static bool SetUp();
- static int LastError();
- static uint16_t HToN(uint16_t value);
- static uint16_t NToH(uint16_t value);
- static uint32_t HToN(uint32_t value);
- static uint32_t NToH(uint32_t value);
-};
-
-
} } // namespace v8::internal
#endif // V8_PLATFORM_H_
diff --git a/chromium/v8/src/platform/condition-variable.cc b/chromium/v8/src/platform/condition-variable.cc
new file mode 100644
index 00000000000..e2bf3882ece
--- /dev/null
+++ b/chromium/v8/src/platform/condition-variable.cc
@@ -0,0 +1,345 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/condition-variable.h"
+
+#include <cerrno>
+#include <ctime>
+
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+ConditionVariable::ConditionVariable() {
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ pthread_condattr_t attr;
+ int result = pthread_condattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ ASSERT_EQ(0, result);
+ result = pthread_cond_init(&native_handle_, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_condattr_destroy(&attr);
+#else
+ int result = pthread_cond_init(&native_handle_, NULL);
+#endif
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+ConditionVariable::~ConditionVariable() {
+ int result = pthread_cond_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyOne() {
+ int result = pthread_cond_signal(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::NotifyAll() {
+ int result = pthread_cond_broadcast(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ mutex->AssertHeldAndUnmark();
+ int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
+ ASSERT_EQ(0, result);
+ USE(result);
+ mutex->AssertUnheldAndMark();
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ struct timespec ts;
+ int result;
+ mutex->AssertHeldAndUnmark();
+#if V8_OS_MACOSX
+ // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
+ // not depend on the real time clock, which is what you really WANT here!
+ ts = rel_time.ToTimespec();
+ ASSERT_GE(ts.tv_sec, 0);
+ ASSERT_GE(ts.tv_nsec, 0);
+ result = pthread_cond_timedwait_relative_np(
+ &native_handle_, &mutex->native_handle(), &ts);
+#else
+ // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+ // hack to support cross-compiling Chrome for Android in AOSP. Remove
+ // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+ (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+ // On Free/Net/OpenBSD and Linux with glibc we can change the time
+ // source for pthread_cond_timedwait() to use the monotonic clock.
+ result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ Time now = Time::FromTimespec(ts);
+#else
+ // The timeout argument to pthread_cond_timedwait() is in absolute time.
+ Time now = Time::NowFromSystemTime();
+#endif
+ Time end_time = now + rel_time;
+ ASSERT_GE(end_time, now);
+ ts = end_time.ToTimespec();
+ result = pthread_cond_timedwait(
+ &native_handle_, &mutex->native_handle(), &ts);
+#endif // V8_OS_MACOSX
+ mutex->AssertUnheldAndMark();
+ if (result == ETIMEDOUT) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+struct ConditionVariable::Event {
+ Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
+ ASSERT(handle_ != NULL);
+ }
+
+ ~Event() {
+ BOOL ok = ::CloseHandle(handle_);
+ ASSERT(ok);
+ USE(ok);
+ }
+
+ bool WaitFor(DWORD timeout_ms) {
+ DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ return false;
+ }
+
+ HANDLE handle_;
+ Event* next_;
+ HANDLE thread_;
+ volatile bool notified_;
+};
+
+
+ConditionVariable::NativeHandle::~NativeHandle() {
+ ASSERT(waitlist_ == NULL);
+
+ while (freelist_ != NULL) {
+ Event* event = freelist_;
+ freelist_ = event->next_;
+ delete event;
+ }
+}
+
+
+ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Grab an event from the free list or create a new one.
+ Event* event = freelist_;
+ if (event != NULL) {
+ freelist_ = event->next_;
+ } else {
+ event = new Event;
+ }
+ event->thread_ = GetCurrentThread();
+ event->notified_ = false;
+
+#ifdef DEBUG
+ // The event must not be on the wait list.
+ for (Event* we = waitlist_; we != NULL; we = we->next_) {
+ ASSERT_NE(event, we);
+ }
+#endif
+
+ // Prepend the event to the wait list.
+ event->next_ = waitlist_;
+ waitlist_ = event;
+
+ return event;
+}
+
+
+void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Remove the event from the wait list.
+ for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
+ ASSERT_NE(NULL, *wep);
+ if (*wep == event) {
+ *wep = event->next_;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ // The event must not be on the free list.
+ for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
+ ASSERT_NE(event, fe);
+ }
+#endif
+
+ // Reset the event.
+ BOOL ok = ::ResetEvent(event->handle_);
+ ASSERT(ok);
+ USE(ok);
+
+ // Insert the event into the free list.
+ event->next_ = freelist_;
+ freelist_ = event;
+
+ // Forward signals delivered after the timeout to the next waiting event.
+ if (!result && event->notified_ && waitlist_ != NULL) {
+ ok = ::SetEvent(waitlist_->handle_);
+ ASSERT(ok);
+ USE(ok);
+ waitlist_->notified_ = true;
+ }
+}
+
+
+ConditionVariable::ConditionVariable() {}
+
+
+ConditionVariable::~ConditionVariable() {}
+
+
+void ConditionVariable::NotifyOne() {
+ // Notify the thread with the highest priority in the waitlist
+ // that was not already signalled.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ Event* highest_event = NULL;
+ int highest_priority = std::numeric_limits<int>::min();
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (event->notified_) {
+ continue;
+ }
+ int priority = GetThreadPriority(event->thread_);
+ ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
+ if (priority >= highest_priority) {
+ highest_priority = priority;
+ highest_event = event;
+ }
+ }
+ if (highest_event != NULL) {
+ ASSERT(!highest_event->notified_);
+ ::SetEvent(highest_event->handle_);
+ highest_event->notified_ = true;
+ }
+}
+
+
+void ConditionVariable::NotifyAll() {
+ // Notify all threads on the waitlist.
+ LockGuard<Mutex> lock_guard(native_handle_.mutex());
+ for (Event* event = native_handle().waitlist();
+ event != NULL;
+ event = event->next_) {
+ if (!event->notified_) {
+ ::SetEvent(event->handle_);
+ event->notified_ = true;
+ }
+ }
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ while (!event->WaitFor(INFINITE))
+ ;
+
+ // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event (we must have been notified).
+ ASSERT(event->notified_);
+ native_handle_.Post(event, true);
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+ // Create and setup the wait event.
+ Event* event = native_handle_.Pre();
+
+ // Release the user mutex.
+ mutex->Unlock();
+
+ // Wait on the wait event.
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ bool result = false;
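+ // WaitForSingleObject() takes its timeout as a DWORD in milliseconds and
+ // reserves INFINITE (0xFFFFFFFF) as a sentinel, so timeouts of at least
+ // INFINITE ms are waited in chunks of INFINITE - 1 and the deadline is
+ // then rechecked against TimeTicks::Now().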
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ result = event->WaitFor(INFINITE - 1);
+ if (result) {
+ break;
+ }
+ now = TimeTicks::Now();
+ } else {
+ result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
+ break;
+ }
+ }
+
+ // Reacquire the user mutex.
+ mutex->Lock();
+
+ // Release the wait event.
+ ASSERT(!result || event->notified_);
+ native_handle_.Post(event, result);
+
+ return result;
+}
+
+#endif // V8_OS_POSIX
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/platform/condition-variable.h b/chromium/v8/src/platform/condition-variable.h
new file mode 100644
index 00000000000..4d8a88aee79
--- /dev/null
+++ b/chromium/v8/src/platform/condition-variable.h
@@ -0,0 +1,140 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_
+#define V8_PLATFORM_CONDITION_VARIABLE_H_
+
+#include "platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class ConditionVariableEvent;
+class TimeDelta;
+
+// -----------------------------------------------------------------------------
+// ConditionVariable
+//
+// This class is a synchronization primitive that can be used to block a thread,
+// or multiple threads at the same time, until:
+// - a notification is received from another thread,
+// - a timeout expires, or
+// - a spurious wakeup occurs
+// Any thread that intends to wait on a ConditionVariable has to acquire a lock
+// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
+// the mutex and suspend the execution of the calling thread. When the condition
+// variable is notified, the thread is awakened, and the mutex is reacquired.
+
+class ConditionVariable V8_FINAL {
+ public:
+ ConditionVariable();
+ ~ConditionVariable();
+
+ // If any threads are waiting on this condition variable, calling
+ // |NotifyOne()| unblocks one of the waiting threads.
+ void NotifyOne();
+
+ // Unblocks all threads currently waiting for this condition variable.
+ void NotifyAll();
+
+ // |Wait()| causes the calling thread to block until the condition variable is
+ // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
+// the currently executing thread, and adds it to the list of threads waiting on
+ // this condition variable. The thread will be unblocked when |NotifyAll()| or
+ // |NotifyOne()| is executed. It may also be unblocked spuriously. When
+ // unblocked, regardless of the reason, the lock on the mutex is reacquired
+ // and |Wait()| exits.
+ void Wait(Mutex* mutex);
+
+ // Atomically releases the mutex, blocks the currently executing thread, and
+ // adds it to the list of threads waiting on this condition variable. The
+ // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
+ // or when the relative timeout |rel_time| expires. It may also be unblocked
+ // spuriously. When unblocked, regardless of the reason, the lock on the mutex
+ // is reacquired and |WaitFor()| exits. Returns true if the condition variable
+ // was notified prior to the timeout.
+ bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_cond_t NativeHandle;
+#elif V8_OS_WIN
+ struct Event;
+ class NativeHandle V8_FINAL {
+ public:
+ NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
+ ~NativeHandle();
+
+ Event* Pre() V8_WARN_UNUSED_RESULT;
+ void Post(Event* event, bool result);
+
+ Mutex* mutex() { return &mutex_; }
+ Event* waitlist() { return waitlist_; }
+
+ private:
+ Event* waitlist_;
+ Event* freelist_;
+ Mutex mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeHandle);
+ };
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+
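A minimal usage sketch of the class above (illustrative only, not part of this patch): a waiter must hold the mutex, re-check its predicate in a loop to absorb spurious wakeups, and call Wait(); the notifier updates the shared state under the same mutex before notifying.

// Illustrative sketch; assumes only the Mutex, LockGuard and
// ConditionVariable declarations introduced by this patch.
static Mutex mutex;
static ConditionVariable cv;
static bool ready = false;  // shared state guarded by |mutex|

void Consumer() {
  LockGuard<Mutex> lock_guard(&mutex);
  while (!ready) {     // loop: Wait() may wake up spuriously
    cv.Wait(&mutex);   // atomically releases |mutex| while blocked
  }
  // |mutex| is held again here; it is safe to consume the state.
}

void Producer() {
  LockGuard<Mutex> lock_guard(&mutex);
  ready = true;
  cv.NotifyOne();      // unblock one waiting thread
}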
+// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyConditionVariable my_condvar =
+// LAZY_CONDITION_VARIABLE_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> lock_guard(&my_mutex);
+// my_condvar.Pointer()->Wait(&my_mutex);
+// }
+typedef LazyStaticInstance<ConditionVariable,
+ DefaultConstructTrait<ConditionVariable>,
+ ThreadSafeInitOnceTrait>::type LazyConditionVariable;
+
+#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_CONDITION_VARIABLE_H_
diff --git a/chromium/v8/src/platform/elapsed-timer.h b/chromium/v8/src/platform/elapsed-timer.h
new file mode 100644
index 00000000000..2311db2f524
--- /dev/null
+++ b/chromium/v8/src/platform/elapsed-timer.h
@@ -0,0 +1,120 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
+#define V8_PLATFORM_ELAPSED_TIMER_H_
+
+#include "checks.h"
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+class ElapsedTimer V8_FINAL BASE_EMBEDDED {
+ public:
+#ifdef DEBUG
+ ElapsedTimer() : started_(false) {}
+#endif
+
+ // Starts this timer. Once started, a timer can be checked with
+ // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
+ // This method must not be called on an already started timer.
+ void Start() {
+ ASSERT(!IsStarted());
+ start_ticks_ = Now();
+#ifdef DEBUG
+ started_ = true;
+#endif
+ ASSERT(IsStarted());
+ }
+
+ // Stops this timer. Must not be called on a timer that was not
+ // started before.
+ void Stop() {
+ ASSERT(IsStarted());
+ start_ticks_ = TimeTicks();
+#ifdef DEBUG
+ started_ = false;
+#endif
+ ASSERT(!IsStarted());
+ }
+
+ // Returns |true| if this timer was started previously.
+ bool IsStarted() const {
+ ASSERT(started_ || start_ticks_.IsNull());
+ ASSERT(!started_ || !start_ticks_.IsNull());
+ return !start_ticks_.IsNull();
+ }
+
+ // Restarts the timer and returns the time elapsed since the previous start.
+ // This method is equivalent to obtaining the elapsed time with |Elapsed()|
+ // and then starting the timer again, but does so in one single operation,
+ // avoiding the need to obtain the clock value twice. It may only be called
+ // on a previously started timer.
+ TimeDelta Restart() {
+ ASSERT(IsStarted());
+ TimeTicks ticks = Now();
+ TimeDelta elapsed = ticks - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ start_ticks_ = ticks;
+ ASSERT(IsStarted());
+ return elapsed;
+ }
+
+ // Returns the time elapsed since the previous start. This method may only
+ // be called on a previously started timer.
+ TimeDelta Elapsed() const {
+ ASSERT(IsStarted());
+ TimeDelta elapsed = Now() - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ return elapsed;
+ }
+
+ // Returns |true| if the specified |time_delta| has elapsed since the
+ // previous start, or |false| if not. This method may only be called on
+ // a previously started timer.
+ bool HasExpired(TimeDelta time_delta) const {
+ ASSERT(IsStarted());
+ return Elapsed() >= time_delta;
+ }
+
+ private:
+ static V8_INLINE TimeTicks Now() {
+ TimeTicks now = TimeTicks::HighResNow();
+ ASSERT(!now.IsNull());
+ return now;
+ }
+
+ TimeTicks start_ticks_;
+#ifdef DEBUG
+ bool started_;
+#endif
+};
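A short usage sketch for the timer above (illustrative only, not part of this patch; TimeDelta::FromMilliseconds() is assumed to come from platform/time.h, which is not shown in this diff):

void DoWorkWithBudget() {
  ElapsedTimer timer;
  timer.Start();
  // Assumed factory from platform/time.h (not part of this excerpt).
  const TimeDelta budget = TimeDelta::FromMilliseconds(50);
  while (!timer.HasExpired(budget)) {
    // ... perform one bounded slice of work ...
  }
}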
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_ELAPSED_TIMER_H_
diff --git a/chromium/v8/src/platform/mutex.cc b/chromium/v8/src/platform/mutex.cc
new file mode 100644
index 00000000000..ad97740995d
--- /dev/null
+++ b/chromium/v8/src/platform/mutex.cc
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/mutex.h"
+
+#include <cerrno>
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
+ int result;
+#if defined(DEBUG)
+ // Use an error checking mutex in debug mode.
+ pthread_mutexattr_t attr;
+ result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+#else
+ // Use a fast mutex (default attributes).
+ result = pthread_mutex_init(mutex, NULL);
+#endif // defined(DEBUG)
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
+ pthread_mutexattr_t attr;
+ int result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_destroy(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_lock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_unlock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
+ int result = pthread_mutex_trylock(mutex);
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
+ DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
+ EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
+ LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
+ return TryEnterCriticalSection(cs);
+}
+
+#endif // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+ InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+ LockNativeHandle(&native_handle_);
+ AssertUnheldAndMark();
+}
+
+
+void Mutex::Unlock() {
+ AssertHeldAndUnmark();
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+ AssertUnheldAndMark();
+ return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+ InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_LT(0, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/platform/mutex.h b/chromium/v8/src/platform/mutex.h
new file mode 100644
index 00000000000..0f899ca5976
--- /dev/null
+++ b/chromium/v8/src/platform/mutex.h
@@ -0,0 +1,238 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_MUTEX_H_
+#define V8_PLATFORM_MUTEX_H_
+
+#include "lazy-instance.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_POSIX
+#include <pthread.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+// either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+// |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+// attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
+
+class Mutex V8_FINAL {
+ public:
+ Mutex();
+ ~Mutex();
+
+  // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread, and the call returns
+  // immediately. If the mutex is already locked by another thread, the
+  // calling thread is suspended until the mutex is unlocked.
+ void Lock();
+
+ // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+ // the calling thread on entrance.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+ typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ V8_INLINE void AssertHeldAndUnmark() {
+#ifdef DEBUG
+ ASSERT_EQ(1, level_);
+ level_--;
+#endif
+ }
+
+ V8_INLINE void AssertUnheldAndMark() {
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+ }
+
+ friend class ConditionVariable;
+
+ DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
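+
+// Usage sketch (illustrative only; |shared_counter| and |TryIncrement()| are
+// hypothetical and not part of V8):
+//
+//   static Mutex counter_mutex;
+//   static int shared_counter = 0;
+//
+//   bool TryIncrement() {
+//     if (!counter_mutex.TryLock()) return false;  // Another thread owns it.
+//     shared_counter++;
+//     counter_mutex.Unlock();
+//     return true;
+//   }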
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<Mutex,
+ DefaultConstructTrait<Mutex>,
+ ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+// when it successfully calls either |Lock()| or |TryLock()|. During this
+// period, the thread may make additional calls to |Lock()| or |TryLock()|.
+// The period of ownership ends when the thread makes a matching number of
+// calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+// they attempt to claim ownership of the recursive mutex.
+//  - The maximum number of times that a recursive mutex may be locked is
+//    unspecified, but after that number is reached, calls to |Lock()| will
+//    probably abort the process and calls to |TryLock()| will return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex V8_FINAL {
+ public:
+ RecursiveMutex();
+ ~RecursiveMutex();
+
+ // Locks the mutex. If another thread has already locked the mutex, a call to
+ // |Lock()| will block execution until the lock is acquired. A thread may call
+ // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+ // after the thread makes a matching number of calls to |Unlock()|.
+ // The behavior is undefined if the mutex is not unlocked before being
+ // destroyed, i.e. some thread still owns it.
+ void Lock();
+
+  // Unlocks the mutex if its level of ownership is 1 (there was exactly one
+  // more call to |Lock()| than there were calls to |Unlock()| made by this
+  // thread); otherwise reduces the level of ownership by 1. The mutex must be
+  // locked by the current thread of execution, otherwise the behavior is
+  // undefined.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+ typedef Mutex::NativeHandle NativeHandle;
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
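+
+// Usage sketch (illustrative only; |Node| and |VisitTree()| are
+// hypothetical):
+//
+//   static RecursiveMutex tree_mutex;
+//
+//   void VisitTree(Node* node) {
+//     LockGuard<RecursiveMutex> guard(&tree_mutex);  // Re-entry is safe.
+//     if (node->left != NULL) VisitTree(node->left);  // Locks recursively.
+//     if (node->right != NULL) VisitTree(node->right);
+//   }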
+
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+ DefaultConstructTrait<RecursiveMutex>,
+ ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard V8_FINAL {
+ public:
+ explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+ ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+ Mutex* mutex_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockGuard);
+};
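+
+// Usage sketch (illustrative only; |Counter| is hypothetical):
+//
+//   class Counter {
+//    public:
+//     void Increment() {
+//       LockGuard<Mutex> guard(&mutex_);  // Unlocked when leaving the scope,
+//       count_++;                         // even on an early return.
+//     }
+//    private:
+//     Mutex mutex_;
+//     int count_;
+//   };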
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_MUTEX_H_
diff --git a/chromium/v8/src/platform/semaphore.cc b/chromium/v8/src/platform/semaphore.cc
new file mode 100644
index 00000000000..c3e5826f4f7
--- /dev/null
+++ b/chromium/v8/src/platform/semaphore.cc
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/semaphore.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#endif
+
+#include <cerrno>
+
+#include "checks.h"
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_MACOSX
+
+Semaphore::Semaphore(int count) {
+ kern_return_t result = semaphore_create(
+ mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ kern_return_t result = semaphore_signal(native_handle_);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ kern_return_t result = semaphore_wait(native_handle_);
+ if (result == KERN_SUCCESS) return; // Semaphore was signalled.
+ ASSERT_EQ(KERN_ABORTED, result);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ mach_timespec_t ts;
+ if (now >= end) {
+ // Return immediately if semaphore was not signalled.
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ } else {
+ ts = (end - now).ToMachTimespec();
+ }
+ kern_return_t result = semaphore_timedwait(native_handle_, ts);
+ if (result == KERN_SUCCESS) return true; // Semaphore was signalled.
+ if (result == KERN_OPERATION_TIMED_OUT) return false; // Timeout.
+ ASSERT_EQ(KERN_ABORTED, result);
+ now = TimeTicks::Now();
+ }
+}
+
+#elif V8_OS_POSIX
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ int result = sem_init(&native_handle_, 0, count);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+ int result = sem_destroy(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ int result = sem_post(&native_handle_);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&native_handle_);
+ if (result == 0) return; // Semaphore was signalled.
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ // Compute the time for end of timeout.
+ const Time time = Time::NowFromSystemTime() + rel_time;
+ const struct timespec ts = time.ToTimespec();
+
+ // Wait for semaphore signalled or timeout.
+ while (true) {
+ int result = sem_timedwait(&native_handle_, &ts);
+ if (result == 0) return true; // Semaphore was signalled.
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+ if (result > 0) {
+      // sem_timedwait in glibc prior to 2.3.4 returns the error code
+      // directly instead of returning -1 and setting errno.
+ errno = result;
+ result = -1;
+ }
+#endif
+ if (result == -1 && errno == ETIMEDOUT) {
+ // Timed out while waiting for semaphore.
+ return false;
+ }
+ // Signal caused spurious wakeup.
+ ASSERT_EQ(-1, result);
+ ASSERT_EQ(EINTR, errno);
+ }
+}
+
+#elif V8_OS_WIN
+
+Semaphore::Semaphore(int count) {
+ ASSERT(count >= 0);
+ native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+ ASSERT(native_handle_ != NULL);
+}
+
+
+Semaphore::~Semaphore() {
+ BOOL result = CloseHandle(native_handle_);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Signal() {
+ LONG dummy;
+ BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
+ ASSERT(result);
+ USE(result);
+}
+
+
+void Semaphore::Wait() {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE);
+ ASSERT(result == WAIT_OBJECT_0);
+ USE(result);
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+ TimeTicks now = TimeTicks::Now();
+ TimeTicks end = now + rel_time;
+ while (true) {
+ int64_t msec = (end - now).InMilliseconds();
+ if (msec >= static_cast<int64_t>(INFINITE)) {
+ DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
+ if (result == WAIT_OBJECT_0) {
+ return true;
+ }
+ ASSERT(result == WAIT_TIMEOUT);
+ now = TimeTicks::Now();
+ } else {
+ DWORD result = WaitForSingleObject(
+ native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
+ if (result == WAIT_TIMEOUT) {
+ return false;
+ }
+ ASSERT(result == WAIT_OBJECT_0);
+ return true;
+ }
+ }
+}
+
+#endif // V8_OS_MACOSX
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/platform/semaphore.h b/chromium/v8/src/platform/semaphore.h
new file mode 100644
index 00000000000..2cfa1421117
--- /dev/null
+++ b/chromium/v8/src/platform/semaphore.h
@@ -0,0 +1,126 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_SEMAPHORE_H_
+#define V8_PLATFORM_SEMAPHORE_H_
+
+#include "lazy-instance.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/semaphore.h> // NOLINT
+#elif V8_OS_POSIX
+#include <semaphore.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class TimeDelta;
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
+class Semaphore V8_FINAL {
+ public:
+ explicit Semaphore(int count);
+ ~Semaphore();
+
+ // Increments the semaphore counter.
+ void Signal();
+
+  // Suspends the calling thread until the semaphore counter is non-zero
+ // and then decrements the semaphore counter.
+ void Wait();
+
+  // Suspends the calling thread until the counter is non-zero or the timeout
+  // time has passed. If the timeout expires, the return value is false and
+  // the counter is unchanged. Otherwise the semaphore counter is decremented
+  // and true is returned.
+ bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
+
+#if V8_OS_MACOSX
+ typedef semaphore_t NativeHandle;
+#elif V8_OS_POSIX
+ typedef sem_t NativeHandle;
+#elif V8_OS_WIN
+ typedef HANDLE NativeHandle;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(Semaphore);
+};
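+
+// Usage sketch (illustrative only; |queue|, |Produce()| and |Consume()| are
+// hypothetical, and |queue| would need its own synchronization):
+//
+//   static Semaphore items(0);  // Counts items available in |queue|.
+//
+//   void Produce() { queue.Push(MakeItem()); items.Signal(); }
+//
+//   void Consume() {
+//     if (items.WaitFor(TimeDelta::FromMilliseconds(100))) {
+//       UseItem(queue.Pop());  // An item arrived within 100ms.
+//     }
+//   }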
+
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// // The following semaphore starts at 0.
+// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+// void my_function() {
+// // Do something with my_semaphore.Pointer().
+// }
+//
+
+template <int N>
+struct CreateSemaphoreTrait {
+ static Semaphore* Create() {
+ return new Semaphore(N);
+ }
+};
+
+template <int N>
+struct LazySemaphore {
+ typedef typename LazyDynamicInstance<
+ Semaphore,
+ CreateSemaphoreTrait<N>,
+ ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_SEMAPHORE_H_
diff --git a/chromium/v8/src/platform/socket.cc b/chromium/v8/src/platform/socket.cc
new file mode 100644
index 00000000000..2fce6f2992d
--- /dev/null
+++ b/chromium/v8/src/platform/socket.cc
@@ -0,0 +1,224 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/socket.h"
+
+#if V8_OS_POSIX
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netdb.h>
+
+#include <unistd.h>
+#endif
+
+#include <cerrno>
+
+#include "checks.h"
+#include "once.h"
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_WIN
+
+static V8_DECLARE_ONCE(initialize_winsock) = V8_ONCE_INIT;
+
+
+static void InitializeWinsock() {
+ WSADATA wsa_data;
+ int result = WSAStartup(MAKEWORD(1, 0), &wsa_data);
+ CHECK_EQ(0, result);
+}
+
+#endif // V8_OS_WIN
+
+
+Socket::Socket() {
+#if V8_OS_WIN
+ // Be sure to initialize the WinSock DLL first.
+ CallOnce(&initialize_winsock, &InitializeWinsock);
+#endif // V8_OS_WIN
+
+ // Create the native socket handle.
+ native_handle_ = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+}
+
+
+bool Socket::Bind(int port) {
+ ASSERT_GE(port, 0);
+ ASSERT_LT(port, 65536);
+ if (!IsValid()) return false;
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ sin.sin_port = htons(static_cast<uint16_t>(port));
+ int result = ::bind(
+ native_handle_, reinterpret_cast<struct sockaddr*>(&sin), sizeof(sin));
+ return result == 0;
+}
+
+
+bool Socket::Listen(int backlog) {
+ if (!IsValid()) return false;
+ int result = ::listen(native_handle_, backlog);
+ return result == 0;
+}
+
+
+Socket* Socket::Accept() {
+ if (!IsValid()) return NULL;
+ while (true) {
+ NativeHandle native_handle = ::accept(native_handle_, NULL, NULL);
+ if (native_handle == kInvalidNativeHandle) {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return NULL;
+ }
+ return new Socket(native_handle);
+ }
+}
+
+
+bool Socket::Connect(const char* host, const char* port) {
+ ASSERT_NE(NULL, host);
+ ASSERT_NE(NULL, port);
+ if (!IsValid()) return false;
+
+ // Lookup host and port.
+ struct addrinfo* info = NULL;
+ struct addrinfo hint;
+ memset(&hint, 0, sizeof(hint));
+ hint.ai_family = AF_INET;
+ hint.ai_socktype = SOCK_STREAM;
+ hint.ai_protocol = IPPROTO_TCP;
+ int result = ::getaddrinfo(host, port, &hint, &info);
+ if (result != 0) {
+ return false;
+ }
+
+ // Connect to the host on the given port.
+ for (struct addrinfo* ai = info; ai != NULL; ai = ai->ai_next) {
+ // Try to connect using this addr info.
+ while (true) {
+ result = ::connect(
+ native_handle_, ai->ai_addr, static_cast<int>(ai->ai_addrlen));
+ if (result == 0) {
+ freeaddrinfo(info);
+ return true;
+ }
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ break;
+ }
+ }
+ freeaddrinfo(info);
+ return false;
+}
+
+
+bool Socket::Shutdown() {
+ if (!IsValid()) return false;
+ // Shutdown socket for both read and write.
+#if V8_OS_POSIX
+ int result = ::shutdown(native_handle_, SHUT_RDWR);
+ ::close(native_handle_);
+#elif V8_OS_WIN
+ int result = ::shutdown(native_handle_, SD_BOTH);
+ ::closesocket(native_handle_);
+#endif
+ native_handle_ = kInvalidNativeHandle;
+ return result == 0;
+}
+
+
+int Socket::Send(const char* buffer, int length) {
+ ASSERT(length <= 0 || buffer != NULL);
+ if (!IsValid()) return 0;
+ int offset = 0;
+ while (offset < length) {
+ int result = ::send(native_handle_, buffer + offset, length - offset, 0);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ ASSERT(result <= length - offset);
+ offset += result;
+ } else {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return 0;
+ }
+ }
+ return offset;
+}
+
+
+int Socket::Receive(char* buffer, int length) {
+ if (!IsValid()) return 0;
+ if (length <= 0) return 0;
+ ASSERT_NE(NULL, buffer);
+ while (true) {
+ int result = ::recv(native_handle_, buffer, length, 0);
+ if (result < 0) {
+#if V8_OS_POSIX
+ if (errno == EINTR) continue; // Retry after signal.
+#endif
+ return 0;
+ }
+ return result;
+ }
+}
+
+
+bool Socket::SetReuseAddress(bool reuse_address) {
+  if (!IsValid()) return false;
+ int v = reuse_address ? 1 : 0;
+ int result = ::setsockopt(native_handle_, SOL_SOCKET, SO_REUSEADDR,
+ reinterpret_cast<char*>(&v), sizeof(v));
+ return result == 0;
+}
+
+
+// static
+int Socket::GetLastError() {
+#if V8_OS_POSIX
+ return errno;
+#elif V8_OS_WIN
+ // Be sure to initialize the WinSock DLL first.
+ CallOnce(&initialize_winsock, &InitializeWinsock);
+
+ // Now we can safely perform WSA calls.
+ return ::WSAGetLastError();
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/platform/socket.h b/chromium/v8/src/platform/socket.h
new file mode 100644
index 00000000000..ff8c1de7ce7
--- /dev/null
+++ b/chromium/v8/src/platform/socket.h
@@ -0,0 +1,101 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_SOCKET_H_
+#define V8_PLATFORM_SOCKET_H_
+
+#include "globals.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Socket
+//
+
+class Socket V8_FINAL {
+ public:
+ Socket();
+ ~Socket() { Shutdown(); }
+
+ // Server initialization.
+ bool Bind(int port) V8_WARN_UNUSED_RESULT;
+ bool Listen(int backlog) V8_WARN_UNUSED_RESULT;
+ Socket* Accept() V8_WARN_UNUSED_RESULT;
+
+ // Client initialization.
+ bool Connect(const char* host, const char* port) V8_WARN_UNUSED_RESULT;
+
+ // Shutdown socket for both read and write. This causes blocking Send and
+ // Receive calls to exit. After |Shutdown()| the Socket object cannot be
+ // used for any communication.
+ bool Shutdown();
+
+  // Data transmission.
+  // Both return 0 on failure.
+ int Send(const char* buffer, int length) V8_WARN_UNUSED_RESULT;
+ int Receive(char* buffer, int length) V8_WARN_UNUSED_RESULT;
+
+ // Set the value of the SO_REUSEADDR socket option.
+ bool SetReuseAddress(bool reuse_address);
+
+ V8_INLINE bool IsValid() const {
+ return native_handle_ != kInvalidNativeHandle;
+ }
+
+ static int GetLastError();
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef int NativeHandle;
+ static const NativeHandle kInvalidNativeHandle = -1;
+#elif V8_OS_WIN
+ typedef SOCKET NativeHandle;
+ static const NativeHandle kInvalidNativeHandle = INVALID_SOCKET;
+#endif
+
+ NativeHandle& native_handle() {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const {
+ return native_handle_;
+ }
+
+ private:
+ explicit Socket(NativeHandle native_handle) : native_handle_(native_handle) {}
+
+ NativeHandle native_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(Socket);
+};
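+
+// Usage sketch (illustrative only; |kRequest| and |kRequestLength| are
+// hypothetical, error handling elided):
+//
+//   Socket socket;
+//   if (socket.Connect("localhost", "5858")) {
+//     if (socket.Send(kRequest, kRequestLength) == kRequestLength) {
+//       char buffer[256];
+//       int received = socket.Receive(buffer, sizeof(buffer));
+//       // |received| is 0 on failure, otherwise the number of bytes read.
+//     }
+//   }
+//   // The destructor calls Shutdown() automatically.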
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_SOCKET_H_
diff --git a/chromium/v8/src/platform/time.cc b/chromium/v8/src/platform/time.cc
new file mode 100644
index 00000000000..ea6dd2c0bae
--- /dev/null
+++ b/chromium/v8/src/platform/time.cc
@@ -0,0 +1,613 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/time.h"
+
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+
+#include <cstring>
+
+#include "checks.h"
+#include "cpu.h"
+#include "platform.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_WIN
+// Prototype for GetTickCount64() procedure.
+extern "C" {
+typedef ULONGLONG (WINAPI *GETTICKCOUNT64PROC)(void);
+}
+#endif
+
+namespace v8 {
+namespace internal {
+
+TimeDelta TimeDelta::FromDays(int days) {
+ return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+
+TimeDelta TimeDelta::FromHours(int hours) {
+ return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+
+TimeDelta TimeDelta::FromMinutes(int minutes) {
+ return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+
+TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
+ return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
+}
+
+
+TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
+ return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
+}
+
+
+TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
+ return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
+}
+
+
+int TimeDelta::InDays() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+
+int TimeDelta::InHours() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+
+int TimeDelta::InMinutes() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+
+double TimeDelta::InSecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+
+int64_t TimeDelta::InSeconds() const {
+ return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+
+double TimeDelta::InMillisecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InMilliseconds() const {
+ return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InNanoseconds() const {
+ return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+
+#if V8_OS_MACOSX
+
+TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct mach_timespec TimeDelta::ToMachTimespec() const {
+ struct mach_timespec ts;
+ ASSERT(delta_ >= 0);
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_MACOSX
+
+
+#if V8_OS_POSIX
+
+TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
+ ASSERT_GE(ts.tv_nsec, 0);
+ ASSERT_LT(ts.tv_nsec,
+ static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
+ return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec TimeDelta::ToTimespec() const {
+ struct timespec ts;
+ ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+ Time::kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+#endif // V8_OS_POSIX
+
+
+#if V8_OS_WIN
+
+// We implement time using the high-resolution timers so that we can get
+// timeouts which are smaller than 10-15ms. To avoid any drift, we
+// periodically resync the internal clock to the system clock.
+class Clock V8_FINAL {
+ public:
+  Clock() : initial_ticks_(TimeTicks::Now()),
+            initial_time_(CurrentWallclockTime()) {}
+
+ Time Now() {
+ // This must be executed under lock.
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Calculate the time elapsed since we started our timer.
+ TimeDelta elapsed = TimeTicks::Now() - initial_ticks_;
+
+ // Check if we don't need to synchronize with the wallclock yet.
+ if (elapsed.InMicroseconds() <= kMaxMicrosecondsToAvoidDrift) {
+ return initial_time_ + elapsed;
+ }
+
+ // Resynchronize with the wallclock.
+ initial_ticks_ = TimeTicks::Now();
+ initial_time_ = CurrentWallclockTime();
+ return initial_time_;
+ }
+
+ Time NowFromSystemTime() {
+ // This must be executed under lock.
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Resynchronize with the wallclock.
+ initial_ticks_ = TimeTicks::Now();
+ initial_time_ = CurrentWallclockTime();
+ return initial_time_;
+ }
+
+ private:
+  // Time between resynchronizations with the low-granularity wallclock
+  // for this API (1 minute).
+ static const int64_t kMaxMicrosecondsToAvoidDrift =
+ Time::kMicrosecondsPerMinute;
+
+ static Time CurrentWallclockTime() {
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ return Time::FromFiletime(ft);
+ }
+
+ TimeTicks initial_ticks_;
+ Time initial_time_;
+ Mutex mutex_;
+};
+
+
+static LazyDynamicInstance<Clock,
+ DefaultCreateTrait<Clock>,
+ ThreadSafeInitOnceTrait>::type clock = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+Time Time::Now() {
+ return clock.Pointer()->Now();
+}
+
+
+Time Time::NowFromSystemTime() {
+ return clock.Pointer()->NowFromSystemTime();
+}
+
+
+// Time between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01).
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
+
+
+Time Time::FromFiletime(FILETIME ft) {
+ if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
+ return Time();
+ }
+ if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
+ ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
+ return Max();
+ }
+ int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
+ return Time(us - kTimeToEpochInMicroseconds);
+}
+
+
+FILETIME Time::ToFiletime() const {
+ ASSERT(us_ >= 0);
+ FILETIME ft;
+ if (IsNull()) {
+ ft.dwLowDateTime = 0;
+ ft.dwHighDateTime = 0;
+ return ft;
+ }
+ if (IsMax()) {
+ ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
+ ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
+ return ft;
+ }
+ uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
+ ft.dwLowDateTime = static_cast<DWORD>(us);
+ ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
+ return ft;
+}
+
+#elif V8_OS_POSIX
+
+Time Time::Now() {
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ return FromTimeval(tv);
+}
+
+
+Time Time::NowFromSystemTime() {
+ return Now();
+}
+
+
+Time Time::FromTimespec(struct timespec ts) {
+ ASSERT(ts.tv_nsec >= 0);
+ ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond)); // NOLINT
+ if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
+ return Time();
+ }
+ if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
+ ts.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(ts.tv_sec * kMicrosecondsPerSecond +
+ ts.tv_nsec / kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec Time::ToTimespec() const {
+ struct timespec ts;
+ if (IsNull()) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ return ts;
+ }
+ if (IsMax()) {
+ ts.tv_sec = std::numeric_limits<time_t>::max();
+ ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
+ return ts;
+ }
+ ts.tv_sec = us_ / kMicrosecondsPerSecond;
+ ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
+ return ts;
+}
+
+
+Time Time::FromTimeval(struct timeval tv) {
+ ASSERT(tv.tv_usec >= 0);
+ ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
+ if (tv.tv_usec == 0 && tv.tv_sec == 0) {
+ return Time();
+ }
+ if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
+ tv.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+
+struct timeval Time::ToTimeval() const {
+ struct timeval tv;
+ if (IsNull()) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return tv;
+ }
+ if (IsMax()) {
+ tv.tv_sec = std::numeric_limits<time_t>::max();
+ tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
+ return tv;
+ }
+ tv.tv_sec = us_ / kMicrosecondsPerSecond;
+ tv.tv_usec = us_ % kMicrosecondsPerSecond;
+ return tv;
+}
+
+#endif // V8_OS_WIN
+
+
+Time Time::FromJsTime(double ms_since_epoch) {
+ // The epoch is a valid time, so this constructor doesn't interpret
+ // 0 as the null time.
+ if (ms_since_epoch == std::numeric_limits<double>::max()) {
+ return Max();
+ }
+ return Time(
+ static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
+}
+
+
+double Time::ToJsTime() const {
+ if (IsNull()) {
+ // Preserve 0 so the invalid result doesn't depend on the platform.
+ return 0;
+ }
+ if (IsMax()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<double>::max();
+ }
+ return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
+}
+
+
+#if V8_OS_WIN
+
+class TickClock {
+ public:
+ virtual ~TickClock() {}
+ virtual int64_t Now() = 0;
+};
+
+
+// Overview of time counters:
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL.
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent result on a multiprocessor computer, but it is unreliable in
+// reality due to bugs in BIOS or HAL on some, especially old computers.
+// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
+// it should be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically
+// 10 to 55 milliseconds) time stamp but is comparatively less expensive to
+// retrieve and more reliable.
+class HighResolutionTickClock V8_FINAL : public TickClock {
+ public:
+ explicit HighResolutionTickClock(int64_t ticks_per_second)
+ : ticks_per_second_(ticks_per_second) {
+ ASSERT_LT(0, ticks_per_second);
+ }
+ virtual ~HighResolutionTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LARGE_INTEGER now;
+ BOOL result = QueryPerformanceCounter(&now);
+ ASSERT(result);
+ USE(result);
+
+    // Intentionally calculate microseconds in a roundabout manner to avoid
+    // overflow and precision issues. Think twice before simplifying!
+ int64_t whole_seconds = now.QuadPart / ticks_per_second_;
+ int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+ int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
+ ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
+
+ // Make sure we never return 0 here, so that TimeTicks::HighResNow()
+ // will never return 0.
+ return ticks + 1;
+ }
+
+ private:
+ int64_t ticks_per_second_;
+};
+
+
+// The GetTickCount64() API is what we actually want for the regular tick
+// clock, but this is only available starting with Windows Vista.
+class WindowsVistaTickClock V8_FINAL : public TickClock {
+ public:
+ explicit WindowsVistaTickClock(GETTICKCOUNT64PROC func) : func_(func) {
+ ASSERT(func_ != NULL);
+ }
+ virtual ~WindowsVistaTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ // Query the current ticks (in ms).
+ ULONGLONG tick_count_ms = (*func_)();
+
+ // Convert to microseconds (make sure to never return 0 here).
+ return (tick_count_ms * Time::kMicrosecondsPerMillisecond) + 1;
+ }
+
+ private:
+ GETTICKCOUNT64PROC func_;
+};
+
+
+class RolloverProtectedTickClock V8_FINAL : public TickClock {
+ public:
+ // We initialize rollover_ms_ to 1 to ensure that we will never
+ // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below.
+ RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+ virtual ~RolloverProtectedTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
+ // every ~49.7 days. We try to track rollover ourselves, which works if
+ // TimeTicks::Now() is called at least every 49 days.
+ // Note that we do not use GetTickCount() here, since timeGetTime() gives
+ // more predictable delta values, as described here:
+ // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+ DWORD now = timeGetTime();
+ if (now < last_seen_now_) {
+ rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
+ }
+ last_seen_now_ = now;
+ return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+ }
+
+ private:
+ Mutex mutex_;
+ DWORD last_seen_now_;
+ int64_t rollover_ms_;
+};
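+
+// Worked example of the rollover handling above (illustrative): if the
+// previous timeGetTime() was 0xFFFFFF00 and the current one is 0x00000100,
+// then now < last_seen_now_, so rollover_ms_ grows by 2^32 ms and the
+// returned value keeps increasing monotonically instead of jumping back by
+// almost 49.7 days.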
+
+
+struct CreateTickClockTrait {
+ static TickClock* Create() {
+ // Try to load GetTickCount64() from kernel32.dll (available since Vista).
+ HMODULE kernel32 = ::GetModuleHandleA("kernel32.dll");
+ ASSERT(kernel32 != NULL);
+ FARPROC proc = ::GetProcAddress(kernel32, "GetTickCount64");
+ if (proc != NULL) {
+ return new WindowsVistaTickClock(
+ reinterpret_cast<GETTICKCOUNT64PROC>(proc));
+ }
+
+    // Fall back to the rollover-protected tick clock.
+ return new RolloverProtectedTickClock;
+ }
+};
+
+
+static LazyDynamicInstance<TickClock,
+ CreateTickClockTrait,
+ ThreadSafeInitOnceTrait>::type tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+struct CreateHighResTickClockTrait {
+ static TickClock* Create() {
+ // Check if the installed hardware supports a high-resolution performance
+    // counter, and if not fall back to the low-resolution tick clock.
+ LARGE_INTEGER ticks_per_second;
+ if (!QueryPerformanceFrequency(&ticks_per_second)) {
+ return tick_clock.Pointer();
+ }
+
+ // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
+    // is unreliable; fall back to the low-resolution tick clock.
+ CPU cpu;
+ if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+ return tick_clock.Pointer();
+ }
+
+ return new HighResolutionTickClock(ticks_per_second.QuadPart);
+ }
+};
+
+
+static LazyDynamicInstance<TickClock,
+ CreateHighResTickClockTrait,
+ ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+TimeTicks TimeTicks::Now() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+
+TimeTicks TimeTicks::HighResNow() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+#else // V8_OS_WIN
+
+TimeTicks TimeTicks::Now() {
+ return HighResNow();
+}
+
+
+TimeTicks TimeTicks::HighResNow() {
+ int64_t ticks;
+#if V8_OS_MACOSX
+ static struct mach_timebase_info info;
+ if (info.denom == 0) {
+ kern_return_t result = mach_timebase_info(&info);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+ }
+ ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+ info.numer / info.denom);
+#elif V8_OS_SOLARIS
+ ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
+#elif V8_LIBRT_NOT_AVAILABLE
+ // TODO(bmeurer): This is a temporary hack to support cross-compiling
+ // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
+ // cleanup the tools/gyp/v8.gyp file.
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
+#elif V8_OS_POSIX
+ struct timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+#endif // V8_OS_MACOSX
+ // Make sure we never return 0 here.
+ return TimeTicks(ticks + 1);
+}
+
+#endif // V8_OS_WIN
+
+} } // namespace v8::internal
diff --git a/chromium/v8/src/platform/time.h b/chromium/v8/src/platform/time.h
new file mode 100644
index 00000000000..2ce6cdd3e99
--- /dev/null
+++ b/chromium/v8/src/platform/time.h
@@ -0,0 +1,413 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_TIME_H_
+#define V8_PLATFORM_TIME_H_
+
+#include <ctime>
+#include <limits>
+
+#include "allocation.h"
+
+// Forward declarations.
+extern "C" {
+struct _FILETIME;
+struct mach_timespec;
+struct timespec;
+struct timeval;
+}
+
+namespace v8 {
+namespace internal {
+
+class Time;
+class TimeTicks;
+
+// -----------------------------------------------------------------------------
+// TimeDelta
+//
+// This class represents a duration of time, internally represented in
+// microseconds.
+
+class TimeDelta V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeDelta() : delta_(0) {}
+
+ // Converts units of time to TimeDeltas.
+ static TimeDelta FromDays(int days);
+ static TimeDelta FromHours(int hours);
+ static TimeDelta FromMinutes(int minutes);
+ static TimeDelta FromSeconds(int64_t seconds);
+ static TimeDelta FromMilliseconds(int64_t milliseconds);
+ static TimeDelta FromMicroseconds(int64_t microseconds) {
+ return TimeDelta(microseconds);
+ }
+ static TimeDelta FromNanoseconds(int64_t nanoseconds);
+
+ // Returns the time delta in some unit. The F versions return a floating
+ // point value, the "regular" versions return a rounded-down value.
+ //
+ // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+ // to the next full millisecond.
+ int InDays() const;
+ int InHours() const;
+ int InMinutes() const;
+ double InSecondsF() const;
+ int64_t InSeconds() const;
+ double InMillisecondsF() const;
+ int64_t InMilliseconds() const;
+ int64_t InMillisecondsRoundedUp() const;
+ int64_t InMicroseconds() const { return delta_; }
+ int64_t InNanoseconds() const;
+
+ // Converts to/from Mach time specs.
+ static TimeDelta FromMachTimespec(struct mach_timespec ts);
+ struct mach_timespec ToMachTimespec() const;
+
+ // Converts to/from POSIX time specs.
+ static TimeDelta FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ TimeDelta& operator=(const TimeDelta& other) {
+ delta_ = other.delta_;
+ return *this;
+ }
+
+ // Computations with other deltas.
+ TimeDelta operator+(const TimeDelta& other) const {
+ return TimeDelta(delta_ + other.delta_);
+ }
+ TimeDelta operator-(const TimeDelta& other) const {
+ return TimeDelta(delta_ - other.delta_);
+ }
+
+ TimeDelta& operator+=(const TimeDelta& other) {
+ delta_ += other.delta_;
+ return *this;
+ }
+ TimeDelta& operator-=(const TimeDelta& other) {
+ delta_ -= other.delta_;
+ return *this;
+ }
+ TimeDelta operator-() const {
+ return TimeDelta(-delta_);
+ }
+
+ double TimesOf(const TimeDelta& other) const {
+ return static_cast<double>(delta_) / static_cast<double>(other.delta_);
+ }
+ double PercentOf(const TimeDelta& other) const {
+ return TimesOf(other) * 100.0;
+ }
+
+ // Computations with ints, note that we only allow multiplicative operations
+ // with ints, and additive operations with other deltas.
+ TimeDelta operator*(int64_t a) const {
+ return TimeDelta(delta_ * a);
+ }
+ TimeDelta operator/(int64_t a) const {
+ return TimeDelta(delta_ / a);
+ }
+ TimeDelta& operator*=(int64_t a) {
+ delta_ *= a;
+ return *this;
+ }
+ TimeDelta& operator/=(int64_t a) {
+ delta_ /= a;
+ return *this;
+ }
+ int64_t operator/(const TimeDelta& other) const {
+ return delta_ / other.delta_;
+ }
+
+ // Comparison operators.
+ bool operator==(const TimeDelta& other) const {
+ return delta_ == other.delta_;
+ }
+ bool operator!=(const TimeDelta& other) const {
+ return delta_ != other.delta_;
+ }
+ bool operator<(const TimeDelta& other) const {
+ return delta_ < other.delta_;
+ }
+ bool operator<=(const TimeDelta& other) const {
+ return delta_ <= other.delta_;
+ }
+ bool operator>(const TimeDelta& other) const {
+ return delta_ > other.delta_;
+ }
+ bool operator>=(const TimeDelta& other) const {
+ return delta_ >= other.delta_;
+ }
+
+ private:
+ // Constructs a delta given the duration in microseconds. This is private
+ // to avoid confusion by callers with an integer constructor. Use
+ // FromSeconds, FromMilliseconds, etc. instead.
+ explicit TimeDelta(int64_t delta) : delta_(delta) {}
+
+ // Delta in microseconds.
+ int64_t delta_;
+};
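+
+// Usage sketch (illustrative only):
+//
+//   TimeDelta timeout = TimeDelta::FromSeconds(2);
+//   timeout += TimeDelta::FromMilliseconds(500);
+//   int64_t ms = timeout.InMilliseconds();  // 2500 (rounded down).
+//   double seconds = timeout.InSecondsF();  // 2.5 (floating point).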
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (1/1,000,000 of a second) since 00:00:00 UTC,
+// January 1, 1970.
+
+class Time V8_FINAL BASE_EMBEDDED {
+ public:
+ static const int64_t kMillisecondsPerSecond = 1000;
+ static const int64_t kMicrosecondsPerMillisecond = 1000;
+ static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+ kMillisecondsPerSecond;
+ static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+ static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static const int64_t kNanosecondsPerMicrosecond = 1000;
+ static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+ kMicrosecondsPerSecond;
+
+ // Contains the NULL time. Use Time::Now() to get the current time.
+ Time() : us_(0) {}
+
+ // Returns true if the time object has not been initialized.
+ bool IsNull() const { return us_ == 0; }
+
+ // Returns true if the time object is the maximum time.
+ bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+ // Returns the current time. Watch out, the system might adjust its clock
+ // in which case time will actually go backwards. We don't guarantee that
+ // times are increasing, or that two calls to Now() won't be the same.
+ static Time Now();
+
+ // Returns the current time. Same as Now() except that this function always
+ // uses system time so that there are no discrepancies between the returned
+ // time and system time even on virtual environments including our test bot.
+ // For timing sensitive unittests, this function should be used.
+ static Time NowFromSystemTime();
+
+ // Returns the time for epoch in Unix-like system (Jan 1, 1970).
+ static Time UnixEpoch() { return Time(0); }
+
+ // Returns the maximum time, which should be greater than any reasonable time
+ // with which we might compare it.
+ static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static Time FromInternalValue(int64_t value) {
+ return Time(value);
+ }
+ int64_t ToInternalValue() const {
+ return us_;
+ }
+
+ // Converts to/from POSIX time specs.
+ static Time FromTimespec(struct timespec ts);
+ struct timespec ToTimespec() const;
+
+ // Converts to/from POSIX time values.
+ static Time FromTimeval(struct timeval tv);
+ struct timeval ToTimeval() const;
+
+ // Converts to/from Windows file times.
+ static Time FromFiletime(struct _FILETIME ft);
+ struct _FILETIME ToFiletime() const;
+
+ // Converts to/from the Javascript convention for times, a number of
+ // milliseconds since the epoch:
+ static Time FromJsTime(double ms_since_epoch);
+ double ToJsTime() const;
+
+ Time& operator=(const Time& other) {
+ us_ = other.us_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const Time& other) const {
+ return TimeDelta::FromMicroseconds(us_ - other.us_);
+ }
+
+ // Modify by some time delta.
+ Time& operator+=(const TimeDelta& delta) {
+ us_ += delta.InMicroseconds();
+ return *this;
+ }
+ Time& operator-=(const TimeDelta& delta) {
+ us_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new time modified by some delta.
+ Time operator+(const TimeDelta& delta) const {
+ return Time(us_ + delta.InMicroseconds());
+ }
+ Time operator-(const TimeDelta& delta) const {
+ return Time(us_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const Time& other) const {
+ return us_ == other.us_;
+ }
+ bool operator!=(const Time& other) const {
+ return us_ != other.us_;
+ }
+ bool operator<(const Time& other) const {
+ return us_ < other.us_;
+ }
+ bool operator<=(const Time& other) const {
+ return us_ <= other.us_;
+ }
+ bool operator>(const Time& other) const {
+ return us_ > other.us_;
+ }
+ bool operator>=(const Time& other) const {
+ return us_ >= other.us_;
+ }
+
+ private:
+ explicit Time(int64_t us) : us_(us) {}
+
+ // Time in microseconds in UTC.
+ int64_t us_;
+};
+
+inline Time operator+(const TimeDelta& delta, const Time& time) {
+ return time + delta;
+}
+
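A minimal usage sketch of the Time/TimeDelta API added above (not part of the
patch; it relies only on members declared in this header, plus the TimeDelta
factory methods named in the constructor comment):

    // Build a deadline 500 ms from now and test whether it has passed.
    Time deadline = Time::Now() + TimeDelta::FromMilliseconds(500);
    bool expired = Time::Now() >= deadline;

    // Round-trip through the JavaScript convention (ms since the epoch).
    double js_ms = deadline.ToJsTime();
    Time restored = Time::FromJsTime(js_ms);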
+
+// -----------------------------------------------------------------------------
+// TimeTicks
+//
+// This class represents an abstract, monotonically non-decreasing clock for
+// use in measuring time durations. It is internally represented in
+// microseconds. It cannot be converted to a human-readable time, but is
+// guaranteed not to decrease (whereas Time::Now() may decrease or jump if the
+// user changes the computer clock). Note, however, that TimeTicks may "stand
+// still", for example while the computer is suspended.
+
+class TimeTicks V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeTicks() : ticks_(0) {}
+
+ // Platform-dependent tick count representing "right now."
+ // The resolution of this clock is ~1-15ms. Resolution varies depending
+ // on hardware/operating system configuration.
+ // This method never returns a null TimeTicks.
+ static TimeTicks Now();
+
+ // Returns a platform-dependent high-resolution tick count. Implementation
+ // is hardware dependent and may or may not return sub-millisecond
+ // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+ // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+ // This method never returns a null TimeTicks.
+ static TimeTicks HighResNow();
+
+ // Returns true if this object has not been initialized.
+ bool IsNull() const { return ticks_ == 0; }
+
+ // Converts to/from internal values. The meaning of the "internal value" is
+ // completely up to the implementation, so it should be treated as opaque.
+ static TimeTicks FromInternalValue(int64_t value) {
+ return TimeTicks(value);
+ }
+ int64_t ToInternalValue() const {
+ return ticks_;
+ }
+
+ TimeTicks& operator=(const TimeTicks other) {
+ ticks_ = other.ticks_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const TimeTicks other) const {
+ return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
+ }
+
+ // Modify by some time delta.
+ TimeTicks& operator+=(const TimeDelta& delta) {
+ ticks_ += delta.InMicroseconds();
+ return *this;
+ }
+ TimeTicks& operator-=(const TimeDelta& delta) {
+ ticks_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new TimeTicks modified by some delta.
+ TimeTicks operator+(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ + delta.InMicroseconds());
+ }
+ TimeTicks operator-(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const TimeTicks& other) const {
+ return ticks_ == other.ticks_;
+ }
+ bool operator!=(const TimeTicks& other) const {
+ return ticks_ != other.ticks_;
+ }
+ bool operator<(const TimeTicks& other) const {
+ return ticks_ < other.ticks_;
+ }
+ bool operator<=(const TimeTicks& other) const {
+ return ticks_ <= other.ticks_;
+ }
+ bool operator>(const TimeTicks& other) const {
+ return ticks_ > other.ticks_;
+ }
+ bool operator>=(const TimeTicks& other) const {
+ return ticks_ >= other.ticks_;
+ }
+
+ private:
+  // Please use Now() to create a new object. This is for internal use
+  // and testing. The tick count is in microseconds.
+ explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+
+ // Tick count in microseconds.
+ int64_t ticks_;
+};
+
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+ return ticks + delta;
+}
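A hedged sketch of the intended duration-measurement pattern (again not part
of the patch; HighResNow() and the operators used are declared above):

    TimeTicks start = TimeTicks::HighResNow();
    // ... workload under measurement (hypothetical) ...
    TimeDelta elapsed = TimeTicks::HighResNow() - start;
    int64_t us = elapsed.InMicroseconds();  // microseconds, as stored internally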
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_TIME_H_
diff --git a/chromium/v8/src/preparser.h b/chromium/v8/src/preparser.h
index faddecc5622..9358d6bd189 100644
--- a/chromium/v8/src/preparser.h
+++ b/chromium/v8/src/preparser.h
@@ -104,11 +104,6 @@ class DuplicateFinder {
};
-#ifdef WIN32
-#undef Yield
-#endif
-
-
class PreParser {
public:
enum PreParseResult {
diff --git a/chromium/v8/src/prettyprinter.cc b/chromium/v8/src/prettyprinter.cc
index 1824efa7f59..b1bac4cd4a7 100644
--- a/chromium/v8/src/prettyprinter.cc
+++ b/chromium/v8/src/prettyprinter.cc
@@ -38,11 +38,11 @@ namespace internal {
#ifdef DEBUG
-PrettyPrinter::PrettyPrinter() {
+PrettyPrinter::PrettyPrinter(Isolate* isolate) {
output_ = NULL;
size_ = 0;
pos_ = 0;
- InitializeAstVisitor();
+ InitializeAstVisitor(isolate);
}
@@ -480,8 +480,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(AstNode* node) {
- PrettyPrinter printer;
+void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
+ PrettyPrinter printer(isolate);
PrintF("%s", printer.Print(node));
}
@@ -658,7 +658,7 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter() : indent_(0) {
+AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {
}
diff --git a/chromium/v8/src/prettyprinter.h b/chromium/v8/src/prettyprinter.h
index 6657ecd1458..b7ff2af5fa1 100644
--- a/chromium/v8/src/prettyprinter.h
+++ b/chromium/v8/src/prettyprinter.h
@@ -38,7 +38,7 @@ namespace internal {
class PrettyPrinter: public AstVisitor {
public:
- PrettyPrinter();
+ explicit PrettyPrinter(Isolate* isolate);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -50,7 +50,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(AstNode* node);
+ static void PrintOut(Isolate* isolate, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -82,7 +82,7 @@ class PrettyPrinter: public AstVisitor {
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- AstPrinter();
+ explicit AstPrinter(Isolate* isolate);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
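The isolate is now passed explicitly rather than read from thread-local state.
A sketch of a DEBUG-only call site under the new signatures (assumes a valid
Isolate*, AstNode* and FunctionLiteral* in scope, as at the existing callers):

    #ifdef DEBUG
    PrettyPrinter::PrintOut(isolate, node);  // was: PrettyPrinter::PrintOut(node)
    AstPrinter printer(isolate);             // was: AstPrinter printer;
    printer.PrintProgram(program);
    #endif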
diff --git a/chromium/v8/src/profile-generator-inl.h b/chromium/v8/src/profile-generator-inl.h
index d92085ac32d..f2feb73fc91 100644
--- a/chromium/v8/src/profile-generator-inl.h
+++ b/chromium/v8/src/profile-generator-inl.h
@@ -56,8 +56,8 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
line_number_(line_number),
shared_id_(0),
script_id_(v8::Script::kNoScriptId),
- no_frame_ranges_(NULL) {
-}
+ no_frame_ranges_(NULL),
+ bailout_reason_(kEmptyBailoutReason) { }
bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
@@ -73,11 +73,9 @@ bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
: tree_(tree),
entry_(entry),
- total_ticks_(0),
self_ticks_(0),
children_(CodeEntriesMatch),
- id_(tree->next_node_id()) {
-}
+ id_(tree->next_node_id()) { }
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
@@ -92,6 +90,8 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
case OTHER:
case EXTERNAL:
return program_entry_;
+ case IDLE:
+ return idle_entry_;
default: return NULL;
}
}
diff --git a/chromium/v8/src/profile-generator.cc b/chromium/v8/src/profile-generator.cc
index e772a546471..38c1f785d9c 100644
--- a/chromium/v8/src/profile-generator.cc
+++ b/chromium/v8/src/profile-generator.cc
@@ -41,8 +41,8 @@ namespace v8 {
namespace internal {
-StringsStorage::StringsStorage()
- : names_(StringsMatch) {
+StringsStorage::StringsStorage(Heap* heap)
+ : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}
@@ -61,7 +61,7 @@ const char* StringsStorage::GetCopy(const char* src) {
OS::StrNCpy(dst, src, len);
dst[len] = '\0';
uint32_t hash =
- StringHasher::HashSequentialString(dst.start(), len, HEAP->HashSeed());
+ StringHasher::HashSequentialString(dst.start(), len, hash_seed_);
return AddOrDisposeString(dst.start(), hash);
}
@@ -95,7 +95,7 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
return format;
}
uint32_t hash = StringHasher::HashSequentialString(
- str.start(), len, HEAP->HashSeed());
+ str.start(), len, hash_seed_);
return AddOrDisposeString(str.start(), hash);
}
@@ -133,6 +133,7 @@ size_t StringsStorage::GetUsedMemorySize() const {
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
+const char* const CodeEntry::kEmptyBailoutReason = "";
CodeEntry::~CodeEntry() {
@@ -209,24 +210,15 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
}
-double ProfileNode::GetSelfMillis() const {
- return tree_->TicksToMillis(self_ticks_);
-}
-
-
-double ProfileNode::GetTotalMillis() const {
- return tree_->TicksToMillis(total_ticks_);
-}
-
-
void ProfileNode::Print(int indent) {
- OS::Print("%5u %5u %*c %s%s #%d %d",
- total_ticks_, self_ticks_,
+ OS::Print("%5u %*c %s%s %d #%d %s",
+ self_ticks_,
indent, ' ',
entry_->name_prefix(),
entry_->name(),
entry_->script_id(),
- id());
+ id(),
+ entry_->bailout_reason());
if (entry_->resource_name()[0] != '\0')
OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
OS::Print("\n");
@@ -298,11 +290,6 @@ struct NodesPair {
};
-void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
- ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
-}
-
-
class Position {
public:
explicit Position(ProfileNode* node)
@@ -345,39 +332,12 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-class CalculateTotalTicksCallback {
- public:
- void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
-
- void AfterAllChildrenTraversed(ProfileNode* node) {
- node->IncreaseTotalTicks(node->self_ticks());
- }
-
- void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
- parent->IncreaseTotalTicks(child->total_ticks());
- }
-};
-
-
-void ProfileTree::CalculateTotalTicks() {
- CalculateTotalTicksCallback cb;
- TraverseDepthFirst(&cb);
-}
-
-
-void ProfileTree::ShortPrint() {
- OS::Print("root: %u %u %.2fms %.2fms\n",
- root_->total_ticks(), root_->self_ticks(),
- root_->GetTotalMillis(), root_->GetSelfMillis());
-}
-
-
CpuProfile::CpuProfile(const char* title, unsigned uid, bool record_samples)
: title_(title),
uid_(uid),
record_samples_(record_samples),
- start_time_us_(OS::Ticks()),
- end_time_us_(0) {
+ start_time_(Time::NowFromSystemTime()) {
+ timer_.Start();
}
@@ -388,20 +348,7 @@ void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
- end_time_us_ = OS::Ticks();
- top_down_.CalculateTotalTicks();
-
- double duration_ms = (end_time_us_ - start_time_us_) / 1000.;
- if (duration_ms < 1) duration_ms = 1;
- unsigned ticks = top_down_.root()->total_ticks();
- double rate = ticks / duration_ms;
- top_down_.SetTickRatePerMs(rate);
-}
-
-
-void CpuProfile::ShortPrint() {
- OS::Print("top down ");
- top_down_.ShortPrint();
+ end_time_ = start_time_ + timer_.Elapsed();
}
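Profile timing now pairs a wall-clock anchor with a monotonic timer instead of
scaling raw OS::Ticks() values. A sketch of the lifecycle, assuming
(consistently with this hunk) that ElapsedTimer::Elapsed() returns a TimeDelta:

    Time start = Time::NowFromSystemTime();  // wall-clock anchor
    ElapsedTimer timer;
    timer.Start();                           // monotonic measurement begins
    // ... samples are collected ...
    Time end = start + timer.Elapsed();      // immune to clock adjustments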
@@ -496,8 +443,9 @@ void CodeMap::Print() {
}
-CpuProfilesCollection::CpuProfilesCollection()
- : current_profiles_semaphore_(OS::CreateSemaphore(1)) {
+CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
+ : function_and_resource_names_(heap),
+ current_profiles_semaphore_(1) {
}
@@ -512,7 +460,6 @@ static void DeleteCpuProfile(CpuProfile** profile_ptr) {
CpuProfilesCollection::~CpuProfilesCollection() {
- delete current_profiles_semaphore_;
finished_profiles_.Iterate(DeleteCpuProfile);
current_profiles_.Iterate(DeleteCpuProfile);
code_entries_.Iterate(DeleteCodeEntry);
@@ -522,20 +469,20 @@ CpuProfilesCollection::~CpuProfilesCollection() {
bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
bool record_samples) {
ASSERT(uid > 0);
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return false;
}
for (int i = 0; i < current_profiles_.length(); ++i) {
if (strcmp(current_profiles_[i]->title(), title) == 0) {
// Ignore attempts to start profile with the same title.
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return false;
}
}
current_profiles_.Add(new CpuProfile(title, uid, record_samples));
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
return true;
}
@@ -543,14 +490,14 @@ bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
for (int i = current_profiles_.length() - 1; i >= 0; --i) {
if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
profile = current_profiles_.Remove(i);
break;
}
}
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
if (profile == NULL) return NULL;
profile->CalculateTotalTicksAndSamplingRate();
@@ -586,11 +533,11 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
  // As starting / stopping profiles is rare relative to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
- current_profiles_semaphore_->Wait();
+ current_profiles_semaphore_.Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
current_profiles_[i]->AddPath(path);
}
- current_profiles_semaphore_->Signal();
+ current_profiles_semaphore_.Signal();
}
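With the semaphore embedded by value, the OS::CreateSemaphore()/delete pair
disappears and the Wait()/Signal() bracketing above is the whole protocol. A
hypothetical RAII helper (not in V8; shown only to make the pairing explicit)
could wrap it:

    class ScopedSemaphore {  // hypothetical, not part of the patch
     public:
      explicit ScopedSemaphore(Semaphore* sem) : sem_(sem) { sem_->Wait(); }
      ~ScopedSemaphore() { sem_->Signal(); }
     private:
      Semaphore* sem_;
    };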
@@ -614,6 +561,8 @@ const char* const ProfileGenerator::kAnonymousFunctionName =
"(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
"(program)";
+const char* const ProfileGenerator::kIdleEntryName =
+ "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
"(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
@@ -624,6 +573,8 @@ ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
: profiles_(profiles),
program_entry_(
profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
+ idle_entry_(
+ profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
gc_entry_(
profiles->NewCodeEntry(Logger::BUILTIN_TAG,
kGarbageCollectorEntryName)),
diff --git a/chromium/v8/src/profile-generator.h b/chromium/v8/src/profile-generator.h
index 0cc397ed9bf..0a4502cc1b3 100644
--- a/chromium/v8/src/profile-generator.h
+++ b/chromium/v8/src/profile-generator.h
@@ -41,7 +41,7 @@ struct OffsetRange;
// forever, even if they disappear from JS heap or external storage.
class StringsStorage {
public:
- StringsStorage();
+ explicit StringsStorage(Heap* heap);
~StringsStorage();
const char* GetCopy(const char* src);
@@ -63,6 +63,7 @@ class StringsStorage {
const char* AddOrDisposeString(char* str, uint32_t hash);
// Mapping of strings by String::Hash to const char* strings.
+ uint32_t hash_seed_;
HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
@@ -88,6 +89,10 @@ class CodeEntry {
INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
INLINE(int script_id() const) { return script_id_; }
INLINE(void set_script_id(int script_id)) { script_id_ = script_id; }
+ INLINE(void set_bailout_reason(const char* bailout_reason)) {
+ bailout_reason_ = bailout_reason;
+ }
+ INLINE(const char* bailout_reason() const) { return bailout_reason_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
@@ -105,6 +110,7 @@ class CodeEntry {
static const char* const kEmptyNamePrefix;
static const char* const kEmptyResourceName;
+ static const char* const kEmptyBailoutReason;
private:
Logger::LogEventsAndTags tag_ : 8;
@@ -116,6 +122,7 @@ class CodeEntry {
int shared_id_;
int script_id_;
List<OffsetRange>* no_frame_ranges_;
+ const char* bailout_reason_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -131,14 +138,10 @@ class ProfileNode {
ProfileNode* FindOrAddChild(CodeEntry* entry);
INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
- INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
INLINE(CodeEntry* entry() const) { return entry_; }
INLINE(unsigned self_ticks() const) { return self_ticks_; }
- INLINE(unsigned total_ticks() const) { return total_ticks_; }
INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
- double GetSelfMillis() const;
- double GetTotalMillis() const;
unsigned id() const { return id_; }
void Print(int indent);
@@ -155,7 +158,6 @@ class ProfileNode {
ProfileTree* tree_;
CodeEntry* entry_;
- unsigned total_ticks_;
unsigned self_ticks_;
// Mapping from CodeEntry* to ProfileNode*
HashMap children_;
@@ -173,17 +175,9 @@ class ProfileTree {
ProfileNode* AddPathFromEnd(const Vector<CodeEntry*>& path);
void AddPathFromStart(const Vector<CodeEntry*>& path);
- void CalculateTotalTicks();
-
- double TicksToMillis(unsigned ticks) const {
- return ticks * ms_to_ticks_scale_;
- }
ProfileNode* root() const { return root_; }
- void SetTickRatePerMs(double ticks_per_ms);
-
unsigned next_node_id() { return next_node_id_++; }
- void ShortPrint();
void Print() {
root_->Print(0);
}
@@ -195,7 +189,6 @@ class ProfileTree {
CodeEntry root_entry_;
unsigned next_node_id_;
ProfileNode* root_;
- double ms_to_ticks_scale_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
@@ -216,20 +209,20 @@ class CpuProfile {
int samples_count() const { return samples_.length(); }
ProfileNode* sample(int index) const { return samples_.at(index); }
- int64_t start_time_us() const { return start_time_us_; }
- int64_t end_time_us() const { return end_time_us_; }
+ Time start_time() const { return start_time_; }
+ Time end_time() const { return end_time_; }
void UpdateTicksScale();
- void ShortPrint();
void Print();
private:
const char* title_;
unsigned uid_;
bool record_samples_;
- int64_t start_time_us_;
- int64_t end_time_us_;
+ Time start_time_;
+ Time end_time_;
+ ElapsedTimer timer_;
List<ProfileNode*> samples_;
ProfileTree top_down_;
@@ -285,7 +278,7 @@ class CodeMap {
class CpuProfilesCollection {
public:
- CpuProfilesCollection();
+ explicit CpuProfilesCollection(Heap* heap);
~CpuProfilesCollection();
bool StartProfiling(const char* title, unsigned uid, bool record_samples);
@@ -326,7 +319,7 @@ class CpuProfilesCollection {
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
- Semaphore* current_profiles_semaphore_;
+ Semaphore current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
@@ -342,6 +335,7 @@ class ProfileGenerator {
static const char* const kAnonymousFunctionName;
static const char* const kProgramEntryName;
+ static const char* const kIdleEntryName;
static const char* const kGarbageCollectorEntryName;
// Used to represent frames for which we have no reliable way to
// detect function.
@@ -353,6 +347,7 @@ class ProfileGenerator {
CpuProfilesCollection* profiles_;
CodeMap code_map_;
CodeEntry* program_entry_;
+ CodeEntry* idle_entry_;
CodeEntry* gc_entry_;
CodeEntry* unresolved_entry_;
diff --git a/chromium/v8/src/property-details.h b/chromium/v8/src/property-details.h
index 6b62ddb18e5..7f44b79277c 100644
--- a/chromium/v8/src/property-details.h
+++ b/chromium/v8/src/property-details.h
@@ -148,7 +148,7 @@ class Representation {
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
+ return kind_ == kInteger32 || kind_ == kDouble || kind_ == kSmi;
}
const char* Mnemonic() const;
diff --git a/chromium/v8/src/property.h b/chromium/v8/src/property.h
index d109de91d10..0f78ba478ec 100644
--- a/chromium/v8/src/property.h
+++ b/chromium/v8/src/property.h
@@ -46,7 +46,8 @@ class Descriptor BASE_EMBEDDED {
public:
MUST_USE_RESULT MaybeObject* KeyToUniqueName() {
if (!key_->IsUniqueName()) {
- MaybeObject* maybe_result = HEAP->InternalizeString(String::cast(key_));
+ MaybeObject* maybe_result =
+ key_->GetIsolate()->heap()->InternalizeString(String::cast(key_));
if (!maybe_result->To(&key_)) return maybe_result;
}
return key_;
@@ -225,14 +226,14 @@ class LookupResult BASE_EMBEDDED {
void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
holder_ = proxy;
- details_ = PropertyDetails(NONE, HANDLER, Representation::None());
+ details_ = PropertyDetails(NONE, HANDLER, Representation::Tagged());
cacheable_ = false;
}
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
- details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::None());
+ details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::Tagged());
}
void NotFound() {
@@ -422,12 +423,10 @@ class LookupResult BASE_EMBEDDED {
PropertyIndex GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(IsField());
return PropertyIndex::NewFieldIndex(GetFieldIndexFromMap(holder()->map()));
}
int GetLocalFieldIndexFromMap(Map* map) {
- ASSERT(IsField());
return GetFieldIndexFromMap(map) - map->inobject_properties();
}
diff --git a/chromium/v8/src/regexp-macro-assembler.cc b/chromium/v8/src/regexp-macro-assembler.cc
index fa792768bc3..7d027f880fa 100644
--- a/chromium/v8/src/regexp-macro-assembler.cc
+++ b/chromium/v8/src/regexp-macro-assembler.cc
@@ -163,7 +163,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int* output,
int output_size,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
// Ensure that the minimum stack has been allocated.
RegExpStackScope stack_scope(isolate);
Address stack_base = stack_scope.stack()->stack_base();
@@ -238,7 +237,6 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address byte_offset2,
size_t byte_length,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
isolate->regexp_macro_assembler_canonicalize();
// This function is not allowed to cause a garbage collection.
@@ -271,7 +269,6 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
Address* stack_base,
Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
RegExpStack* regexp_stack = isolate->regexp_stack();
size_t size = regexp_stack->stack_capacity();
Address old_stack_base = regexp_stack->stack_base();
diff --git a/chromium/v8/src/regexp-stack.cc b/chromium/v8/src/regexp-stack.cc
index fc4114af5de..f3af490f1e4 100644
--- a/chromium/v8/src/regexp-stack.cc
+++ b/chromium/v8/src/regexp-stack.cc
@@ -39,7 +39,6 @@ RegExpStackScope::RegExpStackScope(Isolate* isolate)
RegExpStackScope::~RegExpStackScope() {
- ASSERT(Isolate::Current() == regexp_stack_->isolate_);
// Reset the buffer if it has grown.
regexp_stack_->Reset();
}
diff --git a/chromium/v8/src/rewriter.cc b/chromium/v8/src/rewriter.cc
index df5c353415d..06335a80c7b 100644
--- a/chromium/v8/src/rewriter.cc
+++ b/chromium/v8/src/rewriter.cc
@@ -43,8 +43,8 @@ class Processor: public AstVisitor {
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(Isolate::Current(), zone) {
- InitializeAstVisitor();
+ factory_(zone->isolate(), zone) {
+ InitializeAstVisitor(zone->isolate());
}
virtual ~Processor() { }
diff --git a/chromium/v8/src/runtime-profiler.cc b/chromium/v8/src/runtime-profiler.cc
index 0e99650ed73..95dcc4f983b 100644
--- a/chromium/v8/src/runtime-profiler.cc
+++ b/chromium/v8/src/runtime-profiler.cc
@@ -127,7 +127,7 @@ static void GetICCounts(Code* shared_code,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
- if (FLAG_trace_opt && function->PassesHydrogenFilter()) {
+ if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
PrintF("[marking ");
function->ShortPrint();
PrintF(" for recompilation, reason: %s", reason);
@@ -139,10 +139,18 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
PrintF("]\n");
}
- if (FLAG_parallel_recompilation && !isolate_->bootstrapper()->IsActive()) {
- ASSERT(!function->IsMarkedForInstallingRecompiledCode());
+
+ if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
+ if (FLAG_concurrent_osr &&
+ isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
+ // Do not attempt regular recompilation if we already queued this for OSR.
+ // TODO(yangguo): This is necessary so that we don't install optimized
+ // code on a function that is already optimized, since OSR and regular
+ // recompilation race. This goes away as soon as OSR becomes one-shot.
+ return;
+ }
ASSERT(!function->IsInRecompileQueue());
- function->MarkForParallelRecompilation();
+ function->MarkForConcurrentRecompilation();
} else {
// The next call to the function will trigger optimization.
function->MarkForLazyRecompilation();
@@ -172,23 +180,12 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// any back edge in any unoptimized frame will trigger on-stack
// replacement for that frame.
if (FLAG_trace_osr) {
- PrintF("[patching back edges in ");
+ PrintF("[OSR - patching back edges in ");
function->PrintName();
- PrintF(" for on-stack replacement]\n");
+ PrintF("]\n");
}
- // Get the interrupt stub code object to match against. We aren't
- // prepared to generate it, but we don't expect to have to.
- Code* interrupt_code = NULL;
- InterruptStub interrupt_stub;
- bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code, isolate_);
- if (found_code) {
- Code* replacement_code =
- isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
- Code* unoptimized_code = shared->code();
- Deoptimizer::PatchInterruptCode(
- unoptimized_code, interrupt_code, replacement_code);
- }
+ Deoptimizer::PatchInterruptCode(isolate_, shared->code());
}
@@ -229,11 +226,7 @@ void RuntimeProfiler::OptimizeNow() {
if (isolate_->DebuggerHasBreakPoints()) return;
- if (FLAG_parallel_recompilation) {
- // Take this as opportunity to process the optimizing compiler thread's
- // output queue so that it does not unnecessarily keep objects alive.
- isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
- }
+ DisallowHeapAllocation no_gc;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
@@ -283,7 +276,7 @@ void RuntimeProfiler::OptimizeNow() {
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
(function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation() ||
function->IsOptimized())) {
       // Attempt OSR if we are still running unoptimized code even though
       // the function has long been marked or even already been optimized.
diff --git a/chromium/v8/src/runtime.cc b/chromium/v8/src/runtime.cc
index ef6eeb3a497..3f7c0b98499 100644
--- a/chromium/v8/src/runtime.cc
+++ b/chromium/v8/src/runtime.cc
@@ -66,6 +66,31 @@
#include "v8threads.h"
#include "vm-state-inl.h"
+#ifdef V8_I18N_SUPPORT
+#include "i18n.h"
+#include "unicode/brkiter.h"
+#include "unicode/calendar.h"
+#include "unicode/coll.h"
+#include "unicode/curramt.h"
+#include "unicode/datefmt.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/numsys.h"
+#include "unicode/rbbi.h"
+#include "unicode/smpdtfmt.h"
+#include "unicode/timezone.h"
+#include "unicode/uchar.h"
+#include "unicode/ucol.h"
+#include "unicode/ucurr.h"
+#include "unicode/uloc.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+#endif
+
#ifndef _STLP_VENDOR_CSTD
// STLPort doesn't import fpclassify and isless into the std namespace.
using std::fpclassify;
@@ -265,9 +290,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
Handle<Object> result;
uint32_t element_index = 0;
- JSReceiver::StoreMode mode = value->IsJSObject()
- ? JSReceiver::FORCE_FIELD
- : JSReceiver::ALLOW_AS_CONSTANT;
+ StoreMode mode = value->IsJSObject() ? FORCE_FIELD : ALLOW_AS_CONSTANT;
if (key->IsInternalizedString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
@@ -662,10 +685,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
- proxy->Fix();
+ CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
+ JSProxy::Fix(proxy);
return isolate->heap()->undefined_value();
}
@@ -885,6 +908,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
ASSERT(byte_length % element_size == 0);
size_t length = byte_length / element_size;
+ if (length > static_cast<unsigned>(Smi::kMaxValue)) {
+ return isolate->Throw(*isolate->factory()->
+ NewRangeError("invalid_typed_array_length",
+ HandleVector<Object>(NULL, 0)));
+ }
+
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
holder->set_weak_next(buffer->weak_first_view());
@@ -924,12 +953,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
size_t length = NumberToSize(isolate, *length_obj);
- size_t byte_length = length * element_size;
- if (byte_length < length) { // Overflow
+
+ if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
+ (length > (kMaxInt / element_size))) {
return isolate->Throw(*isolate->factory()->
- NewRangeError("invalid_array_buffer_length",
+ NewRangeError("invalid_typed_array_length",
HandleVector<Object>(NULL, 0)));
}
+ size_t byte_length = length * element_size;
// We assume that the caller of this function will initialize holder
// with the loop
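The reordering matters: the old guard multiplied first and tested
byte_length < length, which does not catch every wrap-around. On a 32-bit
build, length = 0x60000000 with element_size = 4 wraps to
byte_length = 0x80000000, which is greater than length and slips through;
testing length against kMaxInt / element_size before multiplying rejects all
such cases. A standalone sketch of the guard:

    #include <stdint.h>
    // Overflow-safe: reject before multiplying instead of testing afterwards.
    static bool ByteLengthFits(uint32_t length, uint32_t element_size) {
      return length <= static_cast<uint32_t>(INT32_MAX) / element_size;
    }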
@@ -971,7 +1002,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) {
JSArrayBuffer::cast(typed_array->buffer())->backing_store());
size_t source_byte_offset =
NumberToSize(isolate, typed_array->byte_offset());
- OS::MemCopy(
+ memcpy(
buffer->backing_store(),
backing_store + source_byte_offset,
byte_length);
@@ -1639,6 +1670,14 @@ static bool CheckAccessException(Object* callback,
(access_type == v8::ACCESS_GET && info->all_can_read()) ||
(access_type == v8::ACCESS_SET && info->all_can_write());
}
+ if (callback->IsAccessorPair()) {
+ AccessorPair* info = AccessorPair::cast(callback);
+ return
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ }
return false;
}
@@ -1920,6 +1959,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
}
+// Transform getter or setter into something DefineAccessor can handle.
+static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
+ Handle<Object> component) {
+ if (component->IsUndefined()) return isolate->factory()->null_value();
+ Handle<FunctionTemplateInfo> info =
+ Handle<FunctionTemplateInfo>::cast(component);
+ return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAccessorProperty) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 6);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
+ CONVERT_SMI_ARG_CHECKED(attribute, 4);
+ CONVERT_SMI_ARG_CHECKED(access_control, 5);
+ JSObject::DefineAccessor(object,
+ name,
+ InstantiateAccessorComponent(isolate, getter),
+ InstantiateAccessorComponent(isolate, setter),
+ static_cast<PropertyAttributes>(attribute),
+ static_cast<v8::AccessControl>(access_control));
+ return isolate->heap()->undefined_value();
+}
+
+
static Failure* ThrowRedeclarationError(Isolate* isolate,
const char* type,
Handle<String> name) {
@@ -2216,9 +2284,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
LookupResult lookup(isolate);
global->LocalLookup(*name, &lookup);
if (!lookup.IsFound()) {
- return global->SetLocalPropertyIgnoreAttributes(*name,
- *value,
- attributes);
+ HandleScope handle_scope(isolate);
+ Handle<GlobalObject> global(isolate->context()->global_object());
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
+ attributes));
+ return *value;
}
if (!lookup.IsReadOnly()) {
@@ -2409,7 +2481,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
}
Object* new_object;
{ MaybeObject* maybe_new_object =
- isolate->heap()->AllocateFixedArrayWithHoles(elements_count);
+ isolate->heap()->AllocateFixedArray(elements_count);
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
FixedArray* elements = FixedArray::cast(new_object);
@@ -2435,41 +2507,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_CHECKED(String, source, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
// If source is the empty string we set it to "(?:)" instead as
// suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = isolate->heap()->query_colon_string();
+ if (source->length() == 0) source = isolate->factory()->query_colon_string();
- Object* global = args[2];
- if (!global->IsTrue()) global = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, global, 2);
+ if (!global->IsTrue()) global = isolate->factory()->false_value();
- Object* ignoreCase = args[3];
- if (!ignoreCase->IsTrue()) ignoreCase = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, ignoreCase, 3);
+ if (!ignoreCase->IsTrue()) ignoreCase = isolate->factory()->false_value();
- Object* multiline = args[4];
- if (!multiline->IsTrue()) multiline = isolate->heap()->false_value();
+ CONVERT_ARG_HANDLE_CHECKED(Object, multiline, 4);
+ if (!multiline->IsTrue()) multiline = isolate->factory()->false_value();
Map* map = regexp->map();
Object* constructor = map->constructor();
if (constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
+ regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *source);
// Both true and false are immovable immortal objects so no need for write
// barrier.
regexp->InObjectPropertyAtPut(
- JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
+ JSRegExp::kGlobalFieldIndex, *global, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
- JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
+ JSRegExp::kIgnoreCaseFieldIndex, *ignoreCase, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
- JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
+ JSRegExp::kMultilineFieldIndex, *multiline, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER);
- return regexp;
+ return *regexp;
}
// Map has changed, so use generic, but slower, method.
@@ -2477,34 +2549,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Heap* heap = isolate->heap();
- MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_string(),
- source,
- final);
- // TODO(jkummerow): Turn these back into ASSERTs when we can be certain
- // that it never fires in Release mode in the wild.
- CHECK(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_string(),
- global,
- final);
- CHECK(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_string(),
- ignoreCase,
- final);
- CHECK(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_string(),
- multiline,
- final);
- CHECK(!result->IsFailure());
- result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_string(),
- Smi::FromInt(0),
- writable);
- CHECK(!result->IsFailure());
- USE(result);
- return regexp;
+ Handle<Object> zero(Smi::FromInt(0), isolate);
+ Factory* factory = isolate->factory();
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->source_string(), source, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->global_string(), global, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->ignore_case_string(), ignoreCase, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->multiline_string(), multiline, final));
+ CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes(
+ regexp, factory->last_index_string(), zero, writable));
+ return *regexp;
}
@@ -2561,8 +2618,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
if (!callable->IsJSFunction()) {
HandleScope scope(isolate);
bool threw = false;
- Handle<Object> delegate =
- Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+ Handle<Object> delegate = Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable), &threw);
if (threw) return Failure::Exception();
callable = JSFunction::cast(*delegate);
}
@@ -2580,8 +2637,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
if (!callable->IsJSFunction()) {
HandleScope scope(isolate);
bool threw = false;
- Handle<Object> delegate =
- Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+ Handle<Object> delegate = Execution::TryGetFunctionDelegate(
+ isolate, Handle<JSReceiver>(callable), &threw);
if (threw) return Failure::Exception();
callable = JSFunction::cast(*delegate);
}
@@ -2756,16 +2813,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
ASSERT(fun->should_have_prototype());
- Object* obj;
- { MaybeObject* maybe_obj =
- Accessors::FunctionSetPrototype(fun, args[1], NULL);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ Accessors::FunctionSetPrototype(fun, value);
return args[0]; // return TOS
}
@@ -2996,6 +3050,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
JavaScriptFrame* frame = stack_iterator.frame();
ASSERT_EQ(frame->function(), generator_object->function());
+ ASSERT(frame->function()->is_compiled());
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
@@ -4664,6 +4719,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsValidSmi) {
+ HandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
+ if (Smi::IsValid(number)) {
+ return isolate->heap()->true_value();
+ } else {
+ return isolate->heap()->false_value();
+ }
+}
+
+
// Returns a single character string where first character equals
// string->Get(index).
static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
@@ -4703,10 +4771,10 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype(isolate)->GetElement(index);
+ return object->GetPrototype(isolate)->GetElement(isolate, index);
}
- return object->GetElement(index);
+ return object->GetElement(isolate, index);
}
@@ -4728,7 +4796,7 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
} else {
bool has_pending_exception = false;
Handle<Object> converted =
- Execution::ToString(key, &has_pending_exception);
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
name = Handle<Name>::cast(converted);
}
@@ -4770,7 +4838,7 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
} else {
bool has_pending_exception = false;
Handle<Object> converted =
- Execution::ToString(key, &has_pending_exception);
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
name = Handle<Name>::cast(converted);
}
@@ -4993,9 +5061,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
- return js_object->SetLocalPropertyIgnoreAttributes(*name,
- *obj_value,
- attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, obj_value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return Runtime::ForceSetObjectProperty(isolate,
@@ -5068,7 +5137,7 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (object->IsJSProxy()) {
bool has_pending_exception = false;
Handle<Object> name = key->IsSymbol()
- ? key : Execution::ToString(key, &has_pending_exception);
+ ? key : Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
return JSProxy::cast(*object)->SetProperty(
Name::cast(*name), *value, attr, strict_mode);
@@ -5097,7 +5166,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (js_object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ Handle<Object> number =
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
value = number;
}
@@ -5116,7 +5186,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
if (js_object->HasExternalArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ Handle<Object> number =
+ Execution::ToNumber(isolate, value, &has_exception);
if (has_exception) return Failure::Exception();
value = number;
}
@@ -5133,7 +5204,8 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
Handle<String> name = Handle<String>::cast(converted);
@@ -5178,13 +5250,17 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted =
+ Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
Handle<String> name = Handle<String>::cast(converted);
@@ -5192,7 +5268,10 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
return js_object->SetElement(
index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
} else {
- return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ js_object, name, value, attr);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
@@ -5227,7 +5306,8 @@ MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
} else {
// Call-back into JavaScript to convert the key to a string.
bool has_pending_exception = false;
- Handle<Object> converted = Execution::ToString(key, &has_pending_exception);
+ Handle<Object> converted = Execution::ToString(
+ isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
name = Handle<String>::cast(converted);
}
@@ -5394,10 +5474,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_ARG_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
// Compute attributes.
PropertyAttributes attributes = NONE;
if (args.length() == 4) {
@@ -5407,9 +5488,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
(unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
attributes = static_cast<PropertyAttributes>(unchecked_value);
}
-
- return object->
- SetLocalPropertyIgnoreAttributes(name, args[2], attributes);
+ Handle<Object> result = JSObject::SetLocalPropertyIgnoreAttributes(
+ object, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -5819,7 +5901,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
HandleScope scope(isolate);
bool exception = false;
Handle<Object> converted =
- Execution::ToString(args.at<Object>(0), &exception);
+ Execution::ToString(isolate, args.at<Object>(0), &exception);
if (exception) return Failure::Exception();
Handle<String> key = Handle<String>::cast(converted);
@@ -5828,7 +5910,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
if (index < n) {
return frame->GetParameter(index);
} else {
- return isolate->initial_object_prototype()->GetElement(index);
+ return isolate->initial_object_prototype()->GetElement(isolate, index);
}
}
@@ -6087,6 +6169,7 @@ template <class Converter>
MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
Isolate* isolate,
String* s,
+ String::Encoding result_encoding,
int length,
int input_string_length,
unibrow::Mapping<Converter, 128>* mapping) {
@@ -6102,7 +6185,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
Object* o;
- { MaybeObject* maybe_o = s->IsOneByteRepresentation()
+ { MaybeObject* maybe_o = result_encoding == String::ONE_BYTE_ENCODING
? isolate->heap()->AllocateRawOneByteString(length)
: isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
@@ -6110,6 +6193,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
String* result = String::cast(o);
bool has_changed_character = false;
+ DisallowHeapAllocation no_gc;
+
// Convert all characters to upper case, assuming that they will fit
// in the buffer
Access<ConsStringIteratorOp> op(
@@ -6118,6 +6203,10 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
uc32 current = stream.GetNext();
+  // y with umlaut (0xff) is the only Latin-1 character that stops fitting
+  // into one byte when converted to uppercase (it maps to U+0178).
+ static const uc32 yuml_code = 0xff;
+ bool ignore_yuml = result->IsSeqTwoByteString() || Converter::kIsToLower;
for (int i = 0; i < length;) {
bool has_next = stream.HasMore();
uc32 next = has_next ? stream.GetNext() : 0;
@@ -6126,13 +6215,14 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// The case conversion of this character is the character itself.
result->Set(i, current);
i++;
- } else if (char_length == 1) {
+ } else if (char_length == 1 && (ignore_yuml || current != yuml_code)) {
// Common case: converting the letter resulted in one character.
ASSERT(static_cast<uc32>(chars[0]) != current);
result->Set(i, chars[0]);
has_changed_character = true;
i++;
} else if (length == input_string_length) {
+ bool found_yuml = (current == yuml_code);
// We've assumed that the result would be as long as the
// input but here is a character that converts to several
// characters. No matter, we calculate the exact length
@@ -6152,6 +6242,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
int current_length = i + char_length + next_length;
while (stream.HasMore()) {
current = stream.GetNext();
+ found_yuml |= (current == yuml_code);
// NOTE: we use 0 as the next character here because, while
// the next character may affect what a character converts to,
       // it does not in any case affect the length of what it converts to.
@@ -6164,8 +6255,10 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
return Failure::OutOfMemoryException(0x13);
}
}
- // Try again with the real length.
- return Smi::FromInt(current_length);
+      // Try again with the real length. Return a negative length if we need
+      // to allocate a two-byte string to uppercase y-umlaut.
+ return (found_yuml && !ignore_yuml) ? Smi::FromInt(-current_length)
+ : Smi::FromInt(current_length);
} else {
for (int j = 0; j < char_length; j++) {
result->Set(i, chars[j]);
@@ -6211,121 +6304,107 @@ static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
}
-enum AsciiCaseConversion {
- ASCII_TO_LOWER,
- ASCII_TO_UPPER
-};
-
-
-template <AsciiCaseConversion dir>
-struct FastAsciiConverter {
- static bool Convert(char* dst, char* src, int length, bool* changed_out) {
+template<class Converter>
+static bool FastAsciiConvert(char* dst,
+ char* src,
+ int length,
+ bool* changed_out) {
#ifdef DEBUG
char* saved_dst = dst;
char* saved_src = src;
#endif
- // We rely on the distance between upper and lower case letters
- // being a known power of 2.
- ASSERT('a' - 'A' == (1 << 5));
- // Boundaries for the range of input characters than require conversion.
- const char lo = (dir == ASCII_TO_LOWER) ? 'A' - 1 : 'a' - 1;
- const char hi = (dir == ASCII_TO_LOWER) ? 'Z' + 1 : 'z' + 1;
- bool changed = false;
- uintptr_t or_acc = 0;
- char* const limit = src + length;
+ DisallowHeapAllocation no_gc;
+ // We rely on the distance between upper and lower case letters
+ // being a known power of 2.
+ ASSERT('a' - 'A' == (1 << 5));
+  // Boundaries for the range of input characters that require conversion.
+ static const char lo = Converter::kIsToLower ? 'A' - 1 : 'a' - 1;
+ static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
+ bool changed = false;
+ uintptr_t or_acc = 0;
+ char* const limit = src + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
- // Process the prefix of the input that requires no conversion one
- // (machine) word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
- or_acc |= w;
- if (AsciiRangeMask(w, lo, hi) != 0) {
- changed = true;
- break;
- }
- *reinterpret_cast<uintptr_t*>(dst) = w;
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
- // Process the remainder of the input performing conversion when
- // required one word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
- or_acc |= w;
- uintptr_t m = AsciiRangeMask(w, lo, hi);
- // The mask has high (7th) bit set in every byte that needs
- // conversion and we know that the distance between cases is
- // 1 << 5.
- *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
-#endif
- // Process the last few bytes of the input (or the whole input if
- // unaligned access is not supported).
- while (src < limit) {
- char c = *src;
- or_acc |= c;
- if (lo < c && c < hi) {
- c ^= (1 << 5);
- changed = true;
- }
- *dst = c;
- ++src;
- ++dst;
- }
- if ((or_acc & kAsciiMask) != 0) {
- return false;
+ // Process the prefix of the input that requires no conversion one
+ // (machine) word at a time.
+ while (src <= limit - sizeof(uintptr_t)) {
+ uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ or_acc |= w;
+ if (AsciiRangeMask(w, lo, hi) != 0) {
+ changed = true;
+ break;
}
-#ifdef DEBUG
- CheckConvert(saved_dst, saved_src, length, changed);
+ *reinterpret_cast<uintptr_t*>(dst) = w;
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
+ }
+ // Process the remainder of the input performing conversion when
+ // required one word at a time.
+ while (src <= limit - sizeof(uintptr_t)) {
+ uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+ or_acc |= w;
+ uintptr_t m = AsciiRangeMask(w, lo, hi);
+ // The mask has high (7th) bit set in every byte that needs
+ // conversion and we know that the distance between cases is
+ // 1 << 5.
+ *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
+ }
#endif
- *changed_out = changed;
- return true;
+ // Process the last few bytes of the input (or the whole input if
+ // unaligned access is not supported).
+ while (src < limit) {
+ char c = *src;
+ or_acc |= c;
+ if (lo < c && c < hi) {
+ c ^= (1 << 5);
+ changed = true;
+ }
+ *dst = c;
+ ++src;
+ ++dst;
+ }
+ if ((or_acc & kAsciiMask) != 0) {
+ return false;
}
+ ASSERT(CheckFastAsciiConvert(
+ saved_dst, saved_src, length, changed, Converter::kIsToLower));
+
+ *changed_out = changed;
+ return true;
+}
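The word-at-a-time loop relies on AsciiRangeMask() (defined just above this
hunk) setting bit 7 in every byte holding a letter that needs conversion;
since 'a' - 'A' == 0x20, shifting the mask right by two lands on bit 5, the
ASCII case bit, and XOR toggles it. A self-contained sketch of the core trick:

    #include <stdint.h>
    // Toggle the case of every byte flagged in 'mask' (bit 7 set per byte):
    // bit 7 >> 2 == bit 5, which is exactly the ASCII case bit (0x20).
    static uintptr_t ToggleCaseFlagged(uintptr_t word, uintptr_t mask) {
      return word ^ (mask >> 2);
    }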
+
#ifdef DEBUG
- static void CheckConvert(char* dst, char* src, int length, bool changed) {
- bool expected_changed = false;
- for (int i = 0; i < length; i++) {
- if (dst[i] == src[i]) continue;
- expected_changed = true;
- if (dir == ASCII_TO_LOWER) {
- ASSERT('A' <= src[i] && src[i] <= 'Z');
- ASSERT(dst[i] == src[i] + ('a' - 'A'));
- } else {
- ASSERT(dir == ASCII_TO_UPPER);
- ASSERT('a' <= src[i] && src[i] <= 'z');
- ASSERT(dst[i] == src[i] - ('a' - 'A'));
- }
+static bool CheckFastAsciiConvert(char* dst,
+ char* src,
+ int length,
+ bool changed,
+ bool is_to_lower) {
+ bool expected_changed = false;
+ for (int i = 0; i < length; i++) {
+ if (dst[i] == src[i]) continue;
+ expected_changed = true;
+ if (is_to_lower) {
+ ASSERT('A' <= src[i] && src[i] <= 'Z');
+ ASSERT(dst[i] == src[i] + ('a' - 'A'));
+ } else {
+ ASSERT('a' <= src[i] && src[i] <= 'z');
+ ASSERT(dst[i] == src[i] - ('a' - 'A'));
}
- ASSERT(expected_changed == changed);
}
+ return (expected_changed == changed);
+}
#endif
-};
-
-
-struct ToLowerTraits {
- typedef unibrow::ToLowercase UnibrowConverter;
-
- typedef FastAsciiConverter<ASCII_TO_LOWER> AsciiConverter;
-};
-
-
-struct ToUpperTraits {
- typedef unibrow::ToUppercase UnibrowConverter;
-
- typedef FastAsciiConverter<ASCII_TO_UPPER> AsciiConverter;
-};
} // namespace
-template <typename ConvertTraits>
+template <class Converter>
MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
Isolate* isolate,
- unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
+ unibrow::Mapping<Converter, 128>* mapping) {
SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
s = s->TryFlattenGetString();
@@ -6347,7 +6426,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
}
SeqOneByteString* result = SeqOneByteString::cast(o);
bool has_changed_character;
- bool is_ascii = ConvertTraits::AsciiConverter::Convert(
+ bool is_ascii = FastAsciiConvert<Converter>(
reinterpret_cast<char*>(result->GetChars()),
reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
length,
@@ -6358,31 +6437,35 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
}
}
+ String::Encoding result_encoding = s->IsOneByteRepresentation()
+ ? String::ONE_BYTE_ENCODING : String::TWO_BYTE_ENCODING;
Object* answer;
- { MaybeObject* maybe_answer =
- ConvertCaseHelper(isolate, s, length, length, mapping);
+ { MaybeObject* maybe_answer = ConvertCaseHelper(
+ isolate, s, result_encoding, length, length, mapping);
if (!maybe_answer->ToObject(&answer)) return maybe_answer;
}
if (answer->IsSmi()) {
- // Retry with correct length.
- { MaybeObject* maybe_answer =
- ConvertCaseHelper(isolate,
- s, Smi::cast(answer)->value(), length, mapping);
- if (!maybe_answer->ToObject(&answer)) return maybe_answer;
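+    // A Smi answer requests a retry: its absolute value is the required
+    // length, and a negative value means the result needs a two-byte
+    // representation.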
+ int new_length = Smi::cast(answer)->value();
+ if (new_length < 0) {
+ result_encoding = String::TWO_BYTE_ENCODING;
+ new_length = -new_length;
}
+ MaybeObject* maybe_answer = ConvertCaseHelper(
+ isolate, s, result_encoding, new_length, length, mapping);
+ if (!maybe_answer->ToObject(&answer)) return maybe_answer;
}
return answer;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) {
- return ConvertCase<ToLowerTraits>(
+ return ConvertCase(
args, isolate, isolate->runtime_state()->to_lower_mapping());
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
- return ConvertCase<ToUpperTraits>(
+ return ConvertCase(
args, isolate, isolate->runtime_state()->to_upper_mapping());
}
@@ -6475,8 +6558,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
- if (maybe_result->IsFailure()) return maybe_result;
+ JSObject::EnsureCanContainHeapObjectElements(result);
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastObjectElements());
@@ -6603,7 +6685,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, value, 0);
- return value->ToObject();
+ return value->ToObject(isolate);
}
@@ -6869,21 +6951,20 @@ static inline void StringBuilderConcatHelper(String* special,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException(0x14);
}
int array_length = args.smi_at(1);
- CONVERT_ARG_CHECKED(String, special, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
- MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
- if (maybe_result->IsFailure()) return maybe_result;
+ JSObject::EnsureCanContainHeapObjectElements(array);
int special_length = special->length();
if (!array->HasFastObjectElements()) {
@@ -6965,7 +7046,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqOneByteString* answer = SeqOneByteString::cast(object);
- StringBuilderConcatHelper(special,
+ StringBuilderConcatHelper(*special,
answer->GetChars(),
fixed_array,
array_length);
@@ -6976,7 +7057,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (!maybe_object->ToObject(&object)) return maybe_object;
}
SeqTwoByteString* answer = SeqTwoByteString::cast(object);
- StringBuilderConcatHelper(special,
+ StringBuilderConcatHelper(*special,
answer->GetChars(),
fixed_array,
array_length);
@@ -7236,15 +7317,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- return isolate->heap()->NumberFromInt32(~x);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
@@ -7461,7 +7533,7 @@ static Object* FlatStringCompare(String* x, String* y) {
result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
}
ASSERT(result ==
- StringCharacterStreamCompare(Isolate::Current()->runtime_state(), x, y));
+ StringCharacterStreamCompare(x->GetIsolate()->runtime_state(), x, y));
return result;
}
@@ -7902,6 +7974,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ Handle<Context> context(isolate->context());
+ PretenureFlag pretenure_flag = NOT_TENURED;
+ Handle<JSFunction> result =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
+ context,
+ pretenure_flag);
+ return *result;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
@@ -8075,7 +8161,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
if (!bound_function->IsJSFunction()) {
bool exception_thrown;
- bound_function = Execution::TryGetConstructorDelegate(bound_function,
+ bound_function = Execution::TryGetConstructorDelegate(isolate,
+ bound_function,
&exception_thrown);
if (exception_thrown) return Failure::Exception();
}
@@ -8236,9 +8323,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
return function->code();
}
function->shared()->code()->set_profiler_ticks(0);
- if (JSFunction::CompileOptimized(function,
- BailoutId::None(),
- CLEAR_EXCEPTION)) {
+ if (JSFunction::CompileOptimized(function, CLEAR_EXCEPTION)) {
return function->code();
}
if (FLAG_trace_opt) {
@@ -8251,7 +8336,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ConcurrentRecompile) {
HandleScope handle_scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -8260,44 +8345,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
return isolate->heap()->undefined_value();
}
function->shared()->code()->set_profiler_ticks(0);
- ASSERT(FLAG_parallel_recompilation);
- Compiler::RecompileParallel(function);
+ ASSERT(FLAG_concurrent_recompilation);
+ if (!Compiler::RecompileConcurrent(function)) {
+ function->ReplaceCode(function->shared()->code());
+ }
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
- HandleScope handle_scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- ASSERT(V8::UseCrankshaft() && FLAG_parallel_recompilation);
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- return function->code();
-}
-
-
class ActivationsFinder : public ThreadVisitor {
public:
- explicit ActivationsFinder(JSFunction* function)
- : function_(function), has_activations_(false) {}
+ Code* code_;
+ bool has_code_activations_;
+
+ explicit ActivationsFinder(Code* code)
+ : code_(code),
+ has_code_activations_(false) { }
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- if (has_activations_) return;
+ JavaScriptFrameIterator it(isolate, top);
+ VisitFrames(&it);
+ }
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == function_) {
- has_activations_ = true;
- return;
- }
+ void VisitFrames(JavaScriptFrameIterator* it) {
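+    // A frame is an activation of the code if its pc lies within the code
+    // object; this also covers other functions sharing the same code.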
+ for (; !it->done(); it->Advance()) {
+ JavaScriptFrame* frame = it->frame();
+ if (code_->contains(frame->pc())) has_code_activations_ = true;
}
}
-
- bool has_activations() { return has_activations_; }
-
- private:
- JSFunction* function_;
- bool has_activations_;
};
@@ -8320,7 +8395,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(AllowHeapAllocation::IsAllowed());
- ASSERT(deoptimizer->compiled_code_kind() == Code::OPTIMIZED_FUNCTION);
+ Handle<JSFunction> function = deoptimizer->function();
+ Handle<Code> optimized_code = deoptimizer->compiled_code();
+
+ ASSERT(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(type == deoptimizer->bailout_type());
// Make sure to materialize objects before causing any allocation.
JavaScriptFrameIterator it(isolate);
@@ -8329,10 +8408,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
JavaScriptFrame* frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
- Handle<JSFunction> function(frame->function(), isolate);
- Handle<Code> optimized_code(function->code());
- RUNTIME_ASSERT((type != Deoptimizer::EAGER &&
- type != Deoptimizer::SOFT) || function->IsOptimized());
+ ASSERT(frame->function() == *function);
// Avoid doing too much work when running with --always-opt and keep
// the optimized code around.
@@ -8340,33 +8416,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
return isolate->heap()->undefined_value();
}
- // Find other optimized activations of the function or functions that
- // share the same optimized code.
- bool has_other_activations = false;
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* other_function = frame->function();
- if (frame->is_optimized() && other_function->code() == function->code()) {
- has_other_activations = true;
- break;
- }
- it.Advance();
- }
+ // Search for other activations of the same function and code.
+ ActivationsFinder activations_finder(*optimized_code);
+ activations_finder.VisitFrames(&it);
+ isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
- if (!has_other_activations) {
- ActivationsFinder activations_finder(*function);
- isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
- has_other_activations = activations_finder.has_activations();
- }
-
- if (!has_other_activations) {
- if (FLAG_trace_deopt) {
- PrintF("[removing optimized code for: ");
- function->PrintName();
- PrintF("]\n");
+ if (!activations_finder.has_code_activations_) {
+ if (function->code() == *optimized_code) {
+ if (FLAG_trace_deopt) {
+ PrintF("[removing optimized code for: ");
+ function->PrintName();
+ PrintF("]\n");
+ }
+ function->ReplaceCode(function->shared()->code());
}
- function->ReplaceCode(function->shared()->code());
} else {
+ // TODO(titzer): we should probably do DeoptimizeCodeList(code)
+ // unconditionally if the code is not already marked for deoptimization.
+ // If there is an index by shared function info, all the better.
Deoptimizer::DeoptimizeFunction(*function);
}
// Evict optimized code for this function from the cache so that it doesn't
@@ -8421,9 +8488,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsParallelRecompilationSupported) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported) {
HandleScope scope(isolate);
- return FLAG_parallel_recompilation
+ return FLAG_concurrent_recompilation
? isolate->heap()->true_value() : isolate->heap()->false_value();
}
@@ -8441,12 +8508,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
unoptimized->kind() == Code::FUNCTION) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
- for (int i = 0; i <= Code::kMaxLoopNestingMarker; i++) {
+ // Start patching from the currently patched loop nesting level.
+ int current_level = unoptimized->allow_osr_at_loop_nesting_level();
+ ASSERT(Deoptimizer::VerifyInterruptCode(
+ isolate, unoptimized, current_level));
+ for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
unoptimized->set_allow_osr_at_loop_nesting_level(i);
isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
}
- } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("parallel"))) {
- function->MarkForParallelRecompilation();
+ } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent"))) {
+ function->MarkForConcurrentRecompilation();
}
}
@@ -8467,7 +8538,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NeverOptimizeFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
- if (!V8::UseCrankshaft()) {
+ if (!isolate->use_crankshaft()) {
return Smi::FromInt(4); // 4 == "never".
}
bool sync_with_compiler_thread = true;
@@ -8478,10 +8549,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
}
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
- while (function->IsMarkedForParallelRecompilation() ||
- function->IsInRecompileQueue() ||
- function->IsMarkedForInstallingRecompiledCode()) {
+ if (FLAG_concurrent_recompilation && sync_with_compiler_thread) {
+ while (function->IsInRecompileQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
}
@@ -8508,117 +8577,138 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
}
+static bool IsSuitableForOnStackReplacement(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<Code> unoptimized) {
+  // Only optimizable unoptimized code is eligible for OSR.
+  if (!unoptimized->optimizable()) return false;
+ // If we are trying to do OSR when there are already optimized
+ // activations of the function, it means (a) the function is directly or
+ // indirectly recursive and (b) an optimized invocation has been
+ // deoptimized so that we are currently in an unoptimized activation.
+ // Check for optimized activations of this function.
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized() && frame->function() == *function) return false;
+ }
+
+ return true;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
- ASSERT(args.length() == 1);
+ ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
+ Handle<Code> unoptimized(function->shared()->code(), isolate);
+
+#ifdef DEBUG
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ ASSERT_EQ(frame->function(), *function);
+ ASSERT_EQ(frame->LookupCode(), *unoptimized);
+ ASSERT(unoptimized->contains(frame->pc()));
+
+ ASSERT(pc_offset ==
+ static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
+#endif // DEBUG
   // We're not prepared to handle a function with an arguments object.
ASSERT(!function->shared()->uses_arguments());
- // We have hit a back edge in an unoptimized frame for a function that was
- // selected for on-stack replacement. Find the unoptimized code object.
- Handle<Code> unoptimized(function->shared()->code(), isolate);
- // Keep track of whether we've succeeded in optimizing.
- bool succeeded = unoptimized->optimizable();
- if (succeeded) {
- // If we are trying to do OSR when there are already optimized
- // activations of the function, it means (a) the function is directly or
- // indirectly recursive and (b) an optimized invocation has been
- // deoptimized so that we are currently in an unoptimized activation.
- // Check for optimized activations of this function.
- JavaScriptFrameIterator it(isolate);
- while (succeeded && !it.done()) {
- JavaScriptFrame* frame = it.frame();
- succeeded = !frame->is_optimized() || frame->function() != *function;
- it.Advance();
+ Handle<Code> result = Handle<Code>::null();
+ BailoutId ast_id = BailoutId::None();
+
+ if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) {
+ if (isolate->optimizing_compiler_thread()->
+ IsQueuedForOSR(function, pc_offset)) {
+ // Still waiting for the optimizing compiler thread to finish. Carry on.
+ if (FLAG_trace_osr) {
+ PrintF("[COSR - polling recompile tasks for ");
+ function->PrintName();
+ PrintF("]\n");
+ }
+ return NULL;
}
- }
- BailoutId ast_id = BailoutId::None();
- if (succeeded) {
- // The top JS function is this one, the PC is somewhere in the
- // unoptimized code.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- ASSERT(frame->function() == *function);
- ASSERT(frame->LookupCode() == *unoptimized);
- ASSERT(unoptimized->contains(frame->pc()));
-
- // Use linear search of the unoptimized code's back edge table to find
- // the AST id matching the PC.
- Address start = unoptimized->instruction_start();
- unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->back_edge_table_offset();
- uint32_t table_length = Memory::uint32_at(table_cursor);
- table_cursor += kIntSize;
- uint32_t loop_depth = 0;
- for (unsigned i = 0; i < table_length; ++i) {
- // Table entries are (AST id, pc offset) pairs.
- uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
- if (pc_offset == target_pc_offset) {
- ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
- loop_depth = Memory::uint32_at(table_cursor + 2 * kIntSize);
- break;
+ OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
+ FindReadyOSRCandidate(function, pc_offset);
+
+ if (compiler == NULL) {
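+      // No compile task has finished for this OSR site yet; queue a new
+      // concurrent OSR compile if the function still qualifies.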
+ if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
+ Compiler::RecompileConcurrent(function, pc_offset)) {
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation()) {
+ // Prevent regular recompilation if we queue this for OSR.
+ // TODO(yangguo): remove this as soon as OSR becomes one-shot.
+ function->ReplaceCode(function->shared()->code());
+ }
+ return NULL;
}
- table_cursor += FullCodeGenerator::kBackEdgeEntrySize;
+ // Fall through to the end in case of failure.
+ } else {
+ // TODO(titzer): don't install the OSR code into the function.
+ ast_id = compiler->info()->osr_ast_id();
+ result = Compiler::InstallOptimizedCode(compiler);
}
+ } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
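+    // Use the unoptimized code's back edge table to translate the pc
+    // offset back to the AST id of the loop back edge.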
+ ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
ASSERT(!ast_id.IsNone());
if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d, loop depth %d in ",
- ast_id.ToInt(), loop_depth);
+ PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
function->PrintName();
PrintF("]\n");
}
+ // Attempt OSR compilation.
+ result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
+ }
- // Try to compile the optimized code. A true return value from
- // CompileOptimized means that compilation succeeded, not necessarily
- // that optimization succeeded.
- if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
- function->IsOptimized()) {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- function->code()->deoptimization_data());
- if (data->OsrPcOffset()->value() >= 0) {
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement offset %d in optimized code]\n",
- data->OsrPcOffset()->value());
- }
- ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
- } else {
- // We may never generate the desired OSR entry if we emit an
- // early deoptimize.
- succeeded = false;
+ // Revert the patched interrupt now, regardless of whether OSR succeeds.
+ Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+
+ // Check whether we ended up with usable optimized code.
+ if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(result->deoptimization_data());
+
+ if (data->OsrPcOffset()->value() >= 0) {
+ ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
+ ast_id.ToInt(), data->OsrPcOffset()->value());
}
- } else {
- succeeded = false;
+ // TODO(titzer): this is a massive hack to make the deopt counts
+ // match. Fix heuristics for reenabling optimizations!
+ function->shared()->increment_deopt_count();
+ return *result;
}
}
- // Revert to the original interrupt calls in the original unoptimized code.
if (FLAG_trace_osr) {
- PrintF("[restoring original interrupt calls in ");
+ PrintF("[OSR - optimization failed for ");
function->PrintName();
PrintF("]\n");
}
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(*unoptimized,
- *interrupt_code,
- *replacement_code);
-
- // If the optimization attempt succeeded, return the AST id tagged as a
- // smi. This tells the builtin that we need to translate the unoptimized
- // frame to an optimized one.
- if (succeeded) {
- ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- return Smi::FromInt(ast_id.ToInt());
- } else {
- if (function->IsMarkedForLazyRecompilation()) {
- function->ReplaceCode(function->shared()->code());
- }
- return Smi::FromInt(-1);
+
+ if (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForConcurrentRecompilation()) {
+ function->ReplaceCode(function->shared()->code());
}
+ return NULL;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAllocationTimeout) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 2);
+#ifdef DEBUG
+ CONVERT_SMI_ARG_CHECKED(interval, 0);
+ CONVERT_SMI_ARG_CHECKED(timeout, 1);
+ isolate->heap()->set_allocation_timeout(timeout);
+ FLAG_gc_interval = interval;
+#endif
+ return isolate->heap()->undefined_value();
}
@@ -8664,8 +8754,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
bool threw;
Handle<JSReceiver> hfun(fun);
Handle<Object> hreceiver(receiver, isolate);
- Handle<Object> result =
- Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+ Handle<Object> result = Execution::Call(
+ isolate, hfun, hreceiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8680,8 +8770,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
CONVERT_SMI_ARG_CHECKED(offset, 3);
CONVERT_SMI_ARG_CHECKED(argc, 4);
- ASSERT(offset >= 0);
- ASSERT(argc >= 0);
+ RUNTIME_ASSERT(offset >= 0);
+ RUNTIME_ASSERT(argc >= 0);
// If there are too many arguments, allocate argv via malloc.
const int argv_small_size = 10;
@@ -8695,12 +8785,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
}
for (int i = 0; i < argc; ++i) {
- argv[i] = Object::GetElement(arguments, offset + i);
+ argv[i] = Object::GetElement(isolate, arguments, offset + i);
}
bool threw;
- Handle<Object> result =
- Execution::Call(fun, receiver, argc, argv, &threw, true);
+ Handle<Object> result = Execution::Call(
+ isolate, fun, receiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -8711,7 +8801,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetFunctionDelegate(args.at<Object>(0));
+ return *Execution::GetFunctionDelegate(isolate, args.at<Object>(0));
}
@@ -8719,7 +8809,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
RUNTIME_ASSERT(!args[0]->IsJSFunction());
- return *Execution::GetConstructorDelegate(args.at<Object>(0));
+ return *Execution::GetConstructorDelegate(isolate, args.at<Object>(0));
}
@@ -8768,7 +8858,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
extension_object = JSReceiver::cast(args[0]);
} else {
// Convert the object to a proper JavaScript object.
- MaybeObject* maybe_js_object = args[0]->ToObject();
+ MaybeObject* maybe_js_object = args[0]->ToObject(isolate);
if (!maybe_js_object->To(&extension_object)) {
if (Failure::cast(maybe_js_object)->IsInternalError()) {
HandleScope scope(isolate);
@@ -8922,7 +9012,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
IsImmutableVariableMode(mode) ? FROZEN : SEALED;
Handle<AccessorInfo> info =
Accessors::MakeModuleExport(name, index, attr);
- Handle<Object> result = SetAccessor(module, info);
+ Handle<Object> result = JSObject::SetAccessor(module, info);
ASSERT(!(result.is_null() || result->IsUndefined()));
USE(result);
break;
@@ -9288,7 +9378,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
// First check if this is a real stack overflow.
if (isolate->stack_guard()->IsStackOverflow()) {
- SealHandleScope shs(isolate);
return isolate->StackOverflow();
}
@@ -9296,6 +9385,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallRecompiledCode) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ // First check if this is a real stack overflow.
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ SealHandleScope shs(isolate);
+ return isolate->StackOverflow();
+ }
+
+ isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ return (function->IsOptimized()) ? function->code()
+ : function->shared()->code();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
@@ -9408,9 +9514,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
- MaybeObject* maybe_result_array =
- output->EnsureCanContainHeapObjectElements();
- if (maybe_result_array->IsFailure()) return maybe_result_array;
+ JSObject::EnsureCanContainHeapObjectElements(output);
RUNTIME_ASSERT(output->HasFastObjectElements());
DisallowHeapAllocation no_gc;
@@ -9474,7 +9578,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
ASSERT_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- source = Handle<String>(source->TryFlattenGetString());
+ source = Handle<String>(FlattenGetString(source));
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
if (source->IsSeqOneByteString()) {
@@ -10082,7 +10186,7 @@ static bool IterateElements(Isolate* isolate,
} else if (receiver->HasElement(j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- element_value = Object::GetElement(receiver, j);
+ element_value = Object::GetElement(isolate, receiver, j);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
visitor->visit(j, element_value);
}
@@ -10107,7 +10211,8 @@ static bool IterateElements(Isolate* isolate,
} else if (receiver->HasElement(j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
- Handle<Object> element_value = Object::GetElement(receiver, j);
+ Handle<Object> element_value =
+ Object::GetElement(isolate, receiver, j);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
visitor->visit(j, element_value);
}
@@ -10126,7 +10231,7 @@ static bool IterateElements(Isolate* isolate,
while (j < n) {
HandleScope loop_scope(isolate);
uint32_t index = indices[j];
- Handle<Object> element = Object::GetElement(receiver, index);
+ Handle<Object> element = Object::GetElement(isolate, receiver, index);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
@@ -10506,7 +10611,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- return Execution::DebugBreakHelper();
+ return Execution::DebugBreakHelper(isolate);
}
@@ -11214,6 +11319,7 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
? frame_inspector->GetParameter(i)
: isolate->heap()->undefined_value(),
isolate);
+ ASSERT(!value->IsTheHole());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
@@ -11228,12 +11334,15 @@ static Handle<JSObject> MaterializeStackLocalsWithFrameInspector(
// Second fill all stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ Handle<Object> value(frame_inspector->GetExpression(i), isolate);
+ if (value->IsTheHole()) continue;
+
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(isolate,
target,
Handle<String>(scope_info->StackLocalName(i)),
- Handle<Object>(frame_inspector->GetExpression(i), isolate),
+ value,
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -11260,6 +11369,7 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
// Parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ ASSERT(!frame->GetParameter(i)->IsTheHole());
HandleScope scope(isolate);
Handle<Object> value = GetProperty(
isolate, target, Handle<String>(scope_info->ParameterName(i)));
@@ -11268,6 +11378,7 @@ static void UpdateStackLocalsFromMaterializedObject(Isolate* isolate,
// Stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ if (frame->GetExpression(i)->IsTheHole()) continue;
HandleScope scope(isolate);
Handle<Object> value = GetProperty(
isolate, target, Handle<String>(scope_info->StackLocalName(i)));
@@ -12000,10 +12111,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator frame_it(isolate, id);
+ RUNTIME_ASSERT(!frame_it.done());
+
JavaScriptFrame* frame = frame_it.frame();
+ Handle<JSFunction> fun =
+ Handle<JSFunction>(frame->function());
Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(frame->function()->shared());
+ Handle<SharedFunctionInfo>(fun->shared());
+
+ if (!isolate->debug()->EnsureDebugInfo(shared, fun)) {
+ return isolate->heap()->undefined_value();
+ }
+
Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
int len = 0;
@@ -12012,16 +12132,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetStepInPositions) {
BreakLocationIterator break_location_iterator(debug_info,
ALL_BREAK_LOCATIONS);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc());
+ break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
int current_statement_pos = break_location_iterator.statement_position();
while (!break_location_iterator.Done()) {
- if (break_location_iterator.IsStepInLocation(isolate)) {
- Smi* position_value = Smi::FromInt(break_location_iterator.position());
- JSObject::SetElement(array, len,
- Handle<Object>(position_value, isolate),
- NONE, kNonStrictMode);
- len++;
+ bool accept;
+ if (break_location_iterator.pc() > frame->pc()) {
+ accept = true;
+ } else {
+ StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
+      // The break point is near our pc. It could be a step-in
+      // possibility that is currently taken by an active debugger call.
+ if (break_frame_id == StackFrame::NO_ID) {
+ // We are not stepping.
+ accept = false;
+ } else {
+ JavaScriptFrameIterator additional_frame_it(isolate, break_frame_id);
+        // If our frame is the top frame and we are stepping, we can do
+        // step-in at this place.
+ accept = additional_frame_it.frame()->id() == id;
+ }
+ }
+ if (accept) {
+ if (break_location_iterator.IsStepInLocation(isolate)) {
+ Smi* position_value = Smi::FromInt(break_location_iterator.position());
+ JSObject::SetElement(array, len,
+ Handle<Object>(position_value, isolate),
+ NONE, kNonStrictMode);
+ len++;
+ }
}
// Advance iterator.
break_location_iterator.Next();
@@ -12446,7 +12585,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
// of frames to step down.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
// Check arguments.
Object* check;
{ MaybeObject* maybe_check = Runtime_CheckExecutionState(
@@ -12457,6 +12596,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
+ CONVERT_NUMBER_CHECKED(int, wrapped_frame_id, Int32, args[3]);
+
+ StackFrame::Id frame_id;
+ if (wrapped_frame_id == 0) {
+ frame_id = StackFrame::NO_ID;
+ } else {
+ frame_id = UnwrapFrameId(wrapped_frame_id);
+ }
+
// Get the step action and check validity.
StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
if (step_action != StepIn &&
@@ -12467,6 +12615,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
+ if (frame_id != StackFrame::NO_ID && step_action != StepNext &&
+ step_action != StepMin && step_action != StepOut) {
+ return isolate->ThrowIllegalOperation();
+ }
+
// Get the number of steps.
int step_count = NumberToInt32(args[2]);
if (step_count < 1) {
@@ -12478,7 +12631,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
// Prepare step.
isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count);
+ step_count,
+ frame_id);
return isolate->heap()->undefined_value();
}
@@ -12544,7 +12698,7 @@ static MaybeObject* DebugEvaluate(Isolate* isolate,
shared, context, NOT_TENURED);
bool pending_exception;
Handle<Object> result = Execution::Call(
- eval_fun, receiver, 0, NULL, &pending_exception);
+ isolate, eval_fun, receiver, 0, NULL, &pending_exception);
if (pending_exception) return Failure::Exception();
@@ -12583,7 +12737,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Object> context_extension(args[5], isolate);
// Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
+ DisableBreak disable_break_save(isolate, disable_break);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -12650,7 +12804,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
Handle<Object> context_extension(args[3], isolate);
// Handle the processing of break.
- DisableBreak disable_break_save(disable_break);
+ DisableBreak disable_break_save(isolate, disable_break);
// Enter the top context from before the debugger was invoked.
SaveContext save(isolate);
@@ -12895,7 +13049,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
}
FixedArray* instances = FixedArray::cast(object);
- ASSERT(HEAP->IsHeapIterable());
+ ASSERT(isolate->heap()->IsHeapIterable());
// Fill the referencing objects.
HeapIterator heap_iterator2(heap);
count = DebugConstructedBy(&heap_iterator2,
@@ -12946,7 +13100,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
- CPU::DebugBreak();
+ OS::DebugBreak();
return isolate->heap()->undefined_value();
}
@@ -13315,11 +13469,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
bool pending_exception;
{
if (without_debugger) {
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
+ result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL,
&pending_exception);
} else {
- EnterDebugger enter_debugger;
- result = Execution::Call(function, isolate->global_object(), 0, NULL,
+ EnterDebugger enter_debugger(isolate);
+ result = Execution::Call(isolate,
+ function,
+ isolate->global_object(),
+ 0,
+ NULL,
&pending_exception);
}
}
@@ -13364,6 +13526,641 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
#endif // ENABLE_DEBUGGER_SUPPORT
+#ifdef V8_I18N_SUPPORT
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CanonicalizeLanguageTag) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
+
+ v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
+
+  // Return value that denotes an invalid language tag.
+ const char* const kInvalidTag = "invalid-tag";
+
+ UErrorCode error = U_ZERO_ERROR;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ int icu_length = 0;
+
+ uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &error);
+ if (U_FAILURE(error) || icu_length == 0) {
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag));
+ }
+
+ char result[ULOC_FULLNAME_CAPACITY];
+
+ // Force strict BCP47 rules.
+ uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
+
+ if (U_FAILURE(error)) {
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(kInvalidTag));
+ }
+
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(result));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AvailableLocalesOf) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
+
+ const icu::Locale* available_locales = NULL;
+ int32_t count = 0;
+
+ if (service->IsUtf8EqualTo(CStrVector("collator"))) {
+ available_locales = icu::Collator::getAvailableLocales(count);
+ } else if (service->IsUtf8EqualTo(CStrVector("numberformat"))) {
+ available_locales = icu::NumberFormat::getAvailableLocales(count);
+ } else if (service->IsUtf8EqualTo(CStrVector("dateformat"))) {
+ available_locales = icu::DateFormat::getAvailableLocales(count);
+ } else if (service->IsUtf8EqualTo(CStrVector("breakiterator"))) {
+ available_locales = icu::BreakIterator::getAvailableLocales(count);
+ }
+
+ UErrorCode error = U_ZERO_ERROR;
+ char result[ULOC_FULLNAME_CAPACITY];
+ Handle<JSObject> locales =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ for (int32_t i = 0; i < count; ++i) {
+ const char* icu_name = available_locales[i].getName();
+
+ error = U_ZERO_ERROR;
+ // No need to force strict BCP47 rules.
+ uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+ if (U_FAILURE(error)) {
+      // This shouldn't happen, but let's not break the user.
+ continue;
+ }
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ locales,
+ isolate->factory()->NewStringFromAscii(CStrVector(result)),
+ isolate->factory()->NewNumber(i),
+ NONE));
+ }
+
+ return *locales;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultICULocale) {
+ SealHandleScope shs(isolate);
+
+ ASSERT(args.length() == 0);
+
+ icu::Locale default_locale;
+
+  // Convert the default locale to a BCP47 language tag.
+ char result[ULOC_FULLNAME_CAPACITY];
+ UErrorCode status = U_ZERO_ERROR;
+ uloc_toLanguageTag(
+ default_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ if (U_SUCCESS(status)) {
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(result));
+ }
+
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector("und"));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLanguageTagVariants) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
+
+ uint32_t length = static_cast<uint32_t>(input->length()->Number());
+ Handle<FixedArray> output = isolate->factory()->NewFixedArray(length);
+ Handle<Name> maximized =
+ isolate->factory()->NewStringFromAscii(CStrVector("maximized"));
+ Handle<Name> base =
+ isolate->factory()->NewStringFromAscii(CStrVector("base"));
+ for (unsigned int i = 0; i < length; ++i) {
+ MaybeObject* maybe_string = input->GetElement(isolate, i);
+ Object* locale_id;
+ if (!maybe_string->ToObject(&locale_id) || !locale_id->IsString()) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ v8::String::Utf8Value utf8_locale_id(
+ v8::Utils::ToLocal(Handle<String>(String::cast(locale_id))));
+
+ UErrorCode error = U_ZERO_ERROR;
+
+ // Convert from BCP47 to ICU format.
+ // de-DE-u-co-phonebk -> de_DE@collation=phonebook
+ char icu_locale[ULOC_FULLNAME_CAPACITY];
+ int icu_locale_length = 0;
+ uloc_forLanguageTag(*utf8_locale_id, icu_locale, ULOC_FULLNAME_CAPACITY,
+ &icu_locale_length, &error);
+ if (U_FAILURE(error) || icu_locale_length == 0) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ // Maximize the locale.
+ // de_DE@collation=phonebook -> de_Latn_DE@collation=phonebook
+ char icu_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_addLikelySubtags(
+ icu_locale, icu_max_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Remove extensions from maximized locale.
+ // de_Latn_DE@collation=phonebook -> de_Latn_DE
+ char icu_base_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_getBaseName(
+ icu_max_locale, icu_base_max_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Get original name without extensions.
+ // de_DE@collation=phonebook -> de_DE
+ char icu_base_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_getBaseName(
+ icu_locale, icu_base_locale, ULOC_FULLNAME_CAPACITY, &error);
+
+ // Convert from ICU locale format to BCP47 format.
+ // de_Latn_DE -> de-Latn-DE
+ char base_max_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_toLanguageTag(icu_base_max_locale, base_max_locale,
+ ULOC_FULLNAME_CAPACITY, FALSE, &error);
+
+ // de_DE -> de-DE
+ char base_locale[ULOC_FULLNAME_CAPACITY];
+ uloc_toLanguageTag(
+ icu_base_locale, base_locale, ULOC_FULLNAME_CAPACITY, FALSE, &error);
+
+ if (U_FAILURE(error)) {
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
+ }
+
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ result,
+ maximized,
+ isolate->factory()->NewStringFromAscii(CStrVector(base_max_locale)),
+ NONE));
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ result,
+ base,
+ isolate->factory()->NewStringFromAscii(CStrVector(base_locale)),
+ NONE));
+ output->set(i, *result);
+ }
+
+ Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(output);
+ result->set_length(Smi::FromInt(length));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateDateTimeFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> date_format_template =
+ I18N::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ date_format_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set date time formatter as internal field of the resulting JS object.
+ icu::SimpleDateFormat* date_format = DateFormat::InitializeDateTimeFormat(
+ isolate, locale, options, resolved);
+
+ if (!date_format) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
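+  // Note: an aligned C++ pointer has a clear low tag bit, so the value is
+  // encoded like a Smi and the GC will not try to trace it.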
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("dateFormat")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+  // Make the object handle weak so we can delete the date format once GC
+  // kicks in.
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ DateFormat::DeleteDateFormat);
+ return *local_object;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
+
+ bool has_pending_exception = false;
+ Handle<Object> value =
+ Execution::ToNumber(isolate, date, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ icu::SimpleDateFormat* date_format =
+ DateFormat::UnpackDateFormat(isolate, date_format_holder);
+ if (!date_format) return isolate->ThrowIllegalOperation();
+
+ icu::UnicodeString result;
+ date_format->format(value->Number(), result);
+
+ return *isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length()));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalDateParse) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1);
+
+ v8::String::Utf8Value utf8_date(v8::Utils::ToLocal(date_string));
+ icu::UnicodeString u_date(icu::UnicodeString::fromUTF8(*utf8_date));
+ icu::SimpleDateFormat* date_format =
+ DateFormat::UnpackDateFormat(isolate, date_format_holder);
+ if (!date_format) return isolate->ThrowIllegalOperation();
+
+ UErrorCode status = U_ZERO_ERROR;
+ UDate date = date_format->parse(u_date, status);
+ if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+ bool has_pending_exception = false;
+ Handle<JSDate> result = Handle<JSDate>::cast(
+ Execution::NewDate(
+ isolate, static_cast<double>(date), &has_pending_exception));
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateNumberFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> number_format_template =
+ I18N::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ number_format_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set number formatter as internal field of the resulting JS object.
+ icu::DecimalFormat* number_format = NumberFormat::InitializeNumberFormat(
+ isolate, locale, options, resolved);
+
+ if (!number_format) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("numberFormat")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ NumberFormat::DeleteNumberFormat);
+ return *local_object;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberFormat) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
+
+ bool has_pending_exception = false;
+ Handle<Object> value = Execution::ToNumber(
+ isolate, number, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ icu::DecimalFormat* number_format =
+ NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ if (!number_format) return isolate->ThrowIllegalOperation();
+
+ icu::UnicodeString result;
+ number_format->format(value->Number(), result);
+
+ return *isolate->factory()->NewStringFromTwoByte(
+ Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length()));
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalNumberParse) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
+
+ v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
+ icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
+ icu::DecimalFormat* number_format =
+ NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ if (!number_format) return isolate->ThrowIllegalOperation();
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Formattable result;
+  // ICU 4.6 doesn't support the parseCurrency call. We need to wait for
+  // ICU 49 to be part of Chrome.
+  // TODO(cira): Include currency parsing code using parseCurrency call.
+  // We need to check whether the formatter parses all currencies or only
+  // the one it was constructed with (it will impact the API - how to
+  // return the ISO code and the value).
+ number_format->parse(u_number, result, status);
+ if (U_FAILURE(status)) return isolate->heap()->undefined_value();
+
+ switch (result.getType()) {
+ case icu::Formattable::kDouble:
+ return *isolate->factory()->NewNumber(result.getDouble());
+ case icu::Formattable::kLong:
+ return *isolate->factory()->NewNumberFromInt(result.getLong());
+ case icu::Formattable::kInt64:
+ return *isolate->factory()->NewNumber(
+ static_cast<double>(result.getInt64()));
+ default:
+ return isolate->heap()->undefined_value();
+ }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCollator) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ collator_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set collator as internal field of the resulting JS object.
+ icu::Collator* collator = Collator::InitializeCollator(
+ isolate, locale, options, resolved);
+
+ if (!collator) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("collator")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ Collator::DeleteCollator);
+ return *local_object;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalCompare) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
+
+ icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
+ if (!collator) return isolate->ThrowIllegalOperation();
+
+ v8::String::Value string_value1(v8::Utils::ToLocal(string1));
+ v8::String::Value string_value2(v8::Utils::ToLocal(string2));
+ const UChar* u_string1 = reinterpret_cast<const UChar*>(*string_value1);
+ const UChar* u_string2 = reinterpret_cast<const UChar*>(*string_value2);
+ UErrorCode status = U_ZERO_ERROR;
+ UCollationResult result = collator->compare(u_string1,
+ string_value1.length(),
+ u_string2,
+ string_value2.length(),
+ status);
+ if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(result);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateBreakIterator) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
+
+ Handle<ObjectTemplateInfo> break_iterator_template =
+ I18N::GetTemplate2(isolate);
+
+ // Create an empty object wrapper.
+ bool has_pending_exception = false;
+ Handle<JSObject> local_object = Execution::InstantiateObject(
+ break_iterator_template, &has_pending_exception);
+ if (has_pending_exception) {
+ ASSERT(isolate->has_pending_exception());
+ return Failure::Exception();
+ }
+
+ // Set break iterator as internal field of the resulting JS object.
+ icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
+ isolate, locale, options, resolved);
+
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ local_object->SetInternalField(0, reinterpret_cast<Smi*>(break_iterator));
+ // Make sure that the pointer to adopted text is NULL.
+ local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL));
+
+ RETURN_IF_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ local_object,
+ isolate->factory()->NewStringFromAscii(CStrVector("breakIterator")),
+ isolate->factory()->NewStringFromAscii(CStrVector("valid")),
+ NONE));
+
+ // Make object handle weak so we can delete the break iterator once GC kicks
+ // in.
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(wrapper.location()),
+ NULL,
+ BreakIterator::DeleteBreakIterator);
+ return *local_object;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorAdoptText) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
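+  // Release any previously adopted text before installing the new one.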
+ icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
+ break_iterator_holder->GetInternalField(1));
+ delete u_text;
+
+ v8::String::Value text_value(v8::Utils::ToLocal(text));
+ u_text = new icu::UnicodeString(
+ reinterpret_cast<const UChar*>(*text_value), text_value.length());
+ break_iterator_holder->SetInternalField(1, reinterpret_cast<Smi*>(u_text));
+
+ break_iterator->setText(*u_text);
+
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorFirst) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->first());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorNext) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->next());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorCurrent) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ return *isolate->factory()->NewNumberFromInt(break_iterator->current());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BreakIteratorBreakType) {
+ HandleScope scope(isolate);
+
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
+
+ icu::BreakIterator* break_iterator =
+ BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ if (!break_iterator) return isolate->ThrowIllegalOperation();
+
+ // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
+ icu::RuleBasedBreakIterator* rule_based_iterator =
+ static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+ int32_t status = rule_based_iterator->getRuleStatus();
+ // Keep return values in sync with JavaScript BreakType enum.
+ if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("none"));
+ } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("number"));
+ } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("letter"));
+ } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("kana"));
+ } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("ideo"));
+ } else {
+ return *isolate->factory()->NewStringFromAscii(CStrVector("unknown"));
+ }
+}
+#endif // V8_I18N_SUPPORT
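The runtime entries above are thin wrappers over ICU: the holder's internal fields carry the icu::BreakIterator* and the adopted icu::UnicodeString*, and each intrinsic forwards a single call. For readers unfamiliar with the underlying library, here is a minimal standalone ICU sketch of the same first/next/getRuleStatus protocol; it is not V8 code and assumes an installed ICU (link with -licuuc -licui18n).

    // Standalone ICU word segmentation with rule status, mirroring what the
    // Runtime_BreakIterator* entries delegate to.
    #include <unicode/brkiter.h>
    #include <unicode/rbbi.h>
    #include <unicode/ubrk.h>
    #include <cstdio>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      icu::BreakIterator* it =
          icu::BreakIterator::createWordInstance(icu::Locale::getUS(), status);
      if (U_FAILURE(status)) return 1;

      icu::UnicodeString text("V8 ships 2 engines");
      it->setText(text);

      int32_t start = it->first();
      for (int32_t end = it->next(); end != icu::BreakIterator::DONE;
           start = end, end = it->next()) {
        // getRuleStatus() lives on RuleBasedBreakIterator, hence the same
        // downcast the TODO(cira) above complains about.
        int32_t tag =
            static_cast<icu::RuleBasedBreakIterator*>(it)->getRuleStatus();
        const char* kind = "none";
        if (tag >= UBRK_WORD_NUMBER && tag < UBRK_WORD_NUMBER_LIMIT) {
          kind = "number";
        } else if (tag >= UBRK_WORD_LETTER && tag < UBRK_WORD_LETTER_LIMIT) {
          kind = "letter";
        }
        printf("[%d, %d) %s\n", start, end, kind);
      }
      delete it;
      return 0;
    }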
+
+
// Finds the script object from the script data. NOTE: This operation uses
// heap traversal to find the function generated for the source position
// for the requested break point. For lazily compiled functions several heap
@@ -13439,13 +14236,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetAndClearOverflowedStackTrace) {
HandleScope scope(isolate);
ASSERT_EQ(args.length(), 1);
- CONVERT_ARG_CHECKED(JSObject, error_object, 0);
- String* key = isolate->heap()->hidden_stack_trace_string();
- Object* result = error_object->GetHiddenProperty(key);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+ Handle<String> key = isolate->factory()->hidden_stack_trace_string();
+ Handle<Object> result(error_object->GetHiddenProperty(*key), isolate);
if (result->IsTheHole()) return isolate->heap()->undefined_value();
RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
- error_object->DeleteHiddenProperty(key);
- return result;
+ JSObject::DeleteHiddenProperty(error_object, key);
+ return *result;
}
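This hunk is part of the commit-wide "handlification": raw Object* locals become Handle<Object> because JSObject::DeleteHiddenProperty may allocate, and an allocation can trigger a moving GC that leaves raw pointers dangling. A toy sketch of why the extra indirection helps, using hypothetical types rather than V8's real handle machinery:

    #include <cstdio>

    // Toy handle: a pointer to a slot that the "GC" rewrites when it moves
    // the object, so holders keep seeing a valid address across a move.
    struct Object { int value; };

    struct Handle {
      Object** slot;
      Object* get() const { return *slot; }
    };

    int main() {
      Object a = { 42 };
      Object* raw = &a;     // raw pointer: goes stale after a move
      Object* slot = &a;
      Handle h = { &slot };

      Object b = a;         // the collector relocates the object...
      slot = &b;            // ...and fixes up the registered slot
      a.value = 0;          // old location is now garbage

      printf("via handle: %d\n", h.get()->value);  // 42: still valid
      printf("via raw:    %d\n", raw->value);      // 0: dangling view
      return 0;
    }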
@@ -13482,6 +14279,26 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyContextDisposed) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ isolate->heap()->NotifyContextDisposed();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MigrateInstance) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ if (!object->IsJSObject()) return Smi::FromInt(0);
+ Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ if (!js_object->map()->is_deprecated()) return Smi::FromInt(0);
+ JSObject::MigrateInstance(js_object);
+ return *object;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
@@ -13531,7 +14348,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
// This handle is neither shared nor used later, so it's safe.
Handle<Object> argv[] = { key_handle };
bool pending_exception;
- value = Execution::Call(factory,
+ value = Execution::Call(isolate,
+ factory,
receiver,
ARRAY_SIZE(argv),
argv,
@@ -13699,6 +14517,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessCheckNeeded) {
+ SealHandleScope shs(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsAccessCheckNeeded());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
@@ -13725,6 +14551,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
ASSERT(proto->IsJSGlobalObject());
obj = JSReceiver::cast(proto);
}
+ if (obj->IsJSProxy())
+ return isolate->heap()->undefined_value();
+
ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
JSObject::cast(obj)->HasFastElements()));
ASSERT(obj->IsJSObject());
@@ -13773,6 +14602,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsAccessAllowedForObserver) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
+ ASSERT(object->IsAccessCheckNeeded());
+ Handle<Object> key = args.at<Object>(2);
+ SaveContext save(isolate);
+ isolate->set_context(observer->context());
+ if (!isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(),
+ v8::ACCESS_KEYS)) {
+ return isolate->heap()->false_value();
+ }
+ bool access_allowed = false;
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index) ||
+ (key->IsString() && String::cast(*key)->AsArrayIndex(&index))) {
+ access_allowed =
+ isolate->MayIndexedAccess(*object, index, v8::ACCESS_GET) &&
+ isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS);
+ } else {
+ access_allowed = isolate->MayNamedAccess(*object, *key, v8::ACCESS_GET) &&
+ isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS);
+ }
+ return isolate->heap()->ToBoolean(access_allowed);
+}
+
+
static MaybeObject* ArrayConstructorCommon(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<Object> type_info,
@@ -13876,6 +14733,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InternalArrayConstructor) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MaxSmi) {
+ return Smi::FromInt(Smi::kMaxValue);
+}
+
+
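Runtime_MaxSmi simply exposes Smi::kMaxValue, backing the MaxSmi/IsValidSmi intrinsics added to runtime.h below. As a sketch of where the constant comes from, assuming the classic one-bit tag with a 31-bit payload in a 32-bit word (64-bit targets use a wider payload):

    #include <cstdint>
    #include <cstdio>

    // Illustrative small-integer (Smi) tagging, not V8's code: bit 0 is the
    // tag (0 = Smi), leaving a 31-bit signed payload in a 32-bit word.
    static const int kSmiTagSize = 1;
    static const int32_t kSmiMaxValue = (1 << (31 - kSmiTagSize)) - 1;

    // Encode/decode for non-negative values, enough for the demo.
    int32_t SmiEncode(int32_t v) { return v << kSmiTagSize; }
    int32_t SmiDecode(int32_t t) { return t >> kSmiTagSize; }

    int main() {
      printf("max smi: %d\n", kSmiMaxValue);                // 1073741823
      printf("round trip: %d\n", SmiDecode(SmiEncode(7)));  // 7
      return 0;
    }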
// ----------------------------------------------------------------------------
// Implementation of Runtime
@@ -13897,7 +14759,6 @@ static const Runtime::Function kIntrinsicFunctions[] = {
MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
Object* dictionary) {
- ASSERT(Isolate::Current()->heap() == heap);
ASSERT(dictionary != NULL);
ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
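The deleted ASSERT is symptomatic of this commit's larger theme: Isolate::Current() reads ambient thread-local state, and call sites are migrated to take the isolate explicitly (compare Execution::Call above and the Scope, ExternalReference and Serializer changes below). A minimal illustration of the pattern, with made-up types:

    #include <cstdio>

    // Illustrative only. "g_current" stands in for the thread-local slot
    // that Isolate::Current() consults in the real code base.
    struct Isolate { const char* name; };

    static Isolate* g_current = NULL;

    // Before: depends on ambient state; ambiguous once several isolates
    // share a thread, and hard to assert against (hence the deleted check).
    void OldStyle() { printf("old: %s\n", g_current->name); }

    // After: the dependency is explicit in the signature.
    void NewStyle(Isolate* isolate) { printf("new: %s\n", isolate->name); }

    int main() {
      Isolate a = { "A" };
      Isolate b = { "B" };
      g_current = &a;
      OldStyle();      // whatever was installed last
      NewStyle(&b);    // unambiguous
      return 0;
    }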
diff --git a/chromium/v8/src/runtime.h b/chromium/v8/src/runtime.h
index 398cb3bcc24..1ad9b3d6663 100644
--- a/chromium/v8/src/runtime.h
+++ b/chromium/v8/src/runtime.h
@@ -87,20 +87,21 @@ namespace internal {
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
- F(ParallelRecompile, 1, 1) \
- F(InstallRecompiledCode, 1, 1) \
+ F(ConcurrentRecompile, 1, 1) \
+ F(TryInstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyStubFailure, 0, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
- F(IsParallelRecompilationSupported, 0, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetOptimizationCount, 1, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
+ F(CompileForOnStackReplacement, 2, 1) \
+ F(SetAllocationTimeout, 2, 1) \
F(AllocateInNewSpace, 1, 1) \
F(AllocateInOldPointerSpace, 1, 1) \
F(AllocateInOldDataSpace, 1, 1) \
@@ -109,6 +110,9 @@ namespace internal {
F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
F(FlattenString, 1, 1) \
+ F(MigrateInstance, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
+ F(MaxSmi, 0, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -157,7 +161,6 @@ namespace internal {
F(NumberOr, 2, 1) \
F(NumberAnd, 2, 1) \
F(NumberXor, 2, 1) \
- F(NumberNot, 1, 1) \
\
F(NumberShl, 2, 1) \
F(NumberShr, 2, 1) \
@@ -219,7 +222,8 @@ namespace internal {
F(NumberToRadixString, 2, 1) \
F(NumberToFixed, 2, 1) \
F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1)
+ F(NumberToPrecision, 2, 1) \
+ F(IsValidSmi, 1, 1)
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
@@ -256,6 +260,7 @@ namespace internal {
F(GetTemplateField, 2, 1) \
F(DisableAccessChecks, 1, 1) \
F(EnableAccessChecks, 1, 1) \
+ F(SetAccessorProperty, 6, 1) \
\
/* Dates */ \
F(DateCurrentTime, 0, 1) \
@@ -354,6 +359,7 @@ namespace internal {
F(GetObservationState, 0, 1) \
F(ObservationWeakMapCreate, 0, 1) \
F(UnwrapGlobalProxy, 1, 1) \
+ F(IsAccessAllowedForObserver, 3, 1) \
\
/* Harmony typed arrays */ \
F(ArrayBufferInitialize, 2, 1)\
@@ -392,6 +398,7 @@ namespace internal {
\
/* Statements */ \
F(NewClosure, 3, 1) \
+ F(NewClosureFromStubFailure, 1, 1) \
F(NewObject, 1, 1) \
F(NewObjectFromBound, 1, 1) \
F(FinalizeInstanceSize, 1, 1) \
@@ -464,7 +471,8 @@ namespace internal {
F(HasExternalDoubleElements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
- F(HaveSameMap, 2, 1)
+ F(HaveSameMap, 2, 1) \
+ F(IsAccessCheckNeeded, 1, 1)
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -499,7 +507,7 @@ namespace internal {
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 3, 1) \
+ F(PrepareStep, 4, 1) \
F(ClearStepping, 0, 1) \
F(DebugEvaluate, 6, 1) \
F(DebugEvaluateGlobal, 4, 1) \
@@ -534,6 +542,43 @@ namespace internal {
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif
+
+#ifdef V8_I18N_SUPPORT
+#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) \
+ /* i18n support */ \
+ /* Standalone, helper methods. */ \
+ F(CanonicalizeLanguageTag, 1, 1) \
+ F(AvailableLocalesOf, 1, 1) \
+ F(GetDefaultICULocale, 0, 1) \
+ F(GetLanguageTagVariants, 1, 1) \
+ \
+ /* Date format and parse. */ \
+ F(CreateDateTimeFormat, 3, 1) \
+ F(InternalDateFormat, 2, 1) \
+ F(InternalDateParse, 2, 1) \
+ \
+ /* Number format and parse. */ \
+ F(CreateNumberFormat, 3, 1) \
+ F(InternalNumberFormat, 2, 1) \
+ F(InternalNumberParse, 2, 1) \
+ \
+ /* Collator. */ \
+ F(CreateCollator, 3, 1) \
+ F(InternalCompare, 3, 1) \
+ \
+ /* Break iterator. */ \
+ F(CreateBreakIterator, 3, 1) \
+ F(BreakIteratorAdoptText, 2, 1) \
+ F(BreakIteratorFirst, 1, 1) \
+ F(BreakIteratorNext, 1, 1) \
+ F(BreakIteratorCurrent, 1, 1) \
+ F(BreakIteratorBreakType, 1, 1) \
+
+#else
+#define RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
+#endif
+
+
#ifdef DEBUG
#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
/* Testing */ \
@@ -551,7 +596,8 @@ namespace internal {
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
+ RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
+ RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
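RUNTIME_FUNCTION_LIST is an X-macro: every F(name, nargs, result_size) row is expanded several different ways, so one edit to the list (like the I18N block above) simultaneously updates the intrinsic's enum value, its descriptor table entry and its C++ binding. A compact sketch of the technique with made-up entries:

    #include <cstdio>

    // One list macro drives both an enum and a descriptor table; adding a
    // row registers the function everywhere at once. Entries illustrative.
    #define MY_FUNCTION_LIST(F) \
      F(MaxSmi, 0)              \
      F(FlattenString, 1)       \
      F(HaveSameMap, 2)

    enum FunctionId {
    #define DEFINE_ID(name, nargs) k##name,
      MY_FUNCTION_LIST(DEFINE_ID)
    #undef DEFINE_ID
      kNumFunctions
    };

    struct FunctionDesc { const char* name; int nargs; };

    static const FunctionDesc kFunctions[] = {
    #define DEFINE_DESC(name, nargs) { #name, nargs },
      MY_FUNCTION_LIST(DEFINE_DESC)
    #undef DEFINE_DESC
    };

    int main() {
      for (int i = 0; i < kNumFunctions; ++i)
        printf("%d: %s/%d\n", i, kFunctions[i].name, kFunctions[i].nargs);
      return 0;
    }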
// ----------------------------------------------------------------------------
// INLINE_FUNCTION_LIST defines all inlined functions accessed
diff --git a/chromium/v8/src/runtime.js b/chromium/v8/src/runtime.js
index 90fb36b4223..5339570ef6e 100644
--- a/chromium/v8/src/runtime.js
+++ b/chromium/v8/src/runtime.js
@@ -294,20 +294,6 @@ function BIT_XOR(y) {
}
-// ECMA-262, section 11.4.7, page 47.
-function UNARY_MINUS() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberUnaryMinus(x);
-}
-
-
-// ECMA-262, section 11.4.8, page 48.
-function BIT_NOT() {
- var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
- return %NumberNot(x);
-}
-
-
// ECMA-262, section 11.7.1, page 51.
function SHL(y) {
var x = IS_NUMBER(this) ? this : %NonNumberToNumber(this);
diff --git a/chromium/v8/src/sampler.cc b/chromium/v8/src/sampler.cc
index d72ed1acdb8..0aaa1e9b77e 100644
--- a/chromium/v8/src/sampler.cc
+++ b/chromium/v8/src/sampler.cc
@@ -27,9 +27,7 @@
#include "sampler.h"
-#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
- || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__) \
- || defined(__native_client__)
+#if V8_OS_POSIX && !V8_OS_CYGWIN
#define USE_SIGNALS
@@ -38,23 +36,25 @@
#include <signal.h>
#include <sys/time.h>
#include <sys/syscall.h>
-#if !defined(__ANDROID__) || defined(__BIONIC_HAVE_UCONTEXT_T)
+
+#if V8_OS_MACOSX
+#include <mach/mach.h>
+// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
+// and is a typedef for struct sigcontext. There is no uc_mcontext.
+#elif (!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) \
+ && !V8_OS_OPENBSD
#include <ucontext.h>
#endif
#include <unistd.h>
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
-#elif defined(__MACH__)
-
-#include <mach/mach.h>
-
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#elif V8_OS_WIN || V8_OS_CYGWIN
#include "win32-headers.h"
@@ -62,7 +62,7 @@
#include "v8.h"
-#include "cpu-profiler.h"
+#include "cpu-profiler-inl.h"
#include "flags.h"
#include "frames-inl.h"
#include "log.h"
@@ -72,7 +72,7 @@
#include "vm-state-inl.h"
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
@@ -144,7 +144,7 @@ typedef struct ucontext {
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
-#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
+#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
namespace v8 {
@@ -177,27 +177,7 @@ class Sampler::PlatformData : public PlatformDataCommon {
pthread_t vm_tid_;
};
-#elif defined(__MACH__)
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
- PlatformData() : profiled_thread_(mach_thread_self()) {}
-
- ~PlatformData() {
- // Deallocate Mach port for thread.
- mach_port_deallocate(mach_task_self(), profiled_thread_);
- }
-
- thread_act_t profiled_thread() { return profiled_thread_; }
-
- private:
- // Note: for profiled_thread_ Mach primitives are used instead of PThread's
- // because the latter doesn't provide thread manipulation primitives required.
- // For details, consult "Mac OS X Internals" book, Section 7.3.
- thread_act_t profiled_thread_;
-};
-
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#elif V8_OS_WIN || V8_OS_CYGWIN
// ----------------------------------------------------------------------------
// Win32 profiler support. On Cygwin we use the same sampler implementation as
@@ -268,8 +248,25 @@ class SimulatorHelper {
class SignalHandler : public AllStatic {
public:
- static inline void EnsureInstalled() {
- if (signal_handler_installed_) return;
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; }
+
+ static void IncreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (++client_count_ == 1) Install();
+ }
+
+ static void DecreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (--client_count_ == 0) Restore();
+ }
+
+ static bool Installed() {
+ return signal_handler_installed_;
+ }
+
+ private:
+ static void Install() {
struct sigaction sa;
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
@@ -278,30 +275,31 @@ class SignalHandler : public AllStatic {
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
- static inline void Restore() {
+ static void Restore() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
- static inline bool Installed() {
- return signal_handler_installed_;
- }
-
- private:
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+  // Protects the process-wide state below.
+ static Mutex* mutex_;
+ static int client_count_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
};
+
+Mutex* SignalHandler::mutex_ = NULL;
+int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
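The rewritten SignalHandler swaps the old install-once EnsureInstalled() for reference counting: the first profiling client installs the SIGPROF handler, the last one restores the previous disposition. A standalone POSIX sketch of that scheme, using a plain pthread mutex where V8 uses its new Mutex/LockGuard:

    #include <pthread.h>
    #include <signal.h>
    #include <cstring>

    namespace {

    pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
    int g_clients = 0;
    struct sigaction g_old_handler;

    void ProfilerSignal(int, siginfo_t*, void*) {
      // Sample the interrupted thread here (async-signal-safe work only).
    }

    void Install() {
      struct sigaction sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_sigaction = &ProfilerSignal;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;
      sigaction(SIGPROF, &sa, &g_old_handler);
    }

    void Restore() { sigaction(SIGPROF, &g_old_handler, NULL); }

    }  // namespace

    void IncreaseSamplerCount() {
      pthread_mutex_lock(&g_mutex);
      if (++g_clients == 1) Install();
      pthread_mutex_unlock(&g_mutex);
    }

    void DecreaseSamplerCount() {
      pthread_mutex_lock(&g_mutex);
      if (--g_clients == 0) Restore();
      pthread_mutex_unlock(&g_mutex);
    }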
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
void* context) {
-#if defined(__native_client__)
+#if V8_OS_NACL
// As Native Client does not support signal handling, profiling
// is disabled.
return;
@@ -319,7 +317,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
}
Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
+ if (sampler == NULL) return;
RegisterState state;
@@ -330,8 +328,10 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+#if !V8_OS_OPENBSD
mcontext_t& mcontext = ucontext->uc_mcontext;
-#if defined(__linux__) || defined(__ANDROID__)
+#endif
+#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
@@ -359,7 +359,29 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
-#elif defined(__FreeBSD__)
+#elif V8_OS_MACOSX
+#if V8_HOST_ARCH_X64
+#if __DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
+ state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
+ state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
+#else // !__DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
+ state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
+ state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
+#endif // __DARWIN_UNIX03
+#elif V8_HOST_ARCH_IA32
+#if __DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
+ state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
+ state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
+#else // !__DARWIN_UNIX03
+ state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
+ state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
+ state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
+#endif // __DARWIN_UNIX03
+#endif // V8_HOST_ARCH_IA32
+#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
@@ -373,7 +395,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif // V8_HOST_ARCH_*
-#elif defined(__NetBSD__)
+#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
@@ -383,8 +405,7 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif // V8_HOST_ARCH_*
-#elif defined(__OpenBSD__)
- USE(mcontext);
+#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
@@ -394,14 +415,14 @@ void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif // V8_HOST_ARCH_*
-#elif defined(__sun)
+#elif V8_OS_SOLARIS
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#endif // __sun
+#endif // V8_OS_SOLARIS
#endif // USE_SIMULATOR
sampler->SampleStack(state);
-#endif // __native_client__
+#endif // V8_OS_NACL
}
#endif
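For one concrete branch of the #if ladder above, here is a self-contained Linux/x86-64 sketch of pulling pc/sp/fp out of the ucontext passed to a SIGPROF handler. Compile with -D_GNU_SOURCE so <ucontext.h> exposes the REG_* indices; other OS/arch pairs use the different field names seen in the hunk.

    #include <signal.h>
    #include <sys/time.h>
    #include <ucontext.h>
    #include <cstdio>
    #include <cstring>

    static volatile sig_atomic_t g_done = 0;

    static void OnProf(int, siginfo_t*, void* context) {
      ucontext_t* uc = static_cast<ucontext_t*>(context);
      mcontext_t& mc = uc->uc_mcontext;
      void* pc = reinterpret_cast<void*>(mc.gregs[REG_RIP]);  // program counter
      void* sp = reinterpret_cast<void*>(mc.gregs[REG_RSP]);  // stack pointer
      void* fp = reinterpret_cast<void*>(mc.gregs[REG_RBP]);  // frame pointer
      // A real profiler hands {pc, sp, fp} to a stack walker; printf is not
      // async-signal-safe and stands in only for this demo.
      printf("pc=%p sp=%p fp=%p\n", pc, sp, fp);
      g_done = 1;
    }

    int main() {
      struct sigaction sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_sigaction = &OnProf;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;
      sigaction(SIGPROF, &sa, NULL);

      struct itimerval timer = { { 0, 0 }, { 0, 10000 } };  // one shot, 10 ms
      setitimer(ITIMER_PROF, &timer, NULL);
      while (!g_done) {}  // burn CPU so ITIMER_PROF fires
      return 0;
    }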
@@ -415,12 +436,12 @@ class SamplerThread : public Thread {
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; mutex_ = NULL; }
static void AddActiveSampler(Sampler* sampler) {
bool need_to_start = false;
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
// when CPU profiling will be enabled.
@@ -433,16 +454,13 @@ class SamplerThread : public Thread {
ASSERT(instance_->interval_ == sampler->interval());
instance_->active_samplers_.Add(sampler);
-#if defined(USE_SIGNALS)
- SignalHandler::EnsureInstalled();
-#endif
if (need_to_start) instance_->StartSynchronously();
}
static void RemoveActiveSampler(Sampler* sampler) {
SamplerThread* instance_to_remove = NULL;
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
ASSERT(sampler->IsActive());
bool removed = instance_->active_samplers_.RemoveElement(sampler);
@@ -454,9 +472,6 @@ class SamplerThread : public Thread {
if (instance_->active_samplers_.is_empty()) {
instance_to_remove = instance_;
instance_ = NULL;
-#if defined(USE_SIGNALS)
- SignalHandler::Restore();
-#endif
}
}
@@ -469,7 +484,7 @@ class SamplerThread : public Thread {
virtual void Run() {
while (true) {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (active_samplers_.is_empty()) break;
// When CPU profiling is enabled, both JavaScript and C++ code are
// profiled. We must not suspend.
@@ -477,7 +492,7 @@ class SamplerThread : public Thread {
Sampler* sampler = active_samplers_.at(i);
if (!sampler->isolate()->IsInitialized()) continue;
if (!sampler->IsProfiling()) continue;
- SampleContext(sampler);
+ sampler->DoSample();
}
}
OS::Sleep(interval_);
@@ -485,109 +500,6 @@ class SamplerThread : public Thread {
}
private:
-#if defined(USE_SIGNALS)
-
- void SampleContext(Sampler* sampler) {
- if (!SignalHandler::Installed()) return;
- pthread_t tid = sampler->platform_data()->vm_tid();
- pthread_kill(tid, SIGPROF);
- }
-
-#elif defined(__MACH__)
-
- void SampleContext(Sampler* sampler) {
- thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
-
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- Isolate* isolate = sampler->isolate();
- if (!helper.Init(sampler, isolate)) return;
-#endif
-
- if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
-
-#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t thread_state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __r ## name
-#else
-#define REGISTER_FIELD(name) r ## name
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t thread_state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __e ## name
-#else
-#define REGISTER_FIELD(name) e ## name
-#endif // __DARWIN_UNIX03
-#else
-#error Unsupported Mac OS X host architecture.
-#endif // V8_HOST_ARCH
-
- if (thread_get_state(profiled_thread,
- flavor,
- reinterpret_cast<natural_t*>(&thread_state),
- &count) == KERN_SUCCESS) {
- RegisterState state;
-#if defined(USE_SIMULATOR)
- helper.FillRegisters(&state);
-#else
- state.pc = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(ip));
- state.sp = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(sp));
- state.fp = reinterpret_cast<Address>(thread_state.REGISTER_FIELD(bp));
-#endif // USE_SIMULATOR
-#undef REGISTER_FIELD
- sampler->SampleStack(state);
- }
- thread_resume(profiled_thread);
- }
-
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- Isolate* isolate = sampler->isolate();
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(sampler, isolate)) return;
-#endif
-
- const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
- RegisterState state;
-#if defined(USE_SIMULATOR)
- helper.FillRegisters(&state);
-#else
-#if V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(context.Rip);
- state.sp = reinterpret_cast<Address>(context.Rsp);
- state.fp = reinterpret_cast<Address>(context.Rbp);
-#else
- state.pc = reinterpret_cast<Address>(context.Eip);
- state.sp = reinterpret_cast<Address>(context.Esp);
- state.fp = reinterpret_cast<Address>(context.Ebp);
-#endif
-#endif // USE_SIMULATOR
- sampler->SampleStack(state);
- }
- ResumeThread(profiled_thread);
- }
-
-#endif // USE_SIGNALS
-
-
// Protects the process-wide state below.
static Mutex* mutex_;
static SamplerThread* instance_;
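Throughout this file, ScopedLock lock(mutex_) becomes LockGuard<Mutex> lock_guard(mutex_) and OS::CreateMutex() becomes a plain new Mutex from the new platform/mutex.h. The guard is ordinary RAII; an illustrative reconstruction (not the real platform/mutex.h) of how the two pieces fit:

    #include <pthread.h>

    class Mutex {
     public:
      Mutex() { pthread_mutex_init(&m_, NULL); }
      ~Mutex() { pthread_mutex_destroy(&m_); }
      void Lock() { pthread_mutex_lock(&m_); }
      void Unlock() { pthread_mutex_unlock(&m_); }
     private:
      pthread_mutex_t m_;
    };

    // Locks in the constructor, unlocks in the destructor, so every early
    // return in AddActiveSampler/RemoveActiveSampler releases the mutex.
    template <typename LockType>
    class LockGuard {
     public:
      explicit LockGuard(LockType* lock) : lock_(lock) { lock_->Lock(); }
      ~LockGuard() { lock_->Unlock(); }
     private:
      LockType* lock_;
    };

    static Mutex* g_mutex = new Mutex;
    static int g_shared = 0;

    void Increment() {
      LockGuard<Mutex> lock_guard(g_mutex);
      ++g_shared;
    }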
@@ -615,8 +527,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
// Avoid collecting traces while doing GC.
if (state == GC) return;
- const Address js_entry_sp =
- Isolate::js_entry_sp(isolate->thread_local_top());
+ Address js_entry_sp = isolate->js_entry_sp();
if (js_entry_sp == 0) {
// Not executing JS now.
return;
@@ -649,12 +560,18 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
void Sampler::SetUp() {
+#if defined(USE_SIGNALS)
+ SignalHandler::SetUp();
+#endif
SamplerThread::SetUp();
}
void Sampler::TearDown() {
SamplerThread::TearDown();
+#if defined(USE_SIGNALS)
+ SignalHandler::TearDown();
+#endif
}
@@ -662,6 +579,7 @@ Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
+ has_processing_thread_(false),
active_(false),
is_counting_samples_(false),
js_and_external_sample_count_(0) {
@@ -689,8 +607,24 @@ void Sampler::Stop() {
}
+void Sampler::IncreaseProfilingDepth() {
+ NoBarrier_AtomicIncrement(&profiling_, 1);
+#if defined(USE_SIGNALS)
+ SignalHandler::IncreaseSamplerCount();
+#endif
+}
+
+
+void Sampler::DecreaseProfilingDepth() {
+#if defined(USE_SIGNALS)
+ SignalHandler::DecreaseSamplerCount();
+#endif
+ NoBarrier_AtomicIncrement(&profiling_, -1);
+}
+
+
void Sampler::SampleStack(const RegisterState& state) {
- TickSample* sample = isolate_->cpu_profiler()->TickSampleEvent();
+ TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
TickSample sample_obj;
if (sample == NULL) sample = &sample_obj;
sample->Init(isolate_, state);
@@ -700,6 +634,58 @@ void Sampler::SampleStack(const RegisterState& state) {
}
}
Tick(sample);
+ if (sample != &sample_obj) {
+ isolate_->cpu_profiler()->FinishTickSample();
+ }
+}
+
+
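SampleStack now brackets each tick with StartTickSample()/FinishTickSample(): the profiler reserves a slot, fills it in place, and only then publishes it to the processing thread. A sketch of that reserve/commit discipline on a hypothetical ring buffer; a real single-producer/single-consumer queue would make the indices atomic:

    #include <cstddef>

    struct TickSample { void* pc; void* sp; void* fp; };

    template <int kCapacity>
    class SamplingBuffer {
     public:
      SamplingBuffer() : reserve_(0), commit_(0), read_(0) {}

      // Like StartTickSample(): NULL means the buffer is full, and the
      // caller falls back to a local sample it drops after Tick().
      TickSample* Start() {
        if (reserve_ - read_ >= kCapacity) return NULL;
        return &buffer_[reserve_ % kCapacity];
      }
      // Like FinishTickSample(): publish the filled slot to the consumer.
      void Finish() { ++reserve_; commit_ = reserve_; }

      TickSample* Peek() {
        return read_ < commit_ ? &buffer_[read_ % kCapacity] : NULL;
      }
      void Remove() { ++read_; }

     private:
      TickSample buffer_[kCapacity];
      size_t reserve_;
      size_t commit_;  // consumer only ever sees fully written slots
      size_t read_;
    };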
+#if defined(USE_SIGNALS)
+
+void Sampler::DoSample() {
+ if (!SignalHandler::Installed()) return;
+ pthread_kill(platform_data()->vm_tid(), SIGPROF);
}
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+void Sampler::DoSample() {
+ HANDLE profiled_thread = platform_data()->profiled_thread();
+ if (profiled_thread == NULL) return;
+
+#if defined(USE_SIMULATOR)
+ SimulatorHelper helper;
+ if (!helper.Init(this, isolate())) return;
+#endif
+
+ const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
+ RegisterState state;
+#if defined(USE_SIMULATOR)
+ helper.FillRegisters(&state);
+#else
+#if V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<Address>(context.Rip);
+ state.sp = reinterpret_cast<Address>(context.Rsp);
+ state.fp = reinterpret_cast<Address>(context.Rbp);
+#else
+ state.pc = reinterpret_cast<Address>(context.Eip);
+ state.sp = reinterpret_cast<Address>(context.Esp);
+ state.fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+#endif // USE_SIMULATOR
+ SampleStack(state);
+ }
+ ResumeThread(profiled_thread);
+}
+
+#endif // USE_SIGNALS
+
+
} } // namespace v8::internal
diff --git a/chromium/v8/src/sampler.h b/chromium/v8/src/sampler.h
index 80ccc087cad..b17a2ed8d50 100644
--- a/chromium/v8/src/sampler.h
+++ b/chromium/v8/src/sampler.h
@@ -94,14 +94,24 @@ class Sampler {
void Start();
void Stop();
- // Is the sampler used for profiling?
- bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
+  // Whether the sampling thread should use this Sampler for CPU profiling.
+ bool IsProfiling() const {
+ return NoBarrier_Load(&profiling_) > 0 &&
+ !NoBarrier_Load(&has_processing_thread_);
+ }
+ void IncreaseProfilingDepth();
+ void DecreaseProfilingDepth();
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
+ void DoSample();
+  // If true, the next sample must be initiated on the profiler event
+  // processor thread right after the latest sample is processed.
+ void SetHasProcessingThread(bool value) {
+ NoBarrier_Store(&has_processing_thread_, value);
+ }
+
// Used in tests to make sure that stack sampling is performed.
unsigned js_and_external_sample_count() const {
return js_and_external_sample_count_;
@@ -125,6 +135,7 @@ class Sampler {
Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
+ Atomic32 has_processing_thread_;
Atomic32 active_;
PlatformData* data_; // Platform specific data.
bool is_counting_samples_;
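profiling_ and the new has_processing_thread_ flag are driven by V8's NoBarrier_* atomic helpers. The closest standard rendering (C++11) is std::atomic with relaxed ordering; an illustrative equivalent of the flag logic:

    #include <atomic>

    class SamplerFlags {
     public:
      SamplerFlags() : profiling_(0), has_processing_thread_(false) {}

      void IncreaseProfilingDepth() {
        profiling_.fetch_add(1, std::memory_order_relaxed);
      }
      void DecreaseProfilingDepth() {
        profiling_.fetch_sub(1, std::memory_order_relaxed);
      }
      void SetHasProcessingThread(bool value) {
        has_processing_thread_.store(value, std::memory_order_relaxed);
      }

      // Mirrors the IsProfiling() change above: the shared sampler thread
      // skips samplers whose ticks are driven by a dedicated processor.
      bool IsProfiling() const {
        return profiling_.load(std::memory_order_relaxed) > 0 &&
               !has_processing_thread_.load(std::memory_order_relaxed);
      }

     private:
      std::atomic<int> profiling_;
      std::atomic<bool> has_processing_thread_;
    };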
diff --git a/chromium/v8/src/scopeinfo.cc b/chromium/v8/src/scopeinfo.cc
index c9df1fb5801..ba138f2adda 100644
--- a/chromium/v8/src/scopeinfo.cc
+++ b/chromium/v8/src/scopeinfo.cc
@@ -74,7 +74,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
+ parameter_count + stack_local_count + 2 * context_local_count
+ (has_function_name ? 2 : 0);
- Factory* factory = Isolate::Current()->factory();
+ Factory* factory = zone->isolate()->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
// Encode the flags.
@@ -445,7 +445,8 @@ void ContextSlotCache::Update(Object* data,
int slot_index) {
String* internalized_name;
ASSERT(slot_index > kNotFound);
- if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
+ if (name->GetIsolate()->heap()->InternalizeStringIfExists(
+ name, &internalized_name)) {
int index = Hash(data, internalized_name);
Key& key = keys_[index];
key.data = data;
@@ -472,7 +473,8 @@ void ContextSlotCache::ValidateEntry(Object* data,
InitializationFlag init_flag,
int slot_index) {
String* internalized_name;
- if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
+ if (name->GetIsolate()->heap()->InternalizeStringIfExists(
+ name, &internalized_name)) {
int index = Hash(data, name);
Key& key = keys_[index];
ASSERT(key.data == data);
diff --git a/chromium/v8/src/scopes.cc b/chromium/v8/src/scopes.cc
index e631332d5c3..ce1741a623a 100644
--- a/chromium/v8/src/scopes.cc
+++ b/chromium/v8/src/scopes.cc
@@ -129,7 +129,7 @@ Scope::Scope(Scope* inner_scope,
ScopeType scope_type,
Handle<ScopeInfo> scope_info,
Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
inner_scopes_(4, zone),
variables_(zone),
internals_(4, zone),
@@ -152,7 +152,7 @@ Scope::Scope(Scope* inner_scope,
Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
inner_scopes_(1, zone),
variables_(zone),
internals_(0, zone),
@@ -907,26 +907,32 @@ void Scope::Print(int n) {
PrintF("%d heap slots\n", num_heap_slots_); }
// Print locals.
- Indent(n1, "// function var\n");
if (function_ != NULL) {
+ Indent(n1, "// function var:\n");
PrintVar(n1, function_->proxy()->var());
}
- Indent(n1, "// temporary vars\n");
- for (int i = 0; i < temps_.length(); i++) {
- PrintVar(n1, temps_[i]);
+ if (temps_.length() > 0) {
+ Indent(n1, "// temporary vars:\n");
+ for (int i = 0; i < temps_.length(); i++) {
+ PrintVar(n1, temps_[i]);
+ }
}
- Indent(n1, "// internal vars\n");
- for (int i = 0; i < internals_.length(); i++) {
- PrintVar(n1, internals_[i]);
+ if (internals_.length() > 0) {
+ Indent(n1, "// internal vars:\n");
+ for (int i = 0; i < internals_.length(); i++) {
+ PrintVar(n1, internals_[i]);
+ }
}
- Indent(n1, "// local vars\n");
- PrintMap(n1, &variables_);
+ if (variables_.Start() != NULL) {
+ Indent(n1, "// local vars:\n");
+ PrintMap(n1, &variables_);
+ }
- Indent(n1, "// dynamic vars\n");
if (dynamics_ != NULL) {
+ Indent(n1, "// dynamic vars:\n");
PrintMap(n1, dynamics_->GetMap(DYNAMIC));
PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
@@ -1086,7 +1092,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// Assignment to const. Throw a syntax error.
MessageLocation location(
info->script(), proxy->position(), proxy->position());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(0);
Handle<Object> result =
@@ -1117,7 +1123,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// TODO(rossberg): generate more helpful error message.
MessageLocation location(
info->script(), proxy->position(), proxy->position());
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = info->isolate();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
diff --git a/chromium/v8/src/serialize.cc b/chromium/v8/src/serialize.cc
index 6c5a620a418..d05dd261227 100644
--- a/chromium/v8/src/serialize.cc
+++ b/chromium/v8/src/serialize.cc
@@ -596,9 +596,9 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
}
-ExternalReferenceEncoder::ExternalReferenceEncoder()
+ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
: encodings_(Match),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance(isolate_);
for (int i = 0; i < external_references->size(); ++i) {
@@ -638,9 +638,9 @@ void ExternalReferenceEncoder::Put(Address key, int index) {
}
-ExternalReferenceDecoder::ExternalReferenceDecoder()
+ExternalReferenceDecoder::ExternalReferenceDecoder(Isolate* isolate)
: encodings_(NewArray<Address*>(kTypeCodeCount)),
- isolate_(Isolate::Current()) {
+ isolate_(isolate) {
ExternalReferenceTable* external_references =
ExternalReferenceTable::instance(isolate_);
for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
@@ -780,13 +780,12 @@ class CodeAddressMap: public CodeEventLogger {
CodeAddressMap* Serializer::code_address_map_ = NULL;
-void Serializer::Enable() {
+void Serializer::Enable(Isolate* isolate) {
if (!serialization_enabled_) {
ASSERT(!too_late_to_enable_now_);
}
if (serialization_enabled_) return;
serialization_enabled_ = true;
- i::Isolate* isolate = Isolate::Current();
isolate->InitializeLoggingAndCounters();
code_address_map_ = new CodeAddressMap(isolate);
}
@@ -810,8 +809,8 @@ Deserializer::Deserializer(SnapshotByteSource* source)
}
-void Deserializer::Deserialize() {
- isolate_ = Isolate::Current();
+void Deserializer::Deserialize(Isolate* isolate) {
+ isolate_ = isolate;
ASSERT(isolate_ != NULL);
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
// No active threads.
@@ -819,7 +818,7 @@ void Deserializer::Deserialize() {
// No active handles.
ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
ASSERT_EQ(NULL, external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder();
+ external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
isolate_->heap()->RepairFreeListsAfterBoot();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
@@ -850,14 +849,14 @@ void Deserializer::Deserialize() {
}
-void Deserializer::DeserializePartial(Object** root) {
- isolate_ = Isolate::Current();
+void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
+ isolate_ = isolate;
for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
ASSERT(reservations_[i] != kUninitializedReservation);
}
isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
if (external_reference_decoder_ == NULL) {
- external_reference_decoder_ = new ExternalReferenceDecoder();
+ external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
}
// Keep track of the code space start and end pointers in case new
@@ -1277,12 +1276,12 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
}
-Serializer::Serializer(SnapshotByteSink* sink)
- : sink_(sink),
+Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+ : isolate_(isolate),
+ sink_(sink),
current_root_index_(0),
- external_reference_encoder_(new ExternalReferenceEncoder),
+ external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
root_index_wave_front_(0) {
- isolate_ = Isolate::Current();
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
ASSERT(isolate_->IsDefaultIsolate());
@@ -1298,16 +1297,17 @@ Serializer::~Serializer() {
void StartupSerializer::SerializeStrongReferences() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
// No active threads.
- CHECK_EQ(NULL, Isolate::Current()->thread_manager()->FirstThreadStateInUse());
+ CHECK_EQ(NULL, isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
+ CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
// We don't support serializing installed extensions.
CHECK(!isolate->has_installed_extensions());
- HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
}
@@ -1318,7 +1318,7 @@ void PartialSerializer::Serialize(Object** object) {
void Serializer::VisitPointers(Object** start, Object** end) {
- Isolate* isolate = Isolate::Current();
+  Isolate* isolate = this->isolate();
for (Object** current = start; current < end; current++) {
if (start == isolate->heap()->roots_array_start()) {
@@ -1349,9 +1349,9 @@ void Serializer::VisitPointers(Object** start, Object** end) {
// that correspond to the elements of this cache array. On deserialization we
// therefore need to visit the cache array. This fills it up with pointers to
// deserialized objects.
-void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+void SerializerDeserializer::Iterate(Isolate* isolate,
+ ObjectVisitor* visitor) {
if (Serializer::enabled()) return;
- Isolate* isolate = Isolate::Current();
for (int i = 0; ; i++) {
if (isolate->serialize_partial_snapshot_cache_length() <= i) {
// Extend the array ready to get a value from the visitor when
@@ -1370,7 +1370,7 @@ void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = this->isolate();
for (int i = 0;
i < isolate->serialize_partial_snapshot_cache_length();
@@ -1393,7 +1393,7 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
- Heap* heap = HEAP;
+ Heap* heap = isolate()->heap();
if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
for (int i = 0; i < root_index_wave_front_; i++) {
Object* root = heap->roots_array_start()[i];
@@ -1483,10 +1483,9 @@ void StartupSerializer::SerializeWeakReferences() {
// will contain some references needed to decode the partial snapshot. We
// add one entry with 'undefined' which is the sentinel that the deserializer
// uses to know it is done deserializing the array.
- Isolate* isolate = Isolate::Current();
- Object* undefined = isolate->heap()->undefined_value();
+ Object* undefined = isolate()->heap()->undefined_value();
VisitPointer(&undefined);
- HEAP->IterateWeakRoots(this, VISIT_ALL);
+ isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
Pad();
}
@@ -1499,7 +1498,7 @@ void Serializer::PutRoot(int root_index,
if (how_to_code == kPlain &&
where_to_point == kStartOfObject &&
root_index < kRootArrayNumberOfConstantEncodings &&
- !HEAP->InNewSpace(object)) {
+ !isolate()->heap()->InNewSpace(object)) {
if (skip == 0) {
sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
"RootConstant");
@@ -1632,7 +1631,7 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
root_index != kInvalidRootIndex &&
root_index < kRootArrayNumberOfConstantEncodings &&
current_contents == current[-1]) {
- ASSERT(!HEAP->InNewSpace(current_contents));
+ ASSERT(!serializer_->isolate()->heap()->InNewSpace(current_contents));
int repeat_count = 1;
while (current < end - 1 && current[repeat_count] == current_contents) {
repeat_count++;
@@ -1749,7 +1748,8 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString(
Address references_start = reinterpret_cast<Address>(resource_pointer);
OutputRawData(references_start);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = HEAP->natives_source_cache()->get(i);
+ Object* source =
+ serializer_->isolate()->heap()->natives_source_cache()->get(i);
if (!source->IsUndefined()) {
ExternalAsciiString* string = ExternalAsciiString::cast(source);
typedef v8::String::ExternalAsciiStringResource Resource;
@@ -1818,7 +1818,7 @@ int Serializer::ObjectSerializer::OutputRawData(
int Serializer::SpaceOfObject(HeapObject* object) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
AllocationSpace s = static_cast<AllocationSpace>(i);
- if (HEAP->InSpace(object, s)) {
+ if (object->GetHeap()->InSpace(object, s)) {
ASSERT(i < kNumberOfSpaces);
return i;
}
diff --git a/chromium/v8/src/serialize.h b/chromium/v8/src/serialize.h
index 563f0a06d02..020a744fc0e 100644
--- a/chromium/v8/src/serialize.h
+++ b/chromium/v8/src/serialize.h
@@ -110,7 +110,7 @@ class ExternalReferenceTable {
class ExternalReferenceEncoder {
public:
- ExternalReferenceEncoder();
+ explicit ExternalReferenceEncoder(Isolate* isolate);
uint32_t Encode(Address key) const;
@@ -134,7 +134,7 @@ class ExternalReferenceEncoder {
class ExternalReferenceDecoder {
public:
- ExternalReferenceDecoder();
+ explicit ExternalReferenceDecoder(Isolate* isolate);
~ExternalReferenceDecoder();
Address Decode(uint32_t key) const {
@@ -208,7 +208,7 @@ class SnapshotByteSource {
// both.
class SerializerDeserializer: public ObjectVisitor {
public:
- static void Iterate(ObjectVisitor* visitor);
+ static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
static int nop() { return kNop; }
@@ -325,10 +325,10 @@ class Deserializer: public SerializerDeserializer {
virtual ~Deserializer();
// Deserialize the snapshot into an empty heap.
- void Deserialize();
+ void Deserialize(Isolate* isolate);
// Deserialize a single object and the objects reachable from it.
- void DeserializePartial(Object** root);
+ void DeserializePartial(Isolate* isolate, Object** root);
void set_reservation(int space_number, int reservation) {
ASSERT(space_number >= 0);
@@ -464,7 +464,7 @@ class CodeAddressMap;
// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
public:
- explicit Serializer(SnapshotByteSink* sink);
+ Serializer(Isolate* isolate, SnapshotByteSink* sink);
~Serializer();
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
@@ -474,7 +474,8 @@ class Serializer : public SerializerDeserializer {
return fullness_[space];
}
- static void Enable();
+ Isolate* isolate() const { return isolate_; }
+ static void Enable(Isolate* isolate);
static void Disable();
// Call this when you have made use of the fact that there is no serialization
@@ -593,9 +594,10 @@ class Serializer : public SerializerDeserializer {
class PartialSerializer : public Serializer {
public:
- PartialSerializer(Serializer* startup_snapshot_serializer,
+ PartialSerializer(Isolate* isolate,
+ Serializer* startup_snapshot_serializer,
SnapshotByteSink* sink)
- : Serializer(sink),
+ : Serializer(isolate, sink),
startup_serializer_(startup_snapshot_serializer) {
set_root_index_wave_front(Heap::kStrongRootListLength);
}
@@ -618,7 +620,8 @@ class PartialSerializer : public Serializer {
return o->IsName() || o->IsSharedFunctionInfo() ||
o->IsHeapNumber() || o->IsCode() ||
o->IsScopeInfo() ||
- o->map() == HEAP->fixed_cow_array_map();
+ o->map() ==
+ startup_serializer_->isolate()->heap()->fixed_cow_array_map();
}
private:
@@ -629,12 +632,13 @@ class PartialSerializer : public Serializer {
class StartupSerializer : public Serializer {
public:
- explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
+ StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
+ : Serializer(isolate, sink) {
// Clear the cache of objects used by the partial snapshot. After the
// strong roots have been serialized we can create a partial snapshot
// which will repopulate the cache with objects needed by that partial
// snapshot.
- Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
+ isolate->set_serialize_partial_snapshot_cache_length(0);
}
// Serialize the current state of the heap. The order is:
// 1) Strong references.
diff --git a/chromium/v8/src/snapshot-common.cc b/chromium/v8/src/snapshot-common.cc
index 576269df9e4..96034e352bc 100644
--- a/chromium/v8/src/snapshot-common.cc
+++ b/chromium/v8/src/snapshot-common.cc
@@ -116,7 +116,7 @@ bool Snapshot::HaveASnapshotToStartFrom() {
}
-Handle<Context> Snapshot::NewContextFromSnapshot() {
+Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
if (context_size_ == 0) {
return Handle<Context>();
}
@@ -132,7 +132,7 @@ Handle<Context> Snapshot::NewContextFromSnapshot() {
deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
deserializer.set_reservation(PROPERTY_CELL_SPACE,
context_property_cell_space_used_);
- deserializer.DeserializePartial(&root);
+ deserializer.DeserializePartial(isolate, &root);
CHECK(root->IsContext());
return Handle<Context>(Context::cast(root));
}
diff --git a/chromium/v8/src/snapshot.h b/chromium/v8/src/snapshot.h
index 149306e4421..4041f2925e5 100644
--- a/chromium/v8/src/snapshot.h
+++ b/chromium/v8/src/snapshot.h
@@ -43,7 +43,7 @@ class Snapshot {
static bool HaveASnapshotToStartFrom();
// Create a new context using the internal partial snapshot.
- static Handle<Context> NewContextFromSnapshot();
+ static Handle<Context> NewContextFromSnapshot(Isolate* isolate);
// Returns whether or not the snapshot is enabled.
static bool IsEnabled() { return size_ != 0; }
diff --git a/chromium/v8/src/spaces-inl.h b/chromium/v8/src/spaces-inl.h
index 77117b8a4e4..be2ae2a57db 100644
--- a/chromium/v8/src/spaces-inl.h
+++ b/chromium/v8/src/spaces-inl.h
@@ -194,11 +194,11 @@ void MemoryChunk::set_scan_on_scavenge(bool scan) {
}
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
OffsetFrom(addr) & ~Page::kPageAlignmentMask);
if (maybe->owner() != NULL) return maybe;
- LargeObjectIterator iterator(HEAP->lo_space());
+ LargeObjectIterator iterator(heap->lo_space());
for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
// Fixed arrays are the only pointer-containing objects in large object
// space.
@@ -315,12 +315,12 @@ MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
#ifdef DEBUG
// If we are stressing compaction we waste some memory in new space
// in order to get more frequent GCs.
- if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
+ if (FLAG_stress_compaction && !heap()->linear_allocation()) {
if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
int filler_size = size_in_bytes * 4;
for (int i = 0; i < filler_size; i += kPointerSize) {
*(reinterpret_cast<Object**>(old_top + i)) =
- HEAP->one_pointer_filler_map();
+ heap()->one_pointer_filler_map();
}
old_top += filler_size;
allocation_info_.top += filler_size;
diff --git a/chromium/v8/src/spaces.cc b/chromium/v8/src/spaces.cc
index 5935c4a0ea9..2faf41912e8 100644
--- a/chromium/v8/src/spaces.cc
+++ b/chromium/v8/src/spaces.cc
@@ -228,10 +228,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
}
ASSERT(*allocated <= current.size);
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitExecutableMemory(code_range_,
- current.start,
- commit_size,
- *allocated)) {
+ if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
+ current.start,
+ commit_size,
+ *allocated)) {
*allocated = 0;
return NULL;
}
@@ -245,7 +245,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return code_range_->Commit(start, length, true);
+ return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}
@@ -278,7 +278,9 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
capacity_(0),
capacity_executable_(0),
size_(0),
- size_executable_(0) {
+ size_executable_(0),
+ lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+ highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}
@@ -304,6 +306,17 @@ void MemoryAllocator::TearDown() {
}
+bool MemoryAllocator::CommitMemory(Address base,
+ size_t size,
+ Executability executable) {
+ if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+ return false;
+ }
+ UpdateAllocatedSpaceLimits(base, base + size);
+ return true;
+}
+
+
void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
@@ -383,7 +396,9 @@ Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
base = NULL;
}
} else {
- if (!reservation.Commit(base, commit_size, false)) {
+ if (reservation.Commit(base, commit_size, false)) {
+ UpdateAllocatedSpaceLimits(base, base + commit_size);
+ } else {
base = NULL;
}
}
@@ -509,7 +524,10 @@ bool MemoryChunk::CommitArea(size_t requested) {
Address start = address() + committed_size + guard_size;
size_t length = commit_size - committed_size;
if (reservation_.IsReserved()) {
- if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+ Executability executable = IsFlagSet(IS_EXECUTABLE)
+ ? EXECUTABLE : NOT_EXECUTABLE;
+ if (!heap()->isolate()->memory_allocator()->CommitMemory(
+ start, length, executable)) {
return false;
}
} else {
@@ -763,7 +781,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
Executability executable) {
- if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+ if (!CommitMemory(start, size, executable)) return false;
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size);
@@ -899,6 +917,9 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
return false;
}
+ UpdateAllocatedSpaceLimits(start,
+ start + CodePageAreaStartOffset() +
+ commit_size - CodePageGuardStartOffset());
return true;
}
@@ -1777,8 +1798,7 @@ void SemiSpaceIterator::Initialize(Address start,
#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
-static void ClearHistograms() {
- Isolate* isolate = Isolate::Current();
+static void ClearHistograms(Isolate* isolate) {
// We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
@@ -1829,8 +1849,7 @@ static int CollectHistogramInfo(HeapObject* obj) {
}
-static void ReportHistogram(bool print_spill) {
- Isolate* isolate = Isolate::Current();
+static void ReportHistogram(Isolate* isolate, bool print_spill) {
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
if (isolate->heap_histograms()[i].number() > 0) {
@@ -1931,7 +1950,7 @@ void NewSpace::ReportStatistics() {
#endif // DEBUG
if (FLAG_log_gc) {
- Isolate* isolate = ISOLATE;
+ Isolate* isolate = heap()->isolate();
DoReportStatistics(isolate, allocated_histogram_, "allocated");
DoReportStatistics(isolate, promoted_histogram_, "promoted");
}
@@ -2043,8 +2062,8 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
- ScopedLock lock_target(mutex_);
- ScopedLock lock_source(category->mutex());
+ LockGuard<Mutex> target_lock_guard(mutex());
+ LockGuard<Mutex> source_lock_guard(category->mutex());
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
@@ -2685,8 +2704,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
#ifdef DEBUG
-void PagedSpace::ReportCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
ReportCodeKindStatistics(isolate->code_kind_statistics());
@@ -2703,8 +2721,7 @@ void PagedSpace::ReportCodeStatistics() {
}
-void PagedSpace::ResetCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
CommentStatistic* comments_statistics =
isolate->paged_space_comments_statistics();
ClearCodeKindStatistics(isolate->code_kind_statistics());
@@ -2819,11 +2836,11 @@ void PagedSpace::ReportStatistics() {
Capacity(), Waste(), Available(), pct);
if (was_swept_conservatively_) return;
- ClearHistograms();
+ ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
CollectHistogramInfo(obj);
- ReportHistogram(true);
+ ReportHistogram(heap()->isolate(), true);
}
#endif
@@ -2852,8 +2869,7 @@ void FixedSpace::PrepareForMarkCompact() {
// the VerifyObject definition behind VERIFY_HEAP.
void MapSpace::VerifyObject(HeapObject* object) {
- // The object should be a map or a free-list node.
- CHECK(object->IsMap() || object->IsFreeSpace());
+ CHECK(object->IsMap());
}
@@ -2864,16 +2880,12 @@ void MapSpace::VerifyObject(HeapObject* object) {
// the VerifyObject definition behind VERIFY_HEAP.
void CellSpace::VerifyObject(HeapObject* object) {
- // The object should be a global object property cell or a free-list node.
- CHECK(object->IsCell() ||
- object->map() == heap()->two_pointer_filler_map());
+ CHECK(object->IsCell());
}
void PropertyCellSpace::VerifyObject(HeapObject* object) {
- // The object should be a global object property cell or a free-list node.
- CHECK(object->IsPropertyCell() ||
- object->map() == heap()->two_pointer_filler_map());
+ CHECK(object->IsPropertyCell());
}
@@ -3165,7 +3177,7 @@ void LargeObjectSpace::Print() {
void LargeObjectSpace::ReportStatistics() {
PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
int num_objects = 0;
- ClearHistograms();
+ ClearHistograms(heap()->isolate());
LargeObjectIterator it(this);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
num_objects++;
@@ -3174,7 +3186,7 @@ void LargeObjectSpace::ReportStatistics() {
PrintF(" number of objects %d, "
"size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
- if (num_objects > 0) ReportHistogram(false);
+ if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
diff --git a/chromium/v8/src/spaces.h b/chromium/v8/src/spaces.h
index b47452e421f..43f44a5c707 100644
--- a/chromium/v8/src/spaces.h
+++ b/chromium/v8/src/spaces.h
@@ -32,6 +32,7 @@
#include "hashmap.h"
#include "list.h"
#include "log.h"
+#include "platform/mutex.h"
#include "v8utils.h"
namespace v8 {
@@ -306,7 +307,7 @@ class MemoryChunk {
}
// Only works for addresses in pointer spaces, not data or code spaces.
- static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+ static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
Address address() { return reinterpret_cast<Address>(this); }
@@ -784,8 +785,9 @@ class Page : public MemoryChunk {
// Maximum object size that fits in a page. Objects larger than that size
// are allocated in large object space and are never moved in memory. This
// also applies to new space allocation, since objects are never migrated
- // from new space to large object space.
- static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
+ // from new space to large object space. Takes double alignment into account.
+ static const int kMaxNonCodeHeapObjectSize =
+ kNonCodeObjectAreaSize - kPointerSize;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -1081,6 +1083,13 @@ class MemoryAllocator {
return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
}
+ // Returns an indication of whether a pointer is in a space that has
+ // been allocated by this MemoryAllocator.
+ V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+ return address < lowest_ever_allocated_ ||
+ address >= highest_ever_allocated_;
+ }
+
#ifdef DEBUG
// Reports statistic info of the space.
void ReportStatistics();
@@ -1103,6 +1112,8 @@ class MemoryAllocator {
Executability executable,
VirtualMemory* controller);
+ bool CommitMemory(Address addr, size_t size, Executability executable);
+
void FreeMemory(VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
@@ -1148,10 +1159,10 @@ class MemoryAllocator {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
+ MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
private:
Isolate* isolate_;
@@ -1166,6 +1177,14 @@ class MemoryAllocator {
// Allocated executable space size in bytes.
size_t size_executable_;
+ // We keep the lowest and highest addresses allocated as a quick way
+ // of determining that pointers are outside the heap. The estimate is
+ // conservative, i.e. not all addresses in 'allocated' space are allocated
+ // to our heap. The range is [lowest, highest), inclusive on the low end
+ // and exclusive on the high end.
+ void* lowest_ever_allocated_;
+ void* highest_ever_allocated_;
+
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
ObjectSpace space,
@@ -1188,6 +1207,11 @@ class MemoryAllocator {
Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
+ void UpdateAllocatedSpaceLimits(void* low, void* high) {
+ lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+ highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
@@ -1444,13 +1468,8 @@ class FreeListCategory {
FreeListCategory() :
top_(NULL),
end_(NULL),
- mutex_(OS::CreateMutex()),
available_(0) {}
- ~FreeListCategory() {
- delete mutex_;
- }
-
intptr_t Concatenate(FreeListCategory* category);
void Reset();
@@ -1476,7 +1495,7 @@ class FreeListCategory {
int available() const { return available_; }
void set_available(int available) { available_ = available; }
- Mutex* mutex() { return mutex_; }
+ Mutex* mutex() { return &mutex_; }
#ifdef DEBUG
intptr_t SumFreeList();
@@ -1486,7 +1505,7 @@ class FreeListCategory {
private:
FreeListNode* top_;
FreeListNode* end_;
- Mutex* mutex_;
+ Mutex mutex_;
// Total available bytes in all blocks of this free list category.
int available_;
@@ -1756,8 +1775,8 @@ class PagedSpace : public Space {
// Report code object related statistics
void CollectCodeStatistics();
- static void ReportCodeStatistics();
- static void ResetCodeStatistics();
+ static void ReportCodeStatistics(Isolate* isolate);
+ static void ResetCodeStatistics(Isolate* isolate);
#endif
bool was_swept_conservatively() { return was_swept_conservatively_; }
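
Editor's note: the new lowest_ever_allocated_/highest_ever_allocated_ pair gives MemoryAllocator an O(1) conservative rejection test: an address outside the range can never belong to the heap, so callers such as FromAnyPointerAddress can skip a chunk lookup entirely. A self-contained sketch of the bookkeeping, based only on the comments above (the class and helper names here are illustrative, not V8's):

    #include <algorithm>
    #include <cstdint>

    // Toy model of the conservative allocated-range tracking added to
    // MemoryAllocator; not the V8 class itself.
    class AllocatedRange {
     public:
      AllocatedRange()
          : lowest_(reinterpret_cast<void*>(~static_cast<std::uintptr_t>(0))),
            highest_(nullptr) {}

      // Called after every successful reservation: widen the range.
      void Update(void* low, void* high) {
        lowest_ = std::min(lowest_, low);
        highest_ = std::max(highest_, high);
      }

      // Conservative: may answer "maybe inside" for unallocated gaps within
      // the range, but never "outside" for a live heap address.
      // The range is [lowest_, highest_).
      bool IsOutside(const void* address) const {
        return address < lowest_ || address >= highest_;
      }

     private:
      void* lowest_;   // inclusive lower bound
      void* highest_;  // exclusive upper bound
    };
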
diff --git a/chromium/v8/src/splay-tree-inl.h b/chromium/v8/src/splay-tree-inl.h
index 4eca71d1004..42024756e9b 100644
--- a/chromium/v8/src/splay-tree-inl.h
+++ b/chromium/v8/src/splay-tree-inl.h
@@ -91,6 +91,12 @@ bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Contains(const Key& key) {
+ return FindInternal(key);
+}
+
+
+template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
if (FindInternal(key)) {
locator->bind(root_);
@@ -293,9 +299,10 @@ void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
template <typename Config, class Allocator> template <class Callback>
void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
+ if (root_ == NULL) return;
// Pre-allocate some space for tiny trees.
List<Node*, Allocator> nodes_to_visit(10, allocator_);
- if (root_ != NULL) nodes_to_visit.Add(root_, allocator_);
+ nodes_to_visit.Add(root_, allocator_);
int pos = 0;
while (pos < nodes_to_visit.length()) {
Node* node = nodes_to_visit[pos++];
diff --git a/chromium/v8/src/splay-tree.h b/chromium/v8/src/splay-tree.h
index 8844d8a8ffe..f393027a82c 100644
--- a/chromium/v8/src/splay-tree.h
+++ b/chromium/v8/src/splay-tree.h
@@ -39,9 +39,9 @@ namespace internal {
//
// typedef Key: the key type
// typedef Value: the value type
-// static const kNoKey: the dummy key used when no key is set
-// static const kNoValue: the dummy value used to initialize nodes
-// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
+// static const Key kNoKey: the dummy key used when no key is set
+// static Value kNoValue(): the dummy value used to initialize nodes
+// static int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
//
// The tree is also parameterized by an allocation policy
// (Allocator). The policy is used for allocating lists in the C free
@@ -74,6 +74,11 @@ class SplayTree {
UNREACHABLE();
}
+ AllocationPolicy allocator() { return allocator_; }
+
+ // Checks if there is a mapping for the key.
+ bool Contains(const Key& key);
+
// Inserts the given key in this tree with the given value. Returns
// true if a node was inserted, otherwise false. If found the locator
// is enabled and provides access to the mapping for the key.
@@ -104,6 +109,9 @@ class SplayTree {
// Remove the node with the given key from the tree.
bool Remove(const Key& key);
+ // Remove all keys from the tree.
+ void Clear() { ResetRoot(); }
+
bool is_empty() { return root_ == NULL; }
// Perform the splay operation for the given key. Moves the node with
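
Editor's note: Contains gives callers a membership test without binding a Locator through Find, and Clear drops every mapping by resetting the root. A hypothetical call site, assuming some integer-keyed Config as the doc comment above describes (MyIntConfig is an invented placeholder, not part of the patch):

    // Illustrative only: MyIntConfig supplies Key/Value/kNoKey/kNoValue().
    SplayTree<MyIntConfig, FreeStoreAllocationPolicy> tree;
    SplayTree<MyIntConfig, FreeStoreAllocationPolicy>::Locator loc;
    if (tree.Insert(42, &loc)) loc.set_value(MyIntConfig::kNoValue());
    if (tree.Contains(42)) {
      // Key is present; no Locator needed just to test membership.
    }
    tree.Clear();  // Equivalent to removing every key at once.
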
diff --git a/chromium/v8/src/store-buffer-inl.h b/chromium/v8/src/store-buffer-inl.h
index bb386dbacf9..e1fcdee6618 100644
--- a/chromium/v8/src/store-buffer-inl.h
+++ b/chromium/v8/src/store-buffer-inl.h
@@ -67,7 +67,7 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (top >= old_limit_) {
ASSERT(callback_ != NULL);
(*callback_)(heap_,
- MemoryChunk::FromAnyPointerAddress(addr),
+ MemoryChunk::FromAnyPointerAddress(heap_, addr),
kStoreBufferFullEvent);
}
}
diff --git a/chromium/v8/src/store-buffer.cc b/chromium/v8/src/store-buffer.cc
index 9705b604898..22a546742c8 100644
--- a/chromium/v8/src/store-buffer.cc
+++ b/chromium/v8/src/store-buffer.cc
@@ -170,7 +170,10 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ if (chunk->scan_on_scavenge()) {
+ page_has_scan_on_scavenge_flag = true;
+ break;
+ }
}
if (page_has_scan_on_scavenge_flag) {
@@ -218,7 +221,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
}
int old_counter = containing_chunk->store_buffer_counter();
if (old_counter == threshold) {
@@ -244,7 +247,7 @@ void StoreBuffer::Filter(int flag) {
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
containing_chunk = previous_chunk;
} else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+ containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
previous_chunk = containing_chunk;
}
if (!containing_chunk->IsFlagSet(flag)) {
@@ -279,7 +282,10 @@ bool StoreBuffer::PrepareForIteration() {
MemoryChunk* chunk;
bool page_has_scan_on_scavenge_flag = false;
while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+ if (chunk->scan_on_scavenge()) {
+ page_has_scan_on_scavenge_flag = true;
+ break;
+ }
}
if (page_has_scan_on_scavenge_flag) {
diff --git a/chromium/v8/src/string-stream.cc b/chromium/v8/src/string-stream.cc
index 9c4394ed7f0..45b675fa8ba 100644
--- a/chromium/v8/src/string-stream.cc
+++ b/chromium/v8/src/string-stream.cc
@@ -194,7 +194,8 @@ void StringStream::PrintObject(Object* o) {
return;
}
if (o->IsHeapObject()) {
- DebugObjectCache* debug_object_cache = Isolate::Current()->
+ HeapObject* ho = HeapObject::cast(o);
+ DebugObjectCache* debug_object_cache = ho->GetIsolate()->
string_stream_debug_object_cache();
for (int i = 0; i < debug_object_cache->length(); i++) {
if ((*debug_object_cache)[i] == o) {
@@ -268,8 +269,8 @@ SmartArrayPointer<const char> StringStream::ToCString() const {
}
-void StringStream::Log() {
- LOG(ISOLATE, StringEvent("StackDump", buffer_));
+void StringStream::Log(Isolate* isolate) {
+ LOG(isolate, StringEvent("StackDump", buffer_));
}
@@ -289,14 +290,13 @@ void StringStream::OutputToFile(FILE* out) {
}
-Handle<String> StringStream::ToString() {
- Factory* factory = Isolate::Current()->factory();
- return factory->NewStringFromUtf8(Vector<const char>(buffer_, length_));
+Handle<String> StringStream::ToString(Isolate* isolate) {
+ return isolate->factory()->NewStringFromUtf8(
+ Vector<const char>(buffer_, length_));
}
-void StringStream::ClearMentionedObjectCache() {
- Isolate* isolate = Isolate::Current();
+void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
isolate->set_string_stream_debug_object_cache(
@@ -307,9 +307,8 @@ void StringStream::ClearMentionedObjectCache() {
#ifdef DEBUG
-bool StringStream::IsMentionedObjectCacheClear() {
- return (
- Isolate::Current()->string_stream_debug_object_cache()->length() == 0);
+bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
+ return isolate->string_stream_debug_object_cache()->length() == 0;
}
#endif
@@ -351,7 +350,7 @@ void StringStream::PrintName(Object* name) {
void StringStream::PrintUsingMap(JSObject* js_object) {
Map* map = js_object->map();
- if (!HEAP->Contains(map) ||
+ if (!js_object->GetHeap()->Contains(map) ||
!map->IsHeapObject() ||
!map->IsMap()) {
Add("<Invalid map>\n");
@@ -385,7 +384,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
- Heap* heap = HEAP;
+ Heap* heap = array->GetHeap();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
Object* element = array->get(i);
if (element != heap->the_hole_value()) {
@@ -422,9 +421,9 @@ void StringStream::PrintByteArray(ByteArray* byte_array) {
}
-void StringStream::PrintMentionedObjectCache() {
+void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
DebugObjectCache* debug_object_cache =
- Isolate::Current()->string_stream_debug_object_cache();
+ isolate->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
for (int i = 0; i < debug_object_cache->length(); i++) {
HeapObject* printee = (*debug_object_cache)[i];
@@ -457,12 +456,12 @@ void StringStream::PrintMentionedObjectCache() {
void StringStream::PrintSecurityTokenIfChanged(Object* f) {
- Isolate* isolate = Isolate::Current();
+ if (!f->IsHeapObject()) return;
+ HeapObject* obj = HeapObject::cast(f);
+ Isolate* isolate = obj->GetIsolate();
Heap* heap = isolate->heap();
- if (!f->IsHeapObject() || !heap->Contains(HeapObject::cast(f))) {
- return;
- }
- Map* map = HeapObject::cast(f)->map();
+ if (!heap->Contains(obj)) return;
+ Map* map = obj->map();
if (!map->IsHeapObject() ||
!heap->Contains(map) ||
!map->IsMap() ||
@@ -492,48 +491,39 @@ void StringStream::PrintSecurityTokenIfChanged(Object* f) {
void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
- if (f->IsHeapObject() &&
- HEAP->Contains(HeapObject::cast(f)) &&
- HEAP->Contains(HeapObject::cast(f)->map()) &&
- HeapObject::cast(f)->map()->IsMap()) {
- if (f->IsJSFunction()) {
- JSFunction* fun = JSFunction::cast(f);
- // Common case: on-stack function present and resolved.
- PrintPrototype(fun, receiver);
- *code = fun->code();
- } else if (f->IsInternalizedString()) {
- // Unresolved and megamorphic calls: Instead of the function
- // we have the function name on the stack.
- PrintName(f);
- Add("/* unresolved */ ");
- } else {
- // Unless this is the frame of a built-in function, we should always have
- // the callee function or name on the stack. If we don't, we have a
- // problem or a change of the stack frame layout.
- Add("%o", f);
- Add("/* warning: no JSFunction object or function name found */ ");
- }
- /* } else if (is_trampoline()) {
- Print("trampoline ");
- */
+ if (!f->IsHeapObject()) {
+ Add("/* warning: 'function' was not a heap object */ ");
+ return;
+ }
+ Heap* heap = HeapObject::cast(f)->GetHeap();
+ if (!heap->Contains(HeapObject::cast(f))) {
+ Add("/* warning: 'function' was not on the heap */ ");
+ return;
+ }
+ if (!heap->Contains(HeapObject::cast(f)->map())) {
+ Add("/* warning: function's map was not on the heap */ ");
+ return;
+ }
+ if (!HeapObject::cast(f)->map()->IsMap()) {
+ Add("/* warning: function's map was not a valid map */ ");
+ return;
+ }
+ if (f->IsJSFunction()) {
+ JSFunction* fun = JSFunction::cast(f);
+ // Common case: on-stack function present and resolved.
+ PrintPrototype(fun, receiver);
+ *code = fun->code();
+ } else if (f->IsInternalizedString()) {
+ // Unresolved and megamorphic calls: Instead of the function
+ // we have the function name on the stack.
+ PrintName(f);
+ Add("/* unresolved */ ");
} else {
- if (!f->IsHeapObject()) {
- Add("/* warning: 'function' was not a heap object */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f))) {
- Add("/* warning: 'function' was not on the heap */ ");
- return;
- }
- if (!HEAP->Contains(HeapObject::cast(f)->map())) {
- Add("/* warning: function's map was not on the heap */ ");
- return;
- }
- if (!HeapObject::cast(f)->map()->IsMap()) {
- Add("/* warning: function's map was not a valid map */ ");
- return;
- }
- Add("/* warning: Invalid JSFunction object found */ ");
+ // Unless this is the frame of a built-in function, we should always have
+ // the callee function or name on the stack. If we don't, we have a
+ // problem or a change of the stack frame layout.
+ Add("%o", f);
+ Add("/* warning: no JSFunction object or function name found */ ");
}
}
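
Editor's note: the StringStream changes are part of the patch-wide removal of Isolate::Current() and the HEAP macro; the isolate or heap is now recovered from an object at hand (GetIsolate()/GetHeap()) or passed in explicitly. A hedged sketch of the new calling convention (DumpStack and the allocator choice are illustrative, not code from this patch):

    // Hypothetical caller showing explicit isolate threading.
    void DumpStack(Isolate* isolate) {
      HeapStringAllocator allocator;
      StringStream stream(&allocator);
      stream.Add("stack dump\n");
      stream.Log(isolate);  // previously stream.Log() reached for Isolate::Current()
      Handle<String> dump = stream.ToString(isolate);
      USE(dump);
    }
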
diff --git a/chromium/v8/src/string-stream.h b/chromium/v8/src/string-stream.h
index 2367994116e..e3db2a8a868 100644
--- a/chromium/v8/src/string-stream.h
+++ b/chromium/v8/src/string-stream.h
@@ -147,8 +147,8 @@ class StringStream {
// Getting the message out.
void OutputToFile(FILE* out);
void OutputToStdOut() { OutputToFile(stdout); }
- void Log();
- Handle<String> ToString();
+ void Log(Isolate* isolate);
+ Handle<String> ToString(Isolate* isolate);
SmartArrayPointer<const char> ToCString() const;
int length() const { return length_; }
@@ -169,10 +169,10 @@ class StringStream {
}
// Mentioned object cache support.
- void PrintMentionedObjectCache();
- static void ClearMentionedObjectCache();
+ void PrintMentionedObjectCache(Isolate* isolate);
+ static void ClearMentionedObjectCache(Isolate* isolate);
#ifdef DEBUG
- static bool IsMentionedObjectCacheClear();
+ static bool IsMentionedObjectCacheClear(Isolate* isolate);
#endif
diff --git a/chromium/v8/src/stub-cache.cc b/chromium/v8/src/stub-cache.cc
index 9e29a95ebf4..bb8a76609d1 100644
--- a/chromium/v8/src/stub-cache.cc
+++ b/chromium/v8/src/stub-cache.cc
@@ -45,9 +45,7 @@ namespace internal {
StubCache::StubCache(Isolate* isolate)
- : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
-}
+ : isolate_(isolate) { }
void StubCache::Initialize() {
@@ -168,68 +166,68 @@ Handle<Code> StubCache::FindStoreHandler(Handle<Name> name,
}
-Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<JSObject> receiver,
+Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name) {
- Handle<Code> ic = FindIC(name, receiver, Code::LOAD_IC, handler->type());
+ Handle<Map> map(receiver->map());
+ Handle<Code> ic = FindIC(name, map, Code::LOAD_IC, handler->type());
if (!ic.is_null()) return ic;
LoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
-Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC(Handle<JSObject> receiver,
- Handle<Code> handler,
- Handle<Name> name) {
- Handle<Code> ic = FindIC(
- name, receiver, Code::KEYED_LOAD_IC, handler->type());
+Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC(
+ Handle<HeapObject> receiver,
+ Handle<Code> handler,
+ Handle<Name> name) {
+ Handle<Map> map(receiver->map());
+ Handle<Code> ic = FindIC(name, map, Code::KEYED_LOAD_IC, handler->type());
if (!ic.is_null()) return ic;
KeyedLoadStubCompiler ic_compiler(isolate());
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
-Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<JSObject> receiver,
+Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode) {
+ Handle<Map> map(receiver->map());
Handle<Code> ic = FindIC(
- name, receiver, Code::STORE_IC, handler->type(), strict_mode);
+ name, map, Code::STORE_IC, handler->type(), strict_mode);
if (!ic.is_null()) return ic;
StoreStubCompiler ic_compiler(isolate(), strict_mode);
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
Handle<Code> StubCache::ComputeMonomorphicKeyedStoreIC(
- Handle<JSObject> receiver,
+ Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode) {
+ Handle<Map> map(receiver->map());
Handle<Code> ic = FindIC(
- name, receiver, Code::KEYED_STORE_IC, handler->type(), strict_mode);
+ name, map, Code::KEYED_STORE_IC, handler->type(), strict_mode);
if (!ic.is_null()) return ic;
KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
- ic = ic_compiler.CompileMonomorphicIC(
- Handle<Map>(receiver->map()), handler, name);
+ ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
- JSObject::UpdateMapCodeCache(receiver, name, ic);
+ HeapObject::UpdateMapCodeCache(receiver, name, ic);
return ic;
}
@@ -245,12 +243,12 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
Handle<Name> cache_name = factory()->empty_string();
Handle<JSObject> current;
Handle<Object> next = receiver;
- Handle<GlobalObject> global;
+ Handle<JSGlobalObject> global;
do {
current = Handle<JSObject>::cast(next);
next = Handle<Object>(current->GetPrototype(), isolate_);
- if (current->IsGlobalObject()) {
- global = Handle<GlobalObject>::cast(current);
+ if (current->IsJSGlobalObject()) {
+ global = Handle<JSGlobalObject>::cast(current);
cache_name = name;
} else if (!current->HasFastProperties()) {
cache_name = name;
@@ -266,7 +264,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
handler =
compiler.CompileLoadNonexistent(receiver, current, cache_name, global);
- JSObject::UpdateMapCodeCache(receiver, cache_name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, cache_name, handler);
return handler;
}
@@ -291,7 +289,7 @@ Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadField(receiver, holder, name, field, representation);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -310,7 +308,25 @@ Handle<Code> StubCache::ComputeLoadCallback(
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadCallback(receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
+}
+
+
+Handle<Code> StubCache::ComputeLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization) {
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindLoadHandler(
+ name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
+ if (!stub.is_null()) return stub;
+
+ LoadStubCompiler compiler(isolate_);
+ Handle<Code> handler =
+ compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -327,7 +343,7 @@ Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadViaGetter(receiver, holder, name, getter);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -343,7 +359,7 @@ Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -360,7 +376,7 @@ Handle<Code> StubCache::ComputeLoadInterceptor(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadInterceptor(receiver, holder, name);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -383,7 +399,7 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name,
LoadStubCompiler compiler(isolate_);
Handle<Code> ic =
compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- JSObject::UpdateMapCodeCache(stub_holder, name, ic);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, ic);
return ic;
}
@@ -409,7 +425,7 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadField(receiver, holder, name, field, representation);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -426,7 +442,7 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name,
KeyedLoadStubCompiler compiler(isolate_);
handler = compiler.CompileLoadConstant(receiver, holder, name, value);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -442,7 +458,7 @@ Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<Name> name,
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadInterceptor(receiver, holder, name);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -460,7 +476,25 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
compiler.CompileLoadCallback(receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
+}
+
+
+Handle<Code> StubCache::ComputeKeyedLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization) {
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindLoadHandler(
+ name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
+ if (!stub.is_null()) return stub;
+
+ KeyedLoadStubCompiler compiler(isolate_);
+ Handle<Code> handler =
+ compiler.CompileLoadCallback(receiver, holder, name, call_optimization);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -475,7 +509,7 @@ Handle<Code> StubCache::ComputeStoreField(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -492,7 +526,7 @@ Handle<Code> StubCache::ComputeStoreTransition(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler =
compiler.CompileStoreTransition(receiver, lookup, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -572,7 +606,7 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name,
Handle<Map> cell_map(isolate_->heap()->global_property_cell_map());
code->ReplaceNthObject(1, *cell_map, *cell);
- JSObject::UpdateMapCodeCache(receiver, name, code);
+ HeapObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -592,7 +626,25 @@ Handle<Code> StubCache::ComputeStoreCallback(
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreCallback(
receiver, holder, name, callback);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
+ return handler;
+}
+
+
+Handle<Code> StubCache::ComputeStoreCallback(
+ Handle<Name> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization,
+ StrictModeFlag strict_mode) {
+ Handle<Code> stub = FindStoreHandler(
+ name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode);
+ if (!stub.is_null()) return stub;
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> handler = compiler.CompileStoreCallback(
+ receiver, holder, name, call_optimization);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -609,7 +661,7 @@ Handle<Code> StubCache::ComputeStoreViaSetter(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreViaSetter(
receiver, holder, name, setter);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -623,7 +675,7 @@ Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name,
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> handler = compiler.CompileStoreInterceptor(receiver, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -638,7 +690,7 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name,
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -656,7 +708,7 @@ Handle<Code> StubCache::ComputeKeyedStoreTransition(
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> handler =
compiler.CompileStoreTransition(receiver, lookup, transition, name);
- JSObject::UpdateMapCodeCache(receiver, name, handler);
+ HeapObject::UpdateMapCodeCache(receiver, name, handler);
return handler;
}
@@ -713,7 +765,7 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
if (CallStubCompiler::CanBeCached(function)) {
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
}
return code;
}
@@ -754,7 +806,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
PROFILE(isolate_,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -793,7 +845,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -824,7 +876,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
if (CallStubCompiler::CanBeCached(function)) {
- JSObject::UpdateMapCodeCache(stub_holder, name, code);
+ HeapObject::UpdateMapCodeCache(stub_holder, name, code);
}
return code;
}
@@ -1176,7 +1228,8 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
JSObject* recv = JSObject::cast(args[0]);
ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
+ v8::AccessorSetterCallback fun =
+ FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address);
ASSERT(fun != NULL);
ASSERT(callback->IsCompatibleReceiver(recv));
Handle<Name> name = args.at<Name>(2);
@@ -1196,9 +1249,6 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
}
-static const int kAccessorInfoOffsetInInterceptorArgs = 2;
-
-
/**
* Attempts to load a property with an interceptor (which must be present),
* but doesn't search the prototype chain.
@@ -1207,13 +1257,11 @@ static const int kAccessorInfoOffsetInInterceptorArgs = 2;
* provide any value for the given name.
*/
RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
- typedef PropertyCallbackArguments PCA;
- static const int kArgsOffset = kAccessorInfoOffsetInInterceptorArgs;
- Handle<Name> name_handle = args.at<Name>(0);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
- ASSERT(kArgsOffset == 2);
- // No ReturnValue in interceptors.
- ASSERT_EQ(kArgsOffset + PCA::kArgsLength - 2, args.length());
+ ASSERT(args.length() == StubCache::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args.at<Name>(StubCache::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info =
+ args.at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
// TODO(rossberg): Support symbols in the API.
if (name_handle->IsSymbol())
@@ -1221,18 +1269,16 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
ASSERT(getter != NULL);
Handle<JSObject> receiver =
- args.at<JSObject>(kArgsOffset - PCA::kThisIndex);
+ args.at<JSObject>(StubCache::kInterceptorArgsThisIndex);
Handle<JSObject> holder =
- args.at<JSObject>(kArgsOffset - PCA::kHolderIndex);
- PropertyCallbackArguments callback_args(isolate,
- interceptor_info->data(),
- *receiver,
- *holder);
+ args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
+ PropertyCallbackArguments callback_args(
+ isolate, interceptor_info->data(), *receiver, *holder);
{
// Use the interceptor getter.
HandleScope scope(isolate);
@@ -1256,7 +1302,7 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
// can't use either LoadIC or KeyedLoadIC constructors.
IC ic(IC::NO_EXTRA_FRAME, isolate);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsUndeclaredGlobal()) return HEAP->undefined_value();
+ if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value();
// Throw a reference error.
HandleScope scope(isolate);
@@ -1270,17 +1316,15 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) {
static MaybeObject* LoadWithInterceptor(Arguments* args,
PropertyAttributes* attrs) {
- typedef PropertyCallbackArguments PCA;
- static const int kArgsOffset = kAccessorInfoOffsetInInterceptorArgs;
- Handle<Name> name_handle = args->at<Name>(0);
- Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
- ASSERT(kArgsOffset == 2);
- // No ReturnValue in interceptors.
- ASSERT_EQ(kArgsOffset + PCA::kArgsLength - 2, args->length());
+ ASSERT(args->length() == StubCache::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args->at<Name>(StubCache::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info =
+ args->at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
Handle<JSObject> receiver_handle =
- args->at<JSObject>(kArgsOffset - PCA::kThisIndex);
+ args->at<JSObject>(StubCache::kInterceptorArgsThisIndex);
Handle<JSObject> holder_handle =
- args->at<JSObject>(kArgsOffset - PCA::kHolderIndex);
+ args->at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
Isolate* isolate = receiver_handle->GetIsolate();
@@ -1291,8 +1335,8 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
Handle<String> name = Handle<String>::cast(name_handle);
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetter getter =
- FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
ASSERT(getter != NULL);
PropertyCallbackArguments callback_args(isolate,
@@ -1595,6 +1639,40 @@ Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<JSObject> object,
}
+void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<Name> name,
+ Label* success,
+ Handle<JSGlobalObject> global) {
+ Label miss;
+
+ Register holder =
+ HandlerFrontendHeader(object, receiver(), last, name, &miss);
+
+ if (!last->HasFastProperties() &&
+ !last->IsJSGlobalObject() &&
+ !last->IsJSGlobalProxy()) {
+ if (!name->IsUniqueName()) {
+ ASSERT(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ ASSERT(last->property_dictionary()->FindEntry(*name) ==
+ NameDictionary::kNotFound);
+ GenerateDictionaryNegativeLookup(masm(), &miss, holder, name,
+ scratch2(), scratch3());
+ }
+
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (!global.is_null()) {
+ GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
+ }
+
+ HandlerFrontendFooter(name, success, &miss);
+}
+
+
Handle<Code> BaseLoadStubCompiler::CompileLoadField(
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -1647,6 +1725,25 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
}
+Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ ASSERT(call_optimization.is_simple_api_call());
+ Label success;
+
+ Handle<JSFunction> callback = call_optimization.constant_function();
+ CallbackHandlerFrontend(
+ object, receiver(), holder, name, &success, callback);
+ __ bind(&success);
+ GenerateLoadCallback(call_optimization);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -1855,9 +1952,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
- if (FLAG_compiled_keyed_stores &&
- (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements())) {
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
stub = KeyedStoreFastElementStub(
is_jsarray,
elements_kind,
@@ -1998,9 +2094,8 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
is_js_array,
store_mode_).GetCode(isolate());
} else {
- if (FLAG_compiled_keyed_stores &&
- (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements())) {
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
cached_stub = KeyedStoreFastElementStub(
is_js_array,
elements_kind,
diff --git a/chromium/v8/src/stub-cache.h b/chromium/v8/src/stub-cache.h
index bd0678ed3a7..12d4fc5266a 100644
--- a/chromium/v8/src/stub-cache.h
+++ b/chromium/v8/src/stub-cache.h
@@ -48,6 +48,8 @@ namespace internal {
// invalidate the cache whenever a prototype map is changed. The stub
// validates the map chain as in the mono-morphic case.
+
+class CallOptimization;
class SmallMapList;
class StubCache;
@@ -102,20 +104,20 @@ class StubCache {
Code::StubType type,
StrictModeFlag strict_mode);
- Handle<Code> ComputeMonomorphicLoadIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicLoadIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name);
- Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name);
- Handle<Code> ComputeMonomorphicStoreIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicStoreIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode);
- Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<JSObject> receiver,
+ Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<HeapObject> receiver,
Handle<Code> handler,
Handle<Name> name,
StrictModeFlag strict_mode);
@@ -136,6 +138,11 @@ class StubCache {
Handle<JSObject> holder,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> ComputeLoadCallback(Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization);
+
Handle<Code> ComputeLoadViaGetter(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -173,6 +180,12 @@ class StubCache {
Handle<JSObject> holder,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> ComputeKeyedLoadCallback(
+ Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization);
+
Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -209,6 +222,12 @@ class StubCache {
Handle<ExecutableAccessorInfo> callback,
StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreCallback(Handle<Name> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ const CallOptimization& call_optimization,
+ StrictModeFlag strict_mode);
+
Handle<Code> ComputeStoreViaSetter(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -389,6 +408,16 @@ class StubCache {
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
+ // These constants describe the structure of the interceptor arguments on the
+ // stack. The arguments are pushed by the (platform-specific)
+ // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
+ // LoadWithInterceptor.
+ static const int kInterceptorArgsNameIndex = 0;
+ static const int kInterceptorArgsInfoIndex = 1;
+ static const int kInterceptorArgsThisIndex = 2;
+ static const int kInterceptorArgsHolderIndex = 3;
+ static const int kInterceptorArgsLength = 4;
+
private:
explicit StubCache(Isolate* isolate);
@@ -516,6 +545,18 @@ class StubCompiler BASE_EMBEDDED {
int index,
Register prototype);
+ // Helper function used to check that the dictionary doesn't contain
+ // the property. This check may return false negatives, so the code at
+ // miss_label must always fall back to a complete property lookup.
+ // This function is safe to call if the receiver has fast properties.
+ // Name must be unique and receiver must be a heap object.
+ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register r0,
+ Register r1);
+
// Generates prototype loading code that uses the objects from the
// context we were in when this function was called. If the context
// has changed, a jump to miss is performed. This ties the generated
@@ -552,6 +593,24 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Label* miss_label);
+ // Generate code to check that a global property cell is empty. Create
+ // the property cell at compilation time if no cell exists for the
+ // property.
+ static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss);
+
+ // Calls GenerateCheckPropertyCell for each global object in the prototype
+ // chain from object to (but not including) holder.
+ static void GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss);
+
static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
// Generates code that verifies that the property holder has not changed
@@ -705,6 +764,11 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> CompileLoadCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
+
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
@@ -730,12 +794,12 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback);
+ Handle<Object> callback);
void NonexistentHandlerFrontend(Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
Label* success,
- Handle<GlobalObject> global);
+ Handle<JSGlobalObject> global);
void GenerateLoadField(Register reg,
Handle<JSObject> holder,
@@ -744,6 +808,7 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler {
void GenerateLoadConstant(Handle<Object> value);
void GenerateLoadCallback(Register reg,
Handle<ExecutableAccessorInfo> callback);
+ void GenerateLoadCallback(const CallOptimization& call_optimization);
void GenerateLoadInterceptor(Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -775,7 +840,7 @@ class LoadStubCompiler: public BaseLoadStubCompiler {
Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
- Handle<GlobalObject> global);
+ Handle<JSGlobalObject> global);
static void GenerateLoadViaGetter(MacroAssembler* masm,
Handle<JSFunction> getter);
@@ -941,6 +1006,11 @@ class StoreStubCompiler: public BaseStoreStubCompiler {
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization);
+
static void GenerateStoreViaSetter(MacroAssembler* masm,
Handle<JSFunction> setter);
@@ -952,10 +1022,6 @@ class StoreStubCompiler: public BaseStoreStubCompiler {
Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
Handle<Name> name);
- Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
- Handle<PropertyCell> holder,
- Handle<Name> name);
-
private:
static Register* registers();
virtual Code::Kind kind() { return Code::STORE_IC; }
@@ -984,18 +1050,6 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps);
- static void GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array,
- ElementsKind element_kind,
- KeyedAccessStoreMode store_mode);
-
- static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode);
-
- static void GenerateStoreExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
-
static void GenerateStoreDictionaryElement(MacroAssembler* masm);
protected:
@@ -1040,8 +1094,6 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler {
V(ArrayCode)
-class CallOptimization;
-
class CallStubCompiler: public StubCompiler {
public:
CallStubCompiler(Isolate* isolate,
@@ -1173,6 +1225,12 @@ class CallOptimization BASE_EMBEDDED {
int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
Handle<JSObject> holder) const;
+ bool IsCompatibleReceiver(Object* receiver) {
+ ASSERT(is_simple_api_call());
+ if (expected_receiver_type_.is_null()) return true;
+ return receiver->IsInstanceOf(*expected_receiver_type_);
+ }
+
private:
void Initialize(Handle<JSFunction> function);
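
Editor's note: the new CallOptimization::IsCompatibleReceiver mirrors the receiver check already performed on ExecutableAccessorInfo in StoreCallbackProperty above: before a simple API call is wired up, the receiver must match the expected receiver type or the fast path is unsafe. Roughly how a compiler might guard on it (the surrounding code is illustrative, not a hunk from this patch):

    // Illustrative guard only.
    if (call_optimization.is_simple_api_call() &&
        call_optimization.IsCompatibleReceiver(*receiver)) {
      // Safe to compile the direct API-call handler for this receiver.
    } else {
      // Fall back to the generic (slow) property path.
    }
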
diff --git a/chromium/v8/src/sweeper-thread.cc b/chromium/v8/src/sweeper-thread.cc
index ede567a4857..58c684a54f6 100644
--- a/chromium/v8/src/sweeper-thread.cc
+++ b/chromium/v8/src/sweeper-thread.cc
@@ -42,9 +42,9 @@ SweeperThread::SweeperThread(Isolate* isolate)
isolate_(isolate),
heap_(isolate->heap()),
collector_(heap_->mark_compact_collector()),
- start_sweeping_semaphore_(OS::CreateSemaphore(0)),
- end_sweeping_semaphore_(OS::CreateSemaphore(0)),
- stop_semaphore_(OS::CreateSemaphore(0)),
+ start_sweeping_semaphore_(0),
+ end_sweeping_semaphore_(0),
+ stop_semaphore_(0),
free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
@@ -61,10 +61,10 @@ void SweeperThread::Run() {
DisallowHandleDereference no_deref;
while (true) {
- start_sweeping_semaphore_->Wait();
+ start_sweeping_semaphore_.Wait();
if (Acquire_Load(&stop_thread_)) {
- stop_semaphore_->Signal();
+ stop_semaphore_.Signal();
return;
}
@@ -74,7 +74,7 @@ void SweeperThread::Run() {
collector_->SweepInParallel(heap_->old_pointer_space(),
&private_free_list_old_pointer_space_,
&free_list_old_pointer_space_);
- end_sweeping_semaphore_->Signal();
+ end_sweeping_semaphore_.Signal();
}
}
@@ -91,18 +91,18 @@ intptr_t SweeperThread::StealMemory(PagedSpace* space) {
void SweeperThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
- start_sweeping_semaphore_->Signal();
- stop_semaphore_->Wait();
+ start_sweeping_semaphore_.Signal();
+ stop_semaphore_.Wait();
Join();
}
void SweeperThread::StartSweeping() {
- start_sweeping_semaphore_->Signal();
+ start_sweeping_semaphore_.Signal();
}
void SweeperThread::WaitForSweeperThread() {
- end_sweeping_semaphore_->Wait();
+ end_sweeping_semaphore_.Wait();
}
} } // namespace v8::internal
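
Editor's note: the sweeper thread (like FreeListCategory earlier in this patch) switches from heap-allocated OS::CreateSemaphore()/OS::CreateMutex() objects to plain value members from the new platform headers, so construction happens in the initializer list and no destructor cleanup is needed. A self-contained sketch of the pattern with a minimal stand-in Semaphore (the real one lives in V8's platform layer; this is not its implementation):

    #include <condition_variable>
    #include <mutex>

    // Minimal stand-in for the platform Semaphore, shown only to
    // illustrate the value-member style.
    class Semaphore {
     public:
      explicit Semaphore(int count) : count_(count) {}
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };

    class Worker {
     public:
      Worker() : start_semaphore_(0), stop_semaphore_(0) {}
      // No destructor needed: the old code had to delete each
      // OS::CreateSemaphore() result by hand.
     private:
      Semaphore start_semaphore_;
      Semaphore stop_semaphore_;
    };
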
diff --git a/chromium/v8/src/sweeper-thread.h b/chromium/v8/src/sweeper-thread.h
index a170982141d..c36cfc39a29 100644
--- a/chromium/v8/src/sweeper-thread.h
+++ b/chromium/v8/src/sweeper-thread.h
@@ -43,6 +43,7 @@ namespace internal {
class SweeperThread : public Thread {
public:
explicit SweeperThread(Isolate* isolate);
+ ~SweeperThread() {}
void Run();
void Stop();
@@ -50,19 +51,13 @@ class SweeperThread : public Thread {
void WaitForSweeperThread();
intptr_t StealMemory(PagedSpace* space);
- ~SweeperThread() {
- delete start_sweeping_semaphore_;
- delete end_sweeping_semaphore_;
- delete stop_semaphore_;
- }
-
private:
Isolate* isolate_;
Heap* heap_;
MarkCompactCollector* collector_;
- Semaphore* start_sweeping_semaphore_;
- Semaphore* end_sweeping_semaphore_;
- Semaphore* stop_semaphore_;
+ Semaphore start_sweeping_semaphore_;
+ Semaphore end_sweeping_semaphore_;
+ Semaphore stop_semaphore_;
FreeList free_list_old_data_space_;
FreeList free_list_old_pointer_space_;
FreeList private_free_list_old_data_space_;
diff --git a/chromium/v8/src/transitions.cc b/chromium/v8/src/transitions.cc
index 086edcb9948..9d3f038947d 100644
--- a/chromium/v8/src/transitions.cc
+++ b/chromium/v8/src/transitions.cc
@@ -35,20 +35,20 @@ namespace v8 {
namespace internal {
-static MaybeObject* AllocateRaw(int length) {
- Heap* heap = Isolate::Current()->heap();
-
+static MaybeObject* AllocateRaw(Isolate* isolate, int length) {
// Use FixedArray to not use TransitionArray::cast on incomplete object.
FixedArray* array;
- MaybeObject* maybe_array = heap->AllocateFixedArray(length);
+ MaybeObject* maybe_array = isolate->heap()->AllocateFixedArray(length);
if (!maybe_array->To(&array)) return maybe_array;
return array;
}
-MaybeObject* TransitionArray::Allocate(int number_of_transitions) {
+MaybeObject* TransitionArray::Allocate(Isolate* isolate,
+ int number_of_transitions) {
FixedArray* array;
- MaybeObject* maybe_array = AllocateRaw(ToKeyIndex(number_of_transitions));
+ MaybeObject* maybe_array =
+ AllocateRaw(isolate, ToKeyIndex(number_of_transitions));
if (!maybe_array->To(&array)) return maybe_array;
array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
return array;
@@ -77,11 +77,11 @@ MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag,
MaybeObject* maybe_result;
if (flag == SIMPLE_TRANSITION) {
- maybe_result = AllocateRaw(kSimpleTransitionSize);
+ maybe_result = AllocateRaw(target->GetIsolate(), kSimpleTransitionSize);
if (!maybe_result->To(&result)) return maybe_result;
result->set(kSimpleTransitionTarget, target);
} else {
- maybe_result = Allocate(1);
+ maybe_result = Allocate(target->GetIsolate(), 1);
if (!maybe_result->To(&result)) return maybe_result;
result->NoIncrementalWriteBarrierSet(0, key, target);
}
@@ -94,7 +94,7 @@ MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
ASSERT(!IsFullTransitionArray());
int nof = number_of_transitions();
TransitionArray* result;
- MaybeObject* maybe_result = Allocate(nof);
+ MaybeObject* maybe_result = Allocate(GetIsolate(), nof);
if (!maybe_result->To(&result)) return maybe_result;
if (nof == 1) {
@@ -116,7 +116,7 @@ MaybeObject* TransitionArray::CopyInsert(Name* name, Map* target) {
if (insertion_index == kNotFound) ++new_size;
MaybeObject* maybe_array;
- maybe_array = TransitionArray::Allocate(new_size);
+ maybe_array = TransitionArray::Allocate(GetIsolate(), new_size);
if (!maybe_array->To(&result)) return maybe_array;
if (HasPrototypeTransitions()) {
diff --git a/chromium/v8/src/transitions.h b/chromium/v8/src/transitions.h
index fde12798952..b2e98396784 100644
--- a/chromium/v8/src/transitions.h
+++ b/chromium/v8/src/transitions.h
@@ -119,7 +119,8 @@ class TransitionArray: public FixedArray {
inline int Search(Name* name);
// Allocates a TransitionArray.
- MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions);
+ MUST_USE_RESULT static MaybeObject* Allocate(
+ Isolate* isolate, int number_of_transitions);
bool IsSimpleTransition() {
return length() == kSimpleTransitionSize &&
diff --git a/chromium/v8/src/type-info.cc b/chromium/v8/src/type-info.cc
index 769df07e4fa..190eb3e6fff 100644
--- a/chromium/v8/src/type-info.cc
+++ b/chromium/v8/src/type-info.cc
@@ -152,12 +152,8 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool standard_store = FLAG_compiled_keyed_stores ||
- (Code::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
- STANDARD_STORE);
bool preliminary_checks =
code->is_keyed_store_stub() &&
- standard_store &&
code->ic_state() == MONOMORPHIC &&
Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL;
if (!preliminary_checks) return false;
@@ -174,10 +170,7 @@ bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- bool standard_store = FLAG_compiled_keyed_stores ||
- (Code::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
- STANDARD_STORE);
- return code->is_keyed_store_stub() && standard_store &&
+ return code->is_keyed_store_stub() &&
code->ic_state() == POLYMORPHIC;
}
return false;
@@ -384,17 +377,6 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
}
-Handle<Type> TypeFeedbackOracle::UnaryType(TypeFeedbackId id) {
- Handle<Object> object = GetInfo(id);
- if (!object->IsCode()) {
- return handle(Type::None(), isolate());
- }
- Handle<Code> code = Handle<Code>::cast(object);
- ASSERT(code->is_unary_op_stub());
- return UnaryOpStub(code->extended_extra_ic_state()).GetType(isolate());
-}
-
-
void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
Handle<Type>* left,
Handle<Type>* right,
@@ -658,7 +640,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
}
break;
- case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC:
diff --git a/chromium/v8/src/type-info.h b/chromium/v8/src/type-info.h
index 1a7c67dfb88..4b376c84bdc 100644
--- a/chromium/v8/src/type-info.h
+++ b/chromium/v8/src/type-info.h
@@ -297,7 +297,6 @@ class TypeFeedbackOracle: public ZoneObject {
byte ToBooleanTypes(TypeFeedbackId id);
// Get type information for arithmetic operations and compares.
- Handle<Type> UnaryType(TypeFeedbackId id);
void BinaryType(TypeFeedbackId id,
Handle<Type>* left,
Handle<Type>* right,
diff --git a/chromium/v8/src/typedarray.js b/chromium/v8/src/typedarray.js
index 7bd16f670b4..ec9849df699 100644
--- a/chromium/v8/src/typedarray.js
+++ b/chromium/v8/src/typedarray.js
@@ -69,6 +69,9 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
function ConstructByLength(obj, length) {
var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ if (l > %MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
var byteLength = l * elementSize;
var buffer = new global.ArrayBuffer(byteLength);
%TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
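
Editor's note: the added guard rejects lengths above the Smi maximum before byteLength = l * elementSize is computed, so the multiplication cannot overflow into a bogus buffer size. The same check-before-multiply pattern in C++ (a generic sketch; the bound and error type are stand-ins, not V8 code):

    #include <cstdint>
    #include <stdexcept>

    // Validate the element count first; only then derive the byte length.
    std::uint64_t CheckedByteLength(std::uint64_t length,
                                    std::uint64_t element_size,
                                    std::uint64_t max_length) {
      if (length > max_length) {
        throw std::range_error("invalid typed array length");
      }
      // Safe: with a Smi-sized max_length and a small element size the
      // product fits comfortably in 64 bits.
      return length * element_size;
    }
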
diff --git a/chromium/v8/src/types.h b/chromium/v8/src/types.h
index b2eb60c6920..2810ffc8a17 100644
--- a/chromium/v8/src/types.h
+++ b/chromium/v8/src/types.h
@@ -92,7 +92,7 @@ namespace internal {
// Note that the bitset representation is closed under both Union and Intersect.
//
// The type representation is heap-allocated, so cannot (currently) be used in
-// a parallel compilation context.
+// a concurrent compilation context.
#define PRIMITIVE_TYPE_LIST(V) \
@@ -303,6 +303,11 @@ struct Bounds {
explicit Bounds(Handle<Type> t) : lower(t), upper(t) {}
Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {}
+ // Unrestricted bounds.
+ static Bounds Unbounded(Isolate* isl) {
+ return Bounds(Type::None(), Type::Any(), isl);
+ }
+
// Meet: both b1 and b2 are known to hold.
static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) {
return Bounds(
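
Editor's note: Bounds::Unbounded is the top element of the bounds lattice, the pair [None, Any]: nothing is known about the lower bound and everything is admitted by the upper bound, which makes it the identity for the Both (meet) operation defined just above. A hypothetical use when no type feedback exists:

    // Illustrative only: with no feedback, start from the top element.
    Bounds bounds = Bounds::Unbounded(isolate);
    // Meeting with any concrete bounds b just yields b back:
    //   Both(Unbounded(isolate), b, isolate) == b
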
diff --git a/chromium/v8/src/typing.cc b/chromium/v8/src/typing.cc
index 4220d2110db..34bb64bd7de 100644
--- a/chromium/v8/src/typing.cc
+++ b/chromium/v8/src/typing.cc
@@ -40,8 +40,9 @@ AstTyper::AstTyper(CompilationInfo* info)
Handle<Code>(info->closure()->shared()->code()),
Handle<Context>(info->closure()->context()->native_context()),
info->isolate(),
- info->zone()) {
- InitializeAstVisitor();
+ info->zone()),
+ store_(info->zone()) {
+ InitializeAstVisitor(info->isolate());
}
@@ -79,12 +80,16 @@ void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
RECURSE(Visit(stmt));
+ if (stmt->IsJump()) break;
}
}
void AstTyper::VisitBlock(Block* stmt) {
RECURSE(VisitStatements(stmt->statements()));
+ if (stmt->labels() != NULL) {
+ store_.Forget(); // Control may transfer here via 'break l'.
+ }
}
@@ -98,30 +103,41 @@ void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
void AstTyper::VisitIfStatement(IfStatement* stmt) {
- RECURSE(Visit(stmt->condition()));
- RECURSE(Visit(stmt->then_statement()));
- RECURSE(Visit(stmt->else_statement()));
-
+ // Collect type feedback.
if (!stmt->condition()->ToBooleanIsTrue() &&
!stmt->condition()->ToBooleanIsFalse()) {
stmt->condition()->RecordToBooleanTypeFeedback(oracle());
}
+
+ RECURSE(Visit(stmt->condition()));
+ Effects then_effects = EnterEffects();
+ RECURSE(Visit(stmt->then_statement()));
+ ExitEffects();
+ Effects else_effects = EnterEffects();
+ RECURSE(Visit(stmt->else_statement()));
+ ExitEffects();
+ then_effects.Alt(else_effects);
+ store_.Seq(then_effects);
}
void AstTyper::VisitContinueStatement(ContinueStatement* stmt) {
+ // TODO(rossberg): is it worth having a non-termination effect?
}
void AstTyper::VisitBreakStatement(BreakStatement* stmt) {
+ // TODO(rossberg): is it worth having a non-termination effect?
}
void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
- RECURSE(Visit(stmt->expression()));
-
+ // Collect type feedback.
// TODO(rossberg): we only need this for inlining into test contexts...
stmt->expression()->RecordToBooleanTypeFeedback(oracle());
+
+ RECURSE(Visit(stmt->expression()));
+ // TODO(rossberg): is it worth having a non-termination effect?
}
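
Editor's note: the rewritten visitors make the typer flow-sensitive. Each branch runs in its own effect scope (EnterEffects/ExitEffects), the two scopes are joined with Alt since either branch may have executed, the join is sequenced into the ambient store with Seq, and Forget wipes the store at points reachable from unknown predecessors. A toy model of that join, with an effect reduced to a set of possibly-assigned variables (none of this is the V8 Effects API):

    #include <set>
    #include <string>

    // Toy effect: the variables a branch may have assigned.
    struct Effects {
      std::set<std::string> assigned;
      // Alt: either branch may have run, so take the union.
      void Alt(const Effects& other) {
        assigned.insert(other.assigned.begin(), other.assigned.end());
      }
    };

    // Toy store: variables whose inferred bounds are still trusted.
    struct Store {
      std::set<std::string> trusted;
      // Seq: after the join point, assigned variables lose their bounds.
      void Seq(const Effects& e) {
        for (const std::string& v : e.assigned) trusted.erase(v);
      }
      // Forget: control may arrive from an unknown place; trust nothing.
      void Forget() { trusted.clear(); }
    };
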
@@ -133,14 +149,18 @@ void AstTyper::VisitWithStatement(WithStatement* stmt) {
void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
RECURSE(Visit(stmt->tag()));
+
ZoneList<CaseClause*>* clauses = stmt->cases();
SwitchStatement::SwitchType switch_type = stmt->switch_type();
+ Effects local_effects(zone());
+ bool complex_effects = false; // True for label effects or fall-through.
+
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
+ Effects clause_effects = EnterEffects();
+
if (!clause->is_default()) {
Expression* label = clause->label();
- RECURSE(Visit(label));
-
SwitchStatement::SwitchType label_switch_type =
label->IsSmiLiteral() ? SwitchStatement::SMI_SWITCH :
label->IsStringLiteral() ? SwitchStatement::STRING_SWITCH :
@@ -149,13 +169,32 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
switch_type = label_switch_type;
else if (switch_type != label_switch_type)
switch_type = SwitchStatement::GENERIC_SWITCH;
+
+ RECURSE(Visit(label));
+ if (!clause_effects.IsEmpty()) complex_effects = true;
+ }
+
+ ZoneList<Statement*>* stmts = clause->statements();
+ RECURSE(VisitStatements(stmts));
+ ExitEffects();
+ if (stmts->is_empty() || stmts->last()->IsJump()) {
+ local_effects.Alt(clause_effects);
+ } else {
+ complex_effects = true;
}
- RECURSE(VisitStatements(clause->statements()));
}
+
+ if (complex_effects) {
+ store_.Forget(); // Reached this in unknown state.
+ } else {
+ store_.Seq(local_effects);
+ }
+
if (switch_type == SwitchStatement::UNKNOWN_SWITCH)
switch_type = SwitchStatement::GENERIC_SWITCH;
stmt->set_switch_type(switch_type);
+ // Collect type feedback.
// TODO(rossberg): can we eliminate this special case and extra loop?
if (switch_type == SwitchStatement::SMI_SWITCH) {
for (int i = 0; i < clauses->length(); ++i) {
@@ -168,22 +207,31 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
- RECURSE(Visit(stmt->body()));
- RECURSE(Visit(stmt->cond()));
-
+ // Collect type feedback.
if (!stmt->cond()->ToBooleanIsTrue()) {
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
}
+
+ // TODO(rossberg): refine the unconditional Forget (here and elsewhere) by
+ // computing the set of variables assigned in only some of the origins of the
+ // control transfer (such as the loop body here).
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
+ RECURSE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->cond()));
+ store_.Forget(); // Control may transfer here via 'break'.
}
void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
- RECURSE(Visit(stmt->cond()));
- RECURSE(Visit(stmt->body()));
-
+ // Collect type feedback.
if (!stmt->cond()->ToBooleanIsTrue()) {
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
}
+
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
+ RECURSE(Visit(stmt->cond()));
+ RECURSE(Visit(stmt->body()));
+ store_.Forget(); // Control may transfer here via termination or 'break'.
}
@@ -191,45 +239,65 @@ void AstTyper::VisitForStatement(ForStatement* stmt) {
if (stmt->init() != NULL) {
RECURSE(Visit(stmt->init()));
}
+ store_.Forget(); // Control may transfer here via looping.
if (stmt->cond() != NULL) {
- RECURSE(Visit(stmt->cond()));
-
+ // Collect type feedback.
stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+
+ RECURSE(Visit(stmt->cond()));
}
RECURSE(Visit(stmt->body()));
+ store_.Forget(); // Control may transfer here via 'continue'.
if (stmt->next() != NULL) {
RECURSE(Visit(stmt->next()));
}
+ store_.Forget(); // Control may transfer here via termination or 'break'.
}
void AstTyper::VisitForInStatement(ForInStatement* stmt) {
+ // Collect type feedback.
+ stmt->RecordTypeFeedback(oracle());
+
RECURSE(Visit(stmt->enumerable()));
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->body()));
-
- stmt->RecordTypeFeedback(oracle());
+ store_.Forget(); // Control may transfer here via 'break'.
}
void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
RECURSE(Visit(stmt->iterable()));
+ store_.Forget(); // Control may transfer here via looping or 'continue'.
RECURSE(Visit(stmt->body()));
+ store_.Forget(); // Control may transfer here via 'break'.
}
void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Effects try_effects = EnterEffects();
RECURSE(Visit(stmt->try_block()));
+ ExitEffects();
+ Effects catch_effects = EnterEffects();
+ store_.Forget(); // Control may transfer here via 'throw'.
RECURSE(Visit(stmt->catch_block()));
+ ExitEffects();
+ try_effects.Alt(catch_effects);
+ store_.Seq(try_effects);
+ // At this point, only variables that were reassigned in the catch block are
+ // still remembered.
}
void AstTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
RECURSE(Visit(stmt->try_block()));
+ store_.Forget(); // Control may transfer here via 'throw'.
RECURSE(Visit(stmt->finally_block()));
}
void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ store_.Forget(); // May do whatever.
}
@@ -242,11 +310,18 @@ void AstTyper::VisitSharedFunctionInfoLiteral(SharedFunctionInfoLiteral* expr) {
void AstTyper::VisitConditional(Conditional* expr) {
+ // Collect type feedback.
+ expr->condition()->RecordToBooleanTypeFeedback(oracle());
+
RECURSE(Visit(expr->condition()));
+ Effects then_effects = EnterEffects();
RECURSE(Visit(expr->then_expression()));
+ ExitEffects();
+ Effects else_effects = EnterEffects();
RECURSE(Visit(expr->else_expression()));
-
- expr->condition()->RecordToBooleanTypeFeedback(oracle());
+ ExitEffects();
+ then_effects.Alt(else_effects);
+ store_.Seq(then_effects);
NarrowType(expr, Bounds::Either(
expr->then_expression()->bounds(),
@@ -255,7 +330,10 @@ void AstTyper::VisitConditional(Conditional* expr) {
void AstTyper::VisitVariableProxy(VariableProxy* expr) {
- // TODO(rossberg): typing of variables
+ Variable* var = expr->var();
+ if (var->IsStackAllocated()) {
+ NarrowType(expr, store_.LookupBounds(variable_index(var)));
+ }
}
@@ -274,8 +352,8 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
for (int i = 0; i < properties->length(); ++i) {
ObjectLiteral::Property* prop = properties->at(i);
- RECURSE(Visit(prop->value()));
+ // Collect type feedback.
if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
!CompileTimeValue::IsCompileTimeValue(prop->value())) ||
prop->kind() == ObjectLiteral::Property::COMPUTED) {
@@ -283,6 +361,8 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
prop->RecordTypeFeedback(oracle());
}
}
+
+ RECURSE(Visit(prop->value()));
}
NarrowType(expr, Bounds(Type::Object(), isolate_));
@@ -303,29 +383,33 @@ void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
void AstTyper::VisitAssignment(Assignment* expr) {
// TODO(rossberg): Can we clean this up?
if (expr->is_compound()) {
- RECURSE(Visit(expr->binary_operation()));
-
+ // Collect type feedback.
Expression* target = expr->target();
Property* prop = target->AsProperty();
if (prop != NULL) {
prop->RecordTypeFeedback(oracle(), zone());
- if (!prop->key()->IsPropertyName()) { // i.e., keyed
- expr->RecordTypeFeedback(oracle(), zone());
- }
+ expr->RecordTypeFeedback(oracle(), zone());
}
+ RECURSE(Visit(expr->binary_operation()));
+
NarrowType(expr, expr->binary_operation()->bounds());
} else {
- RECURSE(Visit(expr->target()));
- RECURSE(Visit(expr->value()));
-
- if (expr->target()->AsProperty()) {
+ // Collect type feedback.
+ if (expr->target()->IsProperty()) {
expr->RecordTypeFeedback(oracle(), zone());
}
+ RECURSE(Visit(expr->target()));
+ RECURSE(Visit(expr->value()));
+
NarrowType(expr, expr->value()->bounds());
}
- // TODO(rossberg): handle target variables
+
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsStackAllocated()) {
+ store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+ }
}
@@ -333,35 +417,31 @@ void AstTyper::VisitYield(Yield* expr) {
RECURSE(Visit(expr->generator_object()));
RECURSE(Visit(expr->expression()));
- // We don't know anything about the type.
+ // We don't know anything about the result type.
}
void AstTyper::VisitThrow(Throw* expr) {
RECURSE(Visit(expr->exception()));
+ // TODO(rossberg): is it worth having a non-termination effect?
NarrowType(expr, Bounds(Type::None(), isolate_));
}
void AstTyper::VisitProperty(Property* expr) {
+ // Collect type feedback.
+ expr->RecordTypeFeedback(oracle(), zone());
+
RECURSE(Visit(expr->obj()));
RECURSE(Visit(expr->key()));
- expr->RecordTypeFeedback(oracle(), zone());
-
- // We don't know anything about the type.
+ // We don't know anything about the result type.
}
void AstTyper::VisitCall(Call* expr) {
- RECURSE(Visit(expr->expression()));
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(Visit(arg));
- }
-
+ // Collect type feedback.
Expression* callee = expr->expression();
Property* prop = callee->AsProperty();
if (prop != NULL) {
@@ -371,11 +451,26 @@ void AstTyper::VisitCall(Call* expr) {
expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
}
- // We don't know anything about the type.
+ RECURSE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ store_.Forget(); // Eval could do whatever to local variables.
+ }
+
+ // We don't know anything about the result type.
}
void AstTyper::VisitCallNew(CallNew* expr) {
+ // Collect type feedback.
+ expr->RecordTypeFeedback(oracle());
+
RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
@@ -383,9 +478,7 @@ void AstTyper::VisitCallNew(CallNew* expr) {
RECURSE(Visit(arg));
}
- expr->RecordTypeFeedback(oracle());
-
- // We don't know anything about the type.
+ // We don't know anything about the result type.
}
@@ -396,21 +489,19 @@ void AstTyper::VisitCallRuntime(CallRuntime* expr) {
RECURSE(Visit(arg));
}
- // We don't know anything about the type.
+ // We don't know anything about the result type.
}
void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
- RECURSE(Visit(expr->expression()));
-
// Collect type feedback.
- Handle<Type> op_type = oracle()->UnaryType(expr->UnaryOperationFeedbackId());
- NarrowLowerType(expr->expression(), op_type);
if (expr->op() == Token::NOT) {
// TODO(rossberg): only do in test or value context.
expr->expression()->RecordToBooleanTypeFeedback(oracle());
}
+ RECURSE(Visit(expr->expression()));
+
switch (expr->op()) {
case Token::NOT:
case Token::DELETE:
@@ -419,16 +510,6 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
case Token::VOID:
NarrowType(expr, Bounds(Type::Undefined(), isolate_));
break;
- case Token::ADD:
- case Token::SUB: {
- Type* upper = *expr->expression()->bounds().upper;
- if (!upper->Is(Type::Number())) upper = Type::Number();
- NarrowType(expr, Bounds(Type::Smi(), upper, isolate_));
- break;
- }
- case Token::BIT_NOT:
- NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
- break;
case Token::TYPEOF:
NarrowType(expr, Bounds(Type::InternalizedString(), isolate_));
break;
@@ -439,22 +520,25 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
- RECURSE(Visit(expr->expression()));
-
+ // Collect type feedback.
expr->RecordTypeFeedback(oracle(), zone());
Property* prop = expr->expression()->AsProperty();
if (prop != NULL) {
prop->RecordTypeFeedback(oracle(), zone());
}
+ RECURSE(Visit(expr->expression()));
+
NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL && proxy->var()->IsStackAllocated()) {
+ store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+ }
}
void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
-
// Collect type feedback.
Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
@@ -470,15 +554,29 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
NarrowType(expr, expr->right()->bounds());
break;
case Token::OR:
- case Token::AND:
+ case Token::AND: {
+ Effects left_effects = EnterEffects();
+ RECURSE(Visit(expr->left()));
+ ExitEffects();
+ Effects right_effects = EnterEffects();
+ RECURSE(Visit(expr->right()));
+ ExitEffects();
+ left_effects.Alt(right_effects);
+ store_.Seq(left_effects);
+
NarrowType(expr, Bounds::Either(
expr->left()->bounds(), expr->right()->bounds(), isolate_));
break;
+ }
case Token::BIT_OR:
case Token::BIT_AND: {
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
Type* upper = Type::Union(
expr->left()->bounds().upper, expr->right()->bounds().upper);
if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
@@ -488,12 +586,18 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::BIT_XOR:
case Token::SHL:
case Token::SAR:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
NarrowType(expr, Bounds(Type::Smi(), Type::Signed32(), isolate_));
break;
case Token::SHR:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
NarrowType(expr, Bounds(Type::Smi(), Type::Unsigned32(), isolate_));
break;
case Token::ADD: {
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
Bounds l = expr->left()->bounds();
Bounds r = expr->right()->bounds();
Type* lower =
@@ -513,6 +617,8 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MUL:
case Token::DIV:
case Token::MOD:
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
break;
default:
@@ -522,9 +628,6 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AstTyper::VisitCompareOperation(CompareOperation* expr) {
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
-
// Collect type feedback.
Handle<Type> left_type, right_type, combined_type;
oracle()->CompareType(expr->CompareOperationFeedbackId(),
@@ -533,6 +636,9 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
NarrowLowerType(expr->right(), right_type);
expr->set_combined_type(combined_type);
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+
NarrowType(expr, Bounds(Type::Boolean(), isolate_));
}
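The recurring shape in the typing.cc hunks above is: analyze each branch in its own effect set (EnterEffects/ExitEffects), join the alternatives with Alt, sequence the join into the enclosing store with Seq, and Forget everything at points reachable by back edges, 'break', or 'throw'. A toy model of that bookkeeping, not V8's Effects template:

    #include <algorithm>
    #include <map>
    #include <string>

    struct Bounds { int lower, upper; };

    struct Effects {
      // Variable name -> bounds established by this effect set.
      std::map<std::string, Bounds> vars;

      // Alt: control arrived from this branch or the other one.
      void Alt(const Effects& other) {
        std::map<std::string, Bounds>::iterator it = vars.begin();
        while (it != vars.end()) {
          std::map<std::string, Bounds>::const_iterator o =
              other.vars.find(it->first);
          if (o == other.vars.end()) {
            vars.erase(it++);  // assigned on one path only: unknown after join
          } else {
            it->second.lower = std::min(it->second.lower, o->second.lower);
            it->second.upper = std::max(it->second.upper, o->second.upper);
            ++it;
          }
        }
      }

      // Seq: 'later' happens after the effects already recorded here.
      void Seq(const Effects& later) {
        std::map<std::string, Bounds>::const_iterator it;
        for (it = later.vars.begin(); it != later.vars.end(); ++it) {
          vars[it->first] = it->second;
        }
      }

      // Forget: e.g. at loop heads and 'break' targets above.
      void Forget() { vars.clear(); }
    };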
diff --git a/chromium/v8/src/typing.h b/chromium/v8/src/typing.h
index ceef9843650..c942b006327 100644
--- a/chromium/v8/src/typing.h
+++ b/chromium/v8/src/typing.h
@@ -35,6 +35,7 @@
#include "compiler.h"
#include "type-info.h"
#include "types.h"
+#include "effects.h"
#include "zone.h"
#include "scopes.h"
@@ -57,8 +58,13 @@ class AstTyper: public AstVisitor {
private:
explicit AstTyper(CompilationInfo* info);
+ static const int kNoVar = INT_MIN;
+ typedef v8::internal::Effects<int, kNoVar> Effects;
+ typedef v8::internal::NestedEffects<int, kNoVar> Store;
+
CompilationInfo* info_;
TypeFeedbackOracle oracle_;
+ Store store_;
TypeFeedbackOracle* oracle() { return &oracle_; }
Zone* zone() const { return info_->zone(); }
@@ -70,6 +76,17 @@ class AstTyper: public AstVisitor {
e->set_bounds(Bounds::NarrowLower(e->bounds(), t, isolate_));
}
+ Effects EnterEffects() {
+ store_ = store_.Push();
+ return store_.Top();
+ }
+ void ExitEffects() { store_ = store_.Pop(); }
+
+ int variable_index(Variable* var) {
+ return var->IsStackLocal() ? var->index() :
+ var->IsParameter() ? -var->index() : kNoVar;
+ }
+
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitStatements(ZoneList<Statement*>* statements);
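variable_index() gives every tracked variable a distinct integer key for the effects store, mirroring the mapping in the hunk above: stack locals use their slot index, parameters use the negated index, and anything not stack-allocated maps to the kNoVar sentinel. A standalone sketch with a hypothetical Variable type:

    #include <climits>

    struct Variable {
      enum Kind { kStackLocal, kParameter, kOther } kind;  // hypothetical
      int index;
    };

    const int kNoVar = INT_MIN;

    int variable_index(const Variable& var) {
      return var.kind == Variable::kStackLocal ? var.index
           : var.kind == Variable::kParameter  ? -var.index
                                               : kNoVar;
    }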
diff --git a/chromium/v8/src/unicode.h b/chromium/v8/src/unicode.h
index 42a81824bac..a7061745a2a 100644
--- a/chromium/v8/src/unicode.h
+++ b/chromium/v8/src/unicode.h
@@ -235,6 +235,7 @@ struct ConnectorPunctuation {
};
struct ToLowercase {
static const int kMaxWidth = 3;
+ static const bool kIsToLower = true;
static int Convert(uchar c,
uchar n,
uchar* result,
@@ -242,6 +243,7 @@ struct ToLowercase {
};
struct ToUppercase {
static const int kMaxWidth = 3;
+ static const bool kIsToLower = false;
static int Convert(uchar c,
uchar n,
uchar* result,
diff --git a/chromium/v8/src/unique.h b/chromium/v8/src/unique.h
new file mode 100644
index 00000000000..7ae704a26ad
--- /dev/null
+++ b/chromium/v8/src/unique.h
@@ -0,0 +1,266 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_UNIQUE_H_
+#define V8_HYDROGEN_UNIQUE_H_
+
+#include "handles.h"
+#include "utils.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <typename T>
+class UniqueSet;
+
+
+// Represents a handle to an object on the heap, but with the additional
+// ability of checking for equality and hashing without accessing the heap.
+//
+// Creating a Unique<T> requires first dereferencing the handle to obtain
+// the address of the object, which is used as the hashcode and the basis for
+// comparison. The object can be moved later by the GC, but comparison
+// and hashing use the old address of the object, without dereferencing it.
+//
+// Careful! Comparison of two Uniques is only correct if both were created
+// in the same "era" of GC or if at least one is a non-movable object.
+template <typename T>
+class Unique V8_FINAL {
+ public:
+ // TODO(titzer): make private and introduce some builder/owner class.
+ explicit Unique(Handle<T> handle) {
+ if (handle.is_null()) {
+ raw_address_ = NULL;
+ } else {
+ raw_address_ = reinterpret_cast<Address>(*handle);
+ ASSERT_NE(raw_address_, NULL);
+ }
+ handle_ = handle;
+ }
+
+ // Constructor for handling automatic upcasting.
+ // E.g., Unique<JSFunction> can be passed when Unique<Object> is expected.
+ template <class S> Unique(Unique<S> uniq) {
+#ifdef DEBUG
+ T* a = NULL;
+ S* b = NULL;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+#endif
+ raw_address_ = uniq.raw_address_;
+ handle_ = uniq.handle_; // Creates a new handle sharing the same location.
+ }
+
+ template <typename U>
+ bool operator==(const Unique<U>& other) const {
+ return raw_address_ == other.raw_address_;
+ }
+
+ template <typename U>
+ bool operator!=(const Unique<U>& other) const {
+ return raw_address_ != other.raw_address_;
+ }
+
+ intptr_t Hashcode() const {
+ return reinterpret_cast<intptr_t>(raw_address_);
+ }
+
+ bool IsNull() {
+ return raw_address_ == NULL;
+ }
+
+ // Don't do this unless you have access to the heap!
+ // No, seriously! You can compare and hash and set-ify uniques that were
+ // all created at the same time; please don't dereference.
+ Handle<T> handle() {
+ return handle_;
+ }
+
+ friend class UniqueSet<T>; // Uses internal details for speed.
+ template <class U>
+ friend class Unique; // For comparing raw_address values.
+
+ private:
+ Address raw_address_;
+ Handle<T> handle_;
+};
+
+
+template <typename T>
+class UniqueSet V8_FINAL : public ZoneObject {
+ public:
+ // Constructor. A new set will be empty.
+ UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
+
+ // Add a new element to this unique set. Mutates this set. O(|this|).
+ void Add(Unique<T> uniq, Zone* zone) {
+ // Keep the set sorted by the {raw_address} of the unique elements.
+ for (int i = 0; i < size_; i++) {
+ if (array_[i] == uniq) return;
+ if (array_[i].raw_address_ > uniq.raw_address_) {
+ // Insert in the middle.
+ Grow(size_ + 1, zone);
+ for (int j = size_ - 1; j >= i; j--) array_[j + 1] = array_[j];
+ array_[i] = uniq;
+ size_++;
+ return;
+ }
+ }
+ // Append the element to the end.
+ Grow(size_ + 1, zone);
+ array_[size_++] = uniq;
+ }
+
+ // Compare this set against another set. O(|this|).
+ bool Equals(UniqueSet<T>* that) {
+ if (that->size_ != this->size_) return false;
+ for (int i = 0; i < this->size_; i++) {
+ if (this->array_[i] != that->array_[i]) return false;
+ }
+ return true;
+ }
+
+ // Check if this set is a subset of the given set. O(|this| + |that|).
+ bool IsSubset(UniqueSet<T>* that) {
+ if (that->size_ < this->size_) return false;
+ int j = 0;
+ for (int i = 0; i < this->size_; i++) {
+ Unique<T> sought = this->array_[i];
+ while (true) {
+ if (sought == that->array_[j++]) break;
+ // Fail whenever there are more elements in {this} than {that}.
+ if ((this->size_ - i) > (that->size_ - j)) return false;
+ }
+ }
+ return true;
+ }
+
+ // Returns a new set representing the intersection of this set and the other.
+ // O(|this| + |that|).
+ UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) {
+ if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
+
+ UniqueSet<T>* out = new(zone) UniqueSet<T>();
+ out->Grow(Min(this->size_, that->size_), zone);
+
+ int i = 0, j = 0, k = 0;
+ while (i < this->size_ && j < that->size_) {
+ Unique<T> a = this->array_[i];
+ Unique<T> b = that->array_[j];
+ if (a == b) {
+ out->array_[k++] = a;
+ i++;
+ j++;
+ } else if (a.raw_address_ < b.raw_address_) {
+ i++;
+ } else {
+ j++;
+ }
+ }
+
+ out->size_ = k;
+ return out;
+ }
+
+ // Returns a new set representing the union of this set and the other.
+ // O(|this| + |that|).
+ UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) {
+ if (that->size_ == 0) return this->Copy(zone);
+ if (this->size_ == 0) return that->Copy(zone);
+
+ UniqueSet<T>* out = new(zone) UniqueSet<T>();
+ out->Grow(this->size_ + that->size_, zone);
+
+ int i = 0, j = 0, k = 0;
+ while (i < this->size_ && j < that->size_) {
+ Unique<T> a = this->array_[i];
+ Unique<T> b = that->array_[j];
+ if (a == b) {
+ out->array_[k++] = a;
+ i++;
+ j++;
+ } else if (a.raw_address_ < b.raw_address_) {
+ out->array_[k++] = a;
+ i++;
+ } else {
+ out->array_[k++] = b;
+ j++;
+ }
+ }
+
+ while (i < this->size_) out->array_[k++] = this->array_[i++];
+ while (j < that->size_) out->array_[k++] = that->array_[j++];
+
+ out->size_ = k;
+ return out;
+ }
+
+ // Makes an exact copy of this set. O(|this|).
+ UniqueSet<T>* Copy(Zone* zone) {
+ UniqueSet<T>* copy = new(zone) UniqueSet<T>();
+ copy->size_ = this->size_;
+ copy->capacity_ = this->size_;
+ copy->array_ = zone->NewArray<Unique<T> >(this->size_);
+ memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique<T>));
+ return copy;
+ }
+
+ inline int size() {
+ return size_;
+ }
+
+ private:
+ // These sets should be small, since operations are implemented with simple
+ // linear algorithms. Enforce a maximum size.
+ static const int kMaxCapacity = 65535;
+
+ uint16_t size_;
+ uint16_t capacity_;
+ Unique<T>* array_;
+
+ // Grow the size of internal storage to be at least {size} elements.
+ void Grow(int size, Zone* zone) {
+ CHECK(size < kMaxCapacity); // Enforce maximum size.
+ if (capacity_ < size) {
+ int new_capacity = 2 * capacity_ + size;
+ if (new_capacity > kMaxCapacity) new_capacity = kMaxCapacity;
+ Unique<T>* new_array = zone->NewArray<Unique<T> >(new_capacity);
+ if (size_ > 0) {
+ memcpy(new_array, array_, size_ * sizeof(Unique<T>));
+ }
+ capacity_ = new_capacity;
+ array_ = new_array;
+ }
+ }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_UNIQUE_H_
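Because a UniqueSet stays sorted by the captured address, Equals, IsSubset, Intersect, and Union above are all single linear passes over the arrays. A standalone illustration of the merge that Intersect performs, with a plain integer standing in for Unique<T>::raw_address_:

    #include <cstdint>
    #include <vector>

    typedef uintptr_t Key;  // stands in for Unique<T>::raw_address_

    // Sorted-merge intersection: O(|a| + |b|), no hashing, no heap access.
    std::vector<Key> Intersect(const std::vector<Key>& a,
                               const std::vector<Key>& b) {
      std::vector<Key> out;
      size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        if (a[i] == b[j]) {
          out.push_back(a[i]);
          ++i;
          ++j;
        } else if (a[i] < b[j]) {
          ++i;
        } else {
          ++j;
        }
      }
      return out;
    }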
diff --git a/chromium/v8/src/utils/random-number-generator.cc b/chromium/v8/src/utils/random-number-generator.cc
new file mode 100644
index 00000000000..1e03ee24499
--- /dev/null
+++ b/chromium/v8/src/utils/random-number-generator.cc
@@ -0,0 +1,136 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "utils/random-number-generator.h"
+
+#include <cstdio>
+
+#include "flags.h"
+#include "platform/mutex.h"
+#include "platform/time.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+static RandomNumberGenerator::EntropySource entropy_source = NULL;
+
+
+// static
+void RandomNumberGenerator::SetEntropySource(EntropySource source) {
+ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ entropy_source = source;
+}
+
+
+RandomNumberGenerator::RandomNumberGenerator() {
+ // Check --random-seed flag first.
+ if (FLAG_random_seed != 0) {
+ SetSeed(FLAG_random_seed);
+ return;
+ }
+
+ // Check if embedder supplied an entropy source.
+ { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+ if (entropy_source != NULL) {
+ int64_t seed;
+ if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
+ sizeof(seed))) {
+ SetSeed(seed);
+ return;
+ }
+ }
+ }
+
+ // Gather entropy from /dev/urandom if available.
+ FILE* fp = fopen("/dev/urandom", "rb");
+ if (fp != NULL) {
+ int64_t seed;
+ size_t n = fread(&seed, sizeof(seed), 1, fp);
+ fclose(fp);
+ if (n == 1) {
+ SetSeed(seed);
+ return;
+ }
+ }
+
+ // We cannot assume that random() or rand() were seeded properly,
+ // so instead of relying on them we seed our PRNG with timing data
+ // as a fallback.
+ int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
+ seed ^= TimeTicks::HighResNow().ToInternalValue() << 16;
+ seed ^= TimeTicks::Now().ToInternalValue() << 8;
+ SetSeed(seed);
+}
+
+
+int RandomNumberGenerator::NextInt(int max) {
+ ASSERT_LE(0, max);
+
+ // Fast path if max is a power of 2.
+ if (IsPowerOf2(max)) {
+ return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
+ }
+
+ while (true) {
+ int rnd = Next(31);
+ int val = rnd % max;
+ if (rnd - val + (max - 1) >= 0) {
+ return val;
+ }
+ }
+}
+
+
+double RandomNumberGenerator::NextDouble() {
+ return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
+ static_cast<double>(static_cast<int64_t>(1) << 53);
+}
+
+
+void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
+ for (size_t n = 0; n < buflen; ++n) {
+ static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
+ }
+}
+
+
+int RandomNumberGenerator::Next(int bits) {
+ ASSERT_LT(0, bits);
+ ASSERT_GE(32, bits);
+ int64_t seed = (seed_ * kMultiplier + kAddend) & kMask;
+ seed_ = seed;
+ return static_cast<int>(seed >> (48 - bits));
+}
+
+
+void RandomNumberGenerator::SetSeed(int64_t seed) {
+ seed_ = (seed ^ kMultiplier) & kMask;
+}
+
+} } // namespace v8::internal
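The core above is the classic 48-bit linear congruential generator popularized by java.util.Random: Next(bits) advances the state and returns its top bits, and NextInt(max) rejects draws from the final partial bucket so the modulo stays unbiased. A self-contained sketch of the same arithmetic (the rejection test is written overflow-free here; the code above expresses it as rnd - val + (max - 1) >= 0, relying on wraparound):

    #include <cassert>
    #include <cstdint>

    class Lcg48 {
     public:
      explicit Lcg48(int64_t seed) { seed_ = (seed ^ kMultiplier) & kMask; }

      // Top 'bits' bits of the advanced 48-bit state.
      int Next(int bits) {
        seed_ = (seed_ * kMultiplier + kAddend) & kMask;
        return static_cast<int>(seed_ >> (48 - bits));
      }

      // Uniform in [0, max): reject the final partial bucket of size
      // (2^31 mod max) so every residue is equally likely.
      int NextInt(int max) {
        assert(max > 0);
        while (true) {
          int rnd = Next(31);
          int val = rnd % max;
          if (rnd - val <= INT32_MAX - (max - 1)) return val;
        }
      }

     private:
      static const int64_t kMultiplier = INT64_C(0x5deece66d);
      static const int64_t kAddend = 0xb;
      static const int64_t kMask = (INT64_C(1) << 48) - 1;
      int64_t seed_;
    };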
diff --git a/chromium/v8/src/utils/random-number-generator.h b/chromium/v8/src/utils/random-number-generator.h
new file mode 100644
index 00000000000..bd7dca7e651
--- /dev/null
+++ b/chromium/v8/src/utils/random-number-generator.h
@@ -0,0 +1,106 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#define V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
+
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// RandomNumberGenerator
+//
+// This class is used to generate a stream of pseudorandom numbers. The class
+// uses a 48-bit seed, which is modified using a linear congruential formula.
+// (See Donald Knuth, The Art of Computer Programming, Volume 2, Section 3.2.1.)
+// If two instances of RandomNumberGenerator are created with the same seed, and
+// the same sequence of method calls is made for each, they will generate and
+// return identical sequences of numbers.
+// This class is neither reentrant nor threadsafe.
+
+class RandomNumberGenerator V8_FINAL {
+ public:
+ // EntropySource is used as a callback function when V8 needs a source of
+ // entropy.
+ typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
+ static void SetEntropySource(EntropySource entropy_source);
+
+ RandomNumberGenerator();
+ explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
+
+ // Returns the next pseudorandom, uniformly distributed int value from this
+ // random number generator's sequence. The general contract of |NextInt()| is
+ // that one int value is pseudorandomly generated and returned.
+ // All 2^32 possible integer values are produced with (approximately) equal
+ // probability.
+ V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
+ return Next(32);
+ }
+
+ // Returns a pseudorandom, uniformly distributed int value between 0
+ // (inclusive) and the specified max value (exclusive), drawn from this random
+ // number generator's sequence. The general contract of |NextInt(int)| is that
+ // one int value in the specified range is pseudorandomly generated and
+ // returned. All max possible int values are produced with (approximately)
+ // equal probability.
+ int NextInt(int max) V8_WARN_UNUSED_RESULT;
+
+ // Returns the next pseudorandom, uniformly distributed boolean value from
+ // this random number generator's sequence. The general contract of
+ // |NextBoolean()| is that one boolean value is pseudorandomly generated and
+ // returned. The values true and false are produced with (approximately) equal
+ // probability.
+ V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
+ return Next(1) != 0;
+ }
+
+ // Returns the next pseudorandom, uniformly distributed double value between
+ // 0.0 and 1.0 from this random number generator's sequence.
+ // The general contract of |NextDouble()| is that one double value, chosen
+ // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
+ // (exclusive), is pseudorandomly generated and returned.
+ double NextDouble() V8_WARN_UNUSED_RESULT;
+
+ // Fills the elements of a specified array of bytes with random numbers.
+ void NextBytes(void* buffer, size_t buflen);
+
+ private:
+ static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
+ static const int64_t kAddend = 0xb;
+ static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
+
+ int Next(int bits) V8_WARN_UNUSED_RESULT;
+ void SetSeed(int64_t seed);
+
+ int64_t seed_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
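NextDouble()'s contract above concatenates a 26-bit and a 27-bit draw into a 53-bit integer, the full precision of an IEEE double, then scales it into [0.0, 1.0). The arithmetic in isolation:

    #include <cstdint>

    // 'hi26' in [0, 2^26), 'lo27' in [0, 2^27): result uniform in [0.0, 1.0).
    double ToDouble(int hi26, int lo27) {
      int64_t bits = (static_cast<int64_t>(hi26) << 27) + lo27;
      return bits / static_cast<double>(INT64_C(1) << 53);
    }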
diff --git a/chromium/v8/src/v8-counters.cc b/chromium/v8/src/v8-counters.cc
index 905e178fec6..6711c80203e 100644
--- a/chromium/v8/src/v8-counters.cc
+++ b/chromium/v8/src/v8-counters.cc
@@ -49,31 +49,31 @@ Counters::Counters(Isolate* isolate) {
#undef HM
#define SC(name, caption) \
- name##_ = StatsCounter("c:" #caption);
+ name##_ = StatsCounter(isolate, "c:" #caption);
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
#undef SC
#define SC(name) \
- count_of_##name##_ = StatsCounter("c:" "V8.CountOf_" #name); \
- size_of_##name##_ = StatsCounter("c:" "V8.SizeOf_" #name);
+ count_of_##name##_ = StatsCounter(isolate, "c:" "V8.CountOf_" #name); \
+ size_of_##name##_ = StatsCounter(isolate, "c:" "V8.SizeOf_" #name);
INSTANCE_TYPE_LIST(SC)
#undef SC
#define SC(name) \
count_of_CODE_TYPE_##name##_ = \
- StatsCounter("c:" "V8.CountOf_CODE_TYPE-" #name); \
+ StatsCounter(isolate, "c:" "V8.CountOf_CODE_TYPE-" #name); \
size_of_CODE_TYPE_##name##_ = \
- StatsCounter("c:" "V8.SizeOf_CODE_TYPE-" #name);
+ StatsCounter(isolate, "c:" "V8.SizeOf_CODE_TYPE-" #name);
CODE_KIND_LIST(SC)
#undef SC
#define SC(name) \
count_of_FIXED_ARRAY_##name##_ = \
- StatsCounter("c:" "V8.CountOf_FIXED_ARRAY-" #name); \
+ StatsCounter(isolate, "c:" "V8.CountOf_FIXED_ARRAY-" #name); \
size_of_FIXED_ARRAY_##name##_ = \
- StatsCounter("c:" "V8.SizeOf_FIXED_ARRAY-" #name);
+ StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
}
diff --git a/chromium/v8/src/v8.cc b/chromium/v8/src/v8.cc
index 521d064129a..e894164cd16 100644
--- a/chromium/v8/src/v8.cc
+++ b/chromium/v8/src/v8.cc
@@ -50,18 +50,9 @@ namespace internal {
V8_DECLARE_ONCE(init_once);
-bool V8::is_running_ = false;
-bool V8::has_been_set_up_ = false;
-bool V8::has_been_disposed_ = false;
-bool V8::has_fatal_error_ = false;
-bool V8::use_crankshaft_ = true;
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
-static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-
-static EntropySource entropy_source;
-
bool V8::Initialize(Deserializer* des) {
InitializeOncePerProcess();
@@ -80,31 +71,18 @@ bool V8::Initialize(Deserializer* des) {
ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
i::Isolate::Current());
- if (IsDead()) return false;
-
Isolate* isolate = Isolate::Current();
+ if (isolate->IsDead()) return false;
if (isolate->IsInitialized()) return true;
- is_running_ = true;
- has_been_set_up_ = true;
- has_fatal_error_ = false;
- has_been_disposed_ = false;
-
return isolate->Init(des);
}
-void V8::SetFatalError() {
- is_running_ = false;
- has_fatal_error_ = true;
-}
-
-
void V8::TearDown() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->IsDefaultIsolate());
-
- if (!has_been_set_up_ || has_been_disposed_) return;
+ if (!isolate->IsInitialized()) return;
// The isolate has to be torn down before clearing the LOperand
// caches so that the optimizing compiler thread (if running)
@@ -118,49 +96,10 @@ void V8::TearDown() {
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
- is_running_ = false;
- has_been_disposed_ = true;
-
delete call_completed_callbacks_;
call_completed_callbacks_ = NULL;
Sampler::TearDown();
- OS::TearDown();
-}
-
-
-static void seed_random(uint32_t* state) {
- for (int i = 0; i < 2; ++i) {
- if (FLAG_random_seed != 0) {
- state[i] = FLAG_random_seed;
- } else if (entropy_source != NULL) {
- uint32_t val;
- ScopedLock lock(entropy_mutex.Pointer());
- entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
- state[i] = val;
- } else {
- state[i] = random();
- }
- }
-}
-
-
-// Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(uint32_t* state) {
- // Initialize seed using the system random().
- // No non-zero seed will ever become zero again.
- if (state[0] == 0) seed_random(state);
-
- // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
- state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
- state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
-
- return (state[0] << 14) + (state[1] & 0x3FFFF);
-}
-
-
-void V8::SetEntropySource(EntropySource source) {
- entropy_source = source;
}
@@ -174,26 +113,18 @@ void V8::SetReturnAddressLocationResolver(
uint32_t V8::Random(Context* context) {
ASSERT(context->IsNativeContext());
ByteArray* seed = context->random_seed();
- return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
-}
-
-
-// Used internally by the JIT and memory allocator for security
-// purposes. So, we keep a different state to prevent informations
-// leaks that could be used in an exploit.
-uint32_t V8::RandomPrivate(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
- return random_base(isolate->private_random_seed());
-}
+ uint32_t* state = reinterpret_cast<uint32_t*>(seed->GetDataStartAddress());
+ // When we get here, the RNG must have been initialized,
+ // see the Genesis constructor in file bootstrapper.cc.
+ ASSERT_NE(0, state[0]);
+ ASSERT_NE(0, state[1]);
-bool V8::IdleNotification(int hint) {
- // Returning true tells the caller that there is no need to call
- // IdleNotification again.
- if (!FLAG_use_idle_notification) return true;
+ // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
+ state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
+ state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
- // Tell the heap that it may want to adjust.
- return HEAP->IdleNotification(hint);
+ return (state[0] << 14) + (state[1] & 0x3FFFF);
}
@@ -272,9 +203,10 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
- if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
- // Tracing hydrogen do not work with parallel recompilation.
- FLAG_parallel_recompilation = false;
+ if (FLAG_concurrent_recompilation &&
+ (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs)) {
+ FLAG_concurrent_recompilation = false;
+ PrintF("Concurrent recompilation has been disabled for tracing.\n");
}
if (FLAG_sweeper_threads <= 0) {
@@ -308,18 +240,14 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_marking_threads = 0;
}
- if (FLAG_parallel_recompilation &&
+ if (FLAG_concurrent_recompilation &&
SystemThreadManager::NumberOfParallelSystemThreads(
SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
- FLAG_parallel_recompilation = false;
+ FLAG_concurrent_recompilation = false;
}
- OS::SetUp();
Sampler::SetUp();
CPU::SetUp();
- use_crankshaft_ = FLAG_crankshaft
- && !Serializer::enabled()
- && CPU::SupportsCrankshaft();
OS::PostSetUp();
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
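V8::Random above keeps George Marsaglia's multiply-with-carry mix but now reads its two 32-bit state words from the per-context seed and asserts they were initialized during bootstrapping. Each word carries a 16-bit value and a 16-bit carry, updated as x = a * (x & 0xFFFF) + (x >> 16), which never turns a nonzero word into zero. The mix in isolation:

    #include <cstdint>

    // Marsaglia MWC step with the constants from the diff (18273, 36969).
    uint32_t MwcNext(uint32_t state[2]) {
      state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
      state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
      return (state[0] << 14) + (state[1] & 0x3FFFF);
    }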
diff --git a/chromium/v8/src/v8.h b/chromium/v8/src/v8.h
index 47893e8215e..5848f748185 100644
--- a/chromium/v8/src/v8.h
+++ b/chromium/v8/src/v8.h
@@ -82,12 +82,6 @@ class V8 : public AllStatic {
// empty heap.
static bool Initialize(Deserializer* des);
static void TearDown();
- static bool IsRunning() { return is_running_; }
- static bool UseCrankshaft() { return use_crankshaft_; }
- // To be dead you have to have lived
- // TODO(isolates): move IsDead to Isolate.
- static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
- static void SetFatalError();
// Report process out of memory. Implementation found in api.cc.
static void FatalProcessOutOfMemory(const char* location,
@@ -103,17 +97,9 @@ class V8 : public AllStatic {
static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
// Random number generation support. Not cryptographically safe.
static uint32_t Random(Context* context);
- // We use random numbers internally in memory allocation and in the
- // compilers for security. In order to prevent information leaks we
- // use a separate random state for internal random number
- // generation.
- static uint32_t RandomPrivate(Isolate* isolate);
static Object* FillHeapNumberWithRandom(Object* heap_number,
Context* context);
- // Idle notification directly from the API.
- static bool IdleNotification(int hint);
-
static void AddCallCompletedCallback(CallCompletedCallback callback);
static void RemoveCallCompletedCallback(CallCompletedCallback callback);
static void FireCallCompletedCallback(Isolate* isolate);
@@ -131,18 +117,6 @@ class V8 : public AllStatic {
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
- // True if engine is currently running
- static bool is_running_;
- // True if V8 has ever been run
- static bool has_been_set_up_;
- // True if error has been signaled for current engine
- // (reset to false if engine is restarted)
- static bool has_fatal_error_;
- // True if engine has been shut down
- // (reset if engine is restarted)
- static bool has_been_disposed_;
- // True if we are using the crankshaft optimizing compiler.
- static bool use_crankshaft_;
// List of callbacks when a Call completes.
static List<CallCompletedCallback>* call_completed_callbacks_;
// Allocator for external array buffers.
diff --git a/chromium/v8/src/v8dll-main.cc b/chromium/v8/src/v8dll-main.cc
index 49d868957d9..7f6c9f955d3 100644
--- a/chromium/v8/src/v8dll-main.cc
+++ b/chromium/v8/src/v8dll-main.cc
@@ -30,8 +30,8 @@
#undef USING_V8_SHARED
#include "../include/v8.h"
-#ifdef WIN32
-#include <windows.h> // NOLINT
+#if V8_OS_WIN
+#include "win32-headers.h"
extern "C" {
BOOL WINAPI DllMain(HANDLE hinstDLL,
@@ -41,4 +41,4 @@ BOOL WINAPI DllMain(HANDLE hinstDLL,
return TRUE;
}
}
-#endif
+#endif // V8_OS_WIN
diff --git a/chromium/v8/src/v8globals.h b/chromium/v8/src/v8globals.h
index c3f1f01f944..7fa2fd62c56 100644
--- a/chromium/v8/src/v8globals.h
+++ b/chromium/v8/src/v8globals.h
@@ -97,7 +97,7 @@ const int kPageSizeBits = 20;
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
-const int kProcessorCacheLineSize = 64;
+#define PROCESSOR_CACHE_LINE_SIZE 64
// Constants relevant to double precision floating point numbers.
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
@@ -163,6 +163,7 @@ class Deserializer;
class MessageLocation;
class VirtualMemory;
class Mutex;
+class RecursiveMutex;
typedef bool (*WeakSlotCallback)(Object** pointer);
@@ -346,8 +347,9 @@ union IeeeDoubleBigEndianArchType {
// AccessorCallback
struct AccessorDescriptor {
- MaybeObject* (*getter)(Object* object, void* data);
- MaybeObject* (*setter)(JSObject* object, Object* value, void* data);
+ MaybeObject* (*getter)(Isolate* isolate, Object* object, void* data);
+ MaybeObject* (*setter)(
+ Isolate* isolate, JSObject* object, Object* value, void* data);
void* data;
};
@@ -363,7 +365,8 @@ enum StateTag {
GC,
COMPILER,
OTHER,
- EXTERNAL
+ EXTERNAL,
+ IDLE
};
@@ -411,34 +414,12 @@ enum StateTag {
#endif
-enum CpuImplementer {
- UNKNOWN_IMPLEMENTER,
- ARM_IMPLEMENTER,
- QUALCOMM_IMPLEMENTER
-};
-
-
-enum CpuPart {
- CPU_UNKNOWN,
- CORTEX_A15,
- CORTEX_A12,
- CORTEX_A9,
- CORTEX_A8,
- CORTEX_A7,
- CORTEX_A5
-};
-
-
// Feature flags bit positions. They are mostly based on the CPUID spec.
-// (We assign CPUID itself to one of the currently reserved bits --
-// feel free to change this if needed.)
// On X86/X64, values below 32 are bits in EDX, values above 32 are bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19, // x86
SSE3 = 32 + 0, // x86
SSE2 = 26, // x86
CMOV = 15, // x86
- RDTSC = 4, // x86
- CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
SUDIV = 3, // ARM
@@ -584,6 +565,11 @@ enum ClearExceptionFlag {
};
+enum MinusZeroMode {
+ TREAT_MINUS_ZERO_AS_ZERO,
+ FAIL_ON_MINUS_ZERO
+};
+
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
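The CpuFeature values above are CPUID bit positions packed into one 64-bit word: on x86/x64, values below 32 name bits of EDX from leaf 1 and values of 32 + n name bit n of ECX. Testing a feature is then a single shift against the packed word:

    #include <cstdint>

    enum CpuFeature { CMOV = 15, SSE2 = 26, SSE3 = 32 + 0, SSE4_1 = 32 + 19 };

    // 'ecx_edx' is (ECX << 32) | EDX from CPUID leaf 1.
    bool IsSupported(uint64_t ecx_edx, CpuFeature f) {
      return (ecx_edx & (static_cast<uint64_t>(1) << f)) != 0;
    }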
diff --git a/chromium/v8/src/v8threads.cc b/chromium/v8/src/v8threads.cc
index 2df187a572c..33b620d8eab 100644
--- a/chromium/v8/src/v8threads.cc
+++ b/chromium/v8/src/v8threads.cc
@@ -144,12 +144,13 @@ Unlocker::~Unlocker() {
void Locker::StartPreemption(int every_n_ms) {
- v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
+ v8::internal::ContextSwitcher::StartPreemption(
+ i::Isolate::Current(), every_n_ms);
}
void Locker::StopPreemption() {
- v8::internal::ContextSwitcher::StopPreemption();
+ v8::internal::ContextSwitcher::StopPreemption(i::Isolate::Current());
}
@@ -214,7 +215,7 @@ bool ThreadManager::RestoreThread() {
void ThreadManager::Lock() {
- mutex_->Lock();
+ mutex_.Lock();
mutex_owner_ = ThreadId::Current();
ASSERT(IsLockedByCurrentThread());
}
@@ -222,7 +223,7 @@ void ThreadManager::Lock() {
void ThreadManager::Unlock() {
mutex_owner_ = ThreadId::Invalid();
- mutex_->Unlock();
+ mutex_.Unlock();
}
@@ -303,8 +304,7 @@ ThreadState* ThreadState::Next() {
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
- : mutex_(OS::CreateMutex()),
- mutex_owner_(ThreadId::Invalid()),
+ : mutex_owner_(ThreadId::Invalid()),
lazily_archived_thread_(ThreadId::Invalid()),
lazily_archived_thread_state_(NULL),
free_anchor_(NULL),
@@ -315,7 +315,6 @@ ThreadManager::ThreadManager()
ThreadManager::~ThreadManager() {
- delete mutex_;
DeleteThreadStateList(free_anchor_);
DeleteThreadStateList(in_use_anchor_);
}
@@ -439,8 +438,7 @@ ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
-void ContextSwitcher::StartPreemption(int every_n_ms) {
- Isolate* isolate = Isolate::Current();
+void ContextSwitcher::StartPreemption(Isolate* isolate, int every_n_ms) {
ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() == NULL) {
// If the ContextSwitcher thread is not running at the moment start it now.
@@ -456,8 +454,7 @@ void ContextSwitcher::StartPreemption(int every_n_ms) {
// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst them from this point on.
-void ContextSwitcher::StopPreemption() {
- Isolate* isolate = Isolate::Current();
+void ContextSwitcher::StopPreemption(Isolate* isolate) {
ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() != NULL) {
// The ContextSwitcher thread is running. We need to stop it and release
diff --git a/chromium/v8/src/v8threads.h b/chromium/v8/src/v8threads.h
index 8dce8602f66..1edacfc3bb7 100644
--- a/chromium/v8/src/v8threads.h
+++ b/chromium/v8/src/v8threads.h
@@ -119,7 +119,7 @@ class ThreadManager {
void EagerlyArchiveThread();
- Mutex* mutex_;
+ Mutex mutex_;
ThreadId mutex_owner_;
ThreadId lazily_archived_thread_;
ThreadState* lazily_archived_thread_state_;
@@ -146,10 +146,10 @@ class ThreadManager {
class ContextSwitcher: public Thread {
public:
// Set the preemption interval for the ContextSwitcher thread.
- static void StartPreemption(int every_n_ms);
+ static void StartPreemption(Isolate* isolate, int every_n_ms);
// Stop sending preemption requests to threads.
- static void StopPreemption();
+ static void StopPreemption(Isolate* isolate);
// Preempted thread needs to call back to the ContextSwitcher to acknowledge
// the handling of a preemption request.
diff --git a/chromium/v8/src/version.cc b/chromium/v8/src/version.cc
index 292389aa4ab..7f7cb0156d0 100644
--- a/chromium/v8/src/version.cc
+++ b/chromium/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 20
-#define BUILD_NUMBER 15
-#define PATCH_LEVEL 5
+#define MINOR_VERSION 21
+#define BUILD_NUMBER 18
+#define PATCH_LEVEL 13
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/chromium/v8/src/win32-headers.h b/chromium/v8/src/win32-headers.h
index 2b5d7d71f21..98b0120ea16 100644
--- a/chromium/v8/src/win32-headers.h
+++ b/chromium/v8/src/win32-headers.h
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_WIN32_HEADERS_H_
+#define V8_WIN32_HEADERS_H_
+
#ifndef WIN32_LEAN_AND_MEAN
// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
#define WIN32_LEAN_AND_MEAN
@@ -55,7 +58,6 @@
#include <windows.h>
-#ifdef V8_WIN32_HEADERS_FULL
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
#include <mmsystem.h> // For timeGetTime().
@@ -81,7 +83,6 @@
#endif // __MINGW32__
#include <process.h> // For _beginthreadex().
#include <stdlib.h>
-#endif // V8_WIN32_HEADERS_FULL
#undef VOID
#undef DELETE
@@ -94,5 +95,7 @@
#undef ANY
#undef IGNORE
#undef GetObject
-#undef CreateMutex
#undef CreateSemaphore
+#undef Yield
+
+#endif // V8_WIN32_HEADERS_H_
diff --git a/chromium/v8/src/win32-math.cc b/chromium/v8/src/win32-math.cc
index 9ffc4ea73bd..88fa3a684be 100644
--- a/chromium/v8/src/win32-math.cc
+++ b/chromium/v8/src/win32-math.cc
@@ -31,8 +31,6 @@
// (http://www.opengroup.org/onlinepubs/000095399/)
#ifdef _MSC_VER
-#undef V8_WIN32_LEAN_AND_MEAN
-#define V8_WIN32_HEADERS_FULL
#include "win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
diff --git a/chromium/v8/src/x64/assembler-x64-inl.h b/chromium/v8/src/x64/assembler-x64-inl.h
index ae9aeee8122..07d07033e95 100644
--- a/chromium/v8/src/x64/assembler-x64-inl.h
+++ b/chromium/v8/src/x64/assembler-x64-inl.h
@@ -373,13 +373,14 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
bool RelocInfo::IsPatchedReturnSequence() {
// The recognized call sequence is:
- // movq(kScratchRegister, immediate64); call(kScratchRegister);
+ // movq(kScratchRegister, address); call(kScratchRegister);
// It only needs to be distinguished from a return sequence
// movq(rsp, rbp); pop(rbp); ret(n); int3 *6
// The 11th byte is int3 (0xCC) in the return sequence and
// REX.WB (0x48+register bit) for the call sequence.
#ifdef ENABLE_DEBUGGER_SUPPORT
- return pc_[2 + kPointerSize] != 0xCC;
+ return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
+ 0xCC;
#else
return false;
#endif
@@ -447,7 +448,7 @@ Object** RelocInfo::call_object_address() {
}
-void RelocInfo::Visit(ObjectVisitor* visitor) {
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
@@ -462,12 +463,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
+ isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
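The patched-return test above leans on the fixed x64 encoding: movq(kScratchRegister, imm64) is 10 bytes (REX.W prefix, opcode 0xB8+reg, 8-byte immediate), so the byte right after it is 0xCC (int3) in an unpatched return sequence but a REX prefix in the patched call sequence. The check in isolation:

    #include <cstdint>

    const int kMoveAddressIntoScratchRegisterInstructionLength = 10;

    // True when the byte after the 10-byte movq is not the int3 filler.
    bool IsPatchedReturnSequence(const uint8_t* pc) {
      return pc[kMoveAddressIntoScratchRegisterInstructionLength] != 0xCC;
    }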
diff --git a/chromium/v8/src/x64/assembler-x64.cc b/chromium/v8/src/x64/assembler-x64.cc
index f5939c3b7e1..41bf297b387 100644
--- a/chromium/v8/src/x64/assembler-x64.cc
+++ b/chromium/v8/src/x64/assembler-x64.cc
@@ -63,98 +63,32 @@ void CpuFeatures::Probe() {
return; // No features if we might serialize.
}
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
+ uint64_t probed_features = 0;
+ CPU cpu;
+ if (cpu.has_sse41()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE4_1;
}
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
+ if (cpu.has_sse3()) {
+ probed_features |= static_cast<uint64_t>(1) << SSE3;
}
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old rsp, since we are going to modify the stack.
- __ push(rbp);
- __ pushfq();
- __ push(rdi);
- __ push(rcx);
- __ push(rbx);
- __ movq(rbp, rsp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfq();
- __ pop(rax);
- __ movq(rdx, rax);
- __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
- __ push(rax);
- __ popfq();
- __ pushfq();
- __ pop(rax);
- __ xor_(rax, rdx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in rax.
- __ xor_(rax, rax);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ movl(rax, Immediate(1));
- supported_ = kDefaultCpuFeatures | (1 << CPUID);
- { CpuFeatureScope fscope(&assm, CPUID);
- __ cpuid();
- // Move the result from ecx:edx to rdi.
- __ movl(rdi, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
- __ or_(rdi, rcx);
-
- // Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE64);
- __ cpuid();
+ // SSE2 must be available on every x64 CPU.
+ ASSERT(cpu.has_sse2());
+ probed_features |= static_cast<uint64_t>(1) << SSE2;
+
+ // CMOV must be available on every x64 CPU.
+ ASSERT(cpu.has_cmov());
+ probed_features |= static_cast<uint64_t>(1) << CMOV;
+
+ // SAHF is not generally available in long mode.
+ if (cpu.has_sahf()) {
+ probed_features |= static_cast<uint64_t>(1) << SAHF;
}
- supported_ = kDefaultCpuFeatures;
- // Put the CPU flags in rax.
- // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
- __ movl(rax, Immediate(1));
- __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
- __ not_(rax);
- __ and_(rax, rdi);
- __ or_(rax, rcx);
- __ or_(rax, Immediate(1 << CPUID));
-
- // Done.
- __ bind(&done);
- __ movq(rsp, rbp);
- __ pop(rbx);
- __ pop(rcx);
- __ pop(rdi);
- __ popfq();
- __ pop(rbp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
-
- uint64_t probed_features = probe();
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features;
found_by_runtime_probing_only_
= probed_features & ~kDefaultCpuFeatures & ~platform_features;
-
- // CMOV must be available on an X64 CPU.
- ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(CMOV));
-
- delete memory;
}
@@ -164,10 +98,7 @@ void CpuFeatures::Probe() {
// Patch the code at the current PC with a call to the target address.
// Additional guard int3 instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Load register with immediate 64 and call through a register instructions
- // takes up 13 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 13;
- int code_size = kCallCodeSize + guard_bytes;
+ int code_size = Assembler::kCallSequenceLength + guard_bytes;
// Create a code patcher.
CodePatcher patcher(pc_, code_size);
@@ -183,7 +114,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
patcher.masm()->call(r10);
// Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
+ ASSERT_EQ(Assembler::kCallSequenceLength,
patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
// Add the requested number of int3 instructions after the call.
@@ -465,7 +396,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
+ (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -990,7 +921,6 @@ void Assembler::cmpb_al(Immediate imm8) {
void Assembler::cpuid() {
- ASSERT(IsEnabled(CPUID));
EnsureSpace ensure_space(this);
emit(0x0F);
emit(0xA2);
@@ -1603,7 +1533,7 @@ void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
} else {
EnsureSpace ensure_space(this);
ASSERT(value->IsHeapObject());
- ASSERT(!HEAP->InNewSpace(*value));
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
emitp(value.location(), mode);
@@ -1922,13 +1852,6 @@ void Assembler::pushfq() {
}
-void Assembler::rdtsc() {
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0x31);
-}
-
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
ASSERT(is_uint16(imm16));
@@ -2995,6 +2918,17 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x01); // LT == 1
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src,
Assembler::RoundingMode mode) {
ASSERT(IsEnabled(SSE4_1));
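
The new cmpltsd() emits CMPSD (F2 0F C2 /r ib) with immediate predicate 1, i.e. compare-less-than: the destination's low double becomes an all-ones mask when dst < src and all-zeros otherwise. A small host-side illustration of the same predicate, assuming an SSE2-capable compiler and using the standard intrinsic that maps to this encoding:

    #include <emmintrin.h>  // SSE2 intrinsics
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      __m128d a = _mm_set_sd(1.0);
      __m128d b = _mm_set_sd(2.0);
      __m128d m = _mm_cmplt_sd(a, b);  // emits F2 0F C2 /r with imm8 == 1
      uint64_t bits;
      std::memcpy(&bits, &m, sizeof(bits));               // low lane of the result
      std::printf("%016llx\n", (unsigned long long)bits);  // ffffffffffffffff
      return 0;
    }
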
diff --git a/chromium/v8/src/x64/assembler-x64.h b/chromium/v8/src/x64/assembler-x64.h
index 07afc129dc8..f2e37fe863f 100644
--- a/chromium/v8/src/x64/assembler-x64.h
+++ b/chromium/v8/src/x64/assembler-x64.h
@@ -475,7 +475,6 @@ class CpuFeatures : public AllStatic {
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
@@ -579,29 +578,36 @@ class Assembler : public AssemblerBase {
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
- // Distance between the start of the JS return sequence and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchReturnSequenceAddressOffset = 13 - 4;
- // Distance between start of patched debug break slot and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchDebugBreakSlotAddressOffset = 13 - 4;
- // TODO(X64): Rename this, removing the "Real", after changing the above.
- static const int kRealPatchReturnSequenceAddressOffset = 2;
-
- // Some x64 JS code is padded with int3 to make it large
- // enough to hold an instruction when the debugger patches it.
- static const int kJumpInstructionLength = 13;
- static const int kCallInstructionLength = 13;
- static const int kJSReturnSequenceLength = 13;
+ // The length of call(kScratchRegister).
+ static const int kCallScratchRegisterInstructionLength = 3;
+ // The length of call(Immediate32).
static const int kShortCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = 4;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
+ // The length of movq(kScratchRegister, address).
+ static const int kMoveAddressIntoScratchRegisterInstructionLength =
+ 2 + kPointerSize;
+ // The length of movq(kScratchRegister, address) and call(kScratchRegister).
+ static const int kCallSequenceLength =
+ kMoveAddressIntoScratchRegisterInstructionLength +
+ kCallScratchRegisterInstructionLength;
+
+ // The JS return sequence and the debug break slot must be able to contain
+ // an indirect call sequence; some x64 JS code is padded with int3 to make
+ // it large enough to hold such a call when the debugger patches it.
+ static const int kJSReturnSequenceLength = kCallSequenceLength;
+ static const int kDebugBreakSlotLength = kCallSequenceLength;
+ static const int kPatchDebugBreakSlotReturnOffset = kCallTargetAddressOffset;
+ // Distance between the start of the JS return sequence and where the
+ // 32-bit displacement of a short call would be. The short call is emitted
+ // by SetDebugBreakAtIC in debug-x64.cc.
+ static const int kPatchReturnSequenceAddressOffset =
+ kJSReturnSequenceLength - kPatchDebugBreakSlotReturnOffset;
+ // Distance between the start of the debug break slot and where the
+ // 32-bit displacement of a short call would be. The short call is emitted
+ // by SetDebugBreakAtIC in debug-x64.cc.
+ static const int kPatchDebugBreakSlotAddressOffset =
+ kDebugBreakSlotLength - kPatchDebugBreakSlotReturnOffset;
+ static const int kRealPatchReturnSequenceAddressOffset =
+ kMoveAddressIntoScratchRegisterInstructionLength - kPointerSize;
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
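
These constants fall straight out of the x64 encodings: movq(kScratchRegister, imm64) is a REX.W prefix plus the B8+r opcode (2 bytes) followed by an 8-byte immediate, and call(kScratchRegister) is REX.B, FF, and a ModRM byte (3 bytes), giving the 13-byte sequence the deleted magic numbers hard-coded. A compile-time sketch of the arithmetic (kPointerSize = 8 assumed):

    // Sketch: how the 13-byte indirect call sequence decomposes on x64.
    const int kPointerSize = 8;                            // assumption: LP64
    const int kMoveAddressIntoScratchRegisterLength =
        2 + kPointerSize;                                  // REX.W, B8+r, imm64
    const int kCallScratchRegisterLength = 3;              // REX.B, FF, ModRM
    static_assert(kMoveAddressIntoScratchRegisterLength +
                  kCallScratchRegisterLength == 13,
                  "movq(r10, imm64) + call(r10) is 13 bytes");
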
@@ -1169,7 +1175,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@@ -1379,6 +1384,8 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
+ void cmpltsd(XMMRegister dst, XMMRegister src);
+
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
diff --git a/chromium/v8/src/x64/builtins-x64.cc b/chromium/v8/src/x64/builtins-x64.cc
index d34e4f70d9a..81721c25e1a 100644
--- a/chromium/v8/src/x64/builtins-x64.cc
+++ b/chromium/v8/src/x64/builtins-x64.cc
@@ -59,9 +59,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
int num_extra_args = 0;
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
- __ pop(kScratchRegister); // Save return address.
+ __ PopReturnAddressTo(kScratchRegister);
__ push(rdi);
- __ push(kScratchRegister); // Restore return address.
+ __ PushReturnAddressFrom(kScratchRegister);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
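
PopReturnAddressTo()/PushReturnAddressFrom() replace the bare pop/push pairs and name the intent: on x64 the return address sits at rsp[0], so inserting an argument beneath it means lifting it into a scratch register first. A toy model of the effect, sketched over a plain vector standing in for the stack:

    #include <cstdint>
    #include <vector>

    // Toy model: insert one argument beneath the return address, which is
    // what PopReturnAddressTo / push / PushReturnAddressFrom accomplish.
    void InsertArgUnderReturnAddress(std::vector<uint64_t>* stack,
                                     uint64_t new_arg) {
      uint64_t ra = stack->back();   // PopReturnAddressTo(kScratchRegister)
      stack->pop_back();
      stack->push_back(new_arg);     // push(rdi): argument slides underneath
      stack->push_back(ra);          // PushReturnAddressFrom(kScratchRegister)
    }
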
@@ -73,6 +73,24 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
+static void CallRuntimePassFunction(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+ // Function is also the parameter to the runtime call.
+ __ push(rdi);
+
+ __ CallRuntime(function_id, 1);
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore receiver.
+ __ pop(rdi);
+}
+
+
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ movq(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -84,57 +102,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
+ // Checking whether the queued function is ready for install is optional,
+ // since we come across interrupts and stack checks elsewhere. However,
+ // not checking may delay installing ready functions, and always checking
+ // would be quite expensive. A good compromise is to first check against
+ // the stack limit as a cue for an interrupt signal.
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+
+ CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
+ // Tail call to returned code.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
+ __ bind(&ok);
+ GenerateTailCallToSharedCode(masm);
+}
- // Tear down internal frame.
- }
+void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
GenerateTailCallToSharedCode(masm);
}
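
The stack-limit compare works as an interrupt cue because V8 signals pending interrupts by lowering the stack limit; one comparison therefore doubles as a stack check and an interrupt poll. A one-line restatement of the predicate (a sketch, not V8's API):

    #include <cstdint>

    // V8 requests an interrupt by lowering the stack limit, so one unsigned
    // compare doubles as a stack check and an interrupt poll.
    bool ShouldPollRecompileQueue(uintptr_t rsp, uintptr_t stack_limit) {
      return rsp < stack_limit;  // below the limit: an interrupt is pending
    }
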
@@ -249,7 +237,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ cmpq(rsi, rdi);
__ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
@@ -280,7 +268,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ subq(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
+ __ Assert(positive, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
@@ -429,10 +417,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Remove caller arguments from the stack and return.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
__ ret(0);
@@ -586,26 +574,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyCompile);
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
@@ -613,26 +582,7 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
+ CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
// Do a tail-call of the compiled function.
__ lea(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
@@ -708,7 +658,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(r10, Operand(rsp, kPCOnStackSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
@@ -717,13 +667,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
__ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
__ bind(&not_tos_rax);
- __ Abort("no cases left");
+ __ Abort(kNoCasesLeft);
}
@@ -772,9 +722,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label done;
__ testq(rax, rax);
__ j(not_zero, &done);
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
- __ push(rbx);
+ __ PushReturnAddressFrom(rbx);
__ incq(rax);
__ bind(&done);
}
@@ -782,8 +732,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
Label slow, non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, rax);
+ __ movq(rdi, args.GetReceiverOperand());
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
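
StackArgumentsAccessor centralizes rsp-relative argument arithmetic that the old code spelled out by hand at every site. A simplified standalone model of its displacements for a stub entered by call, covering the constant-count, no-receiver case used throughout this patch (the real class also handles register counts and an extra displacement):

    #include <cstddef>

    // Simplified model of StackArgumentsAccessor's displacements for a stub
    // entered by 'call': rsp[0] holds the return address, arguments above it.
    const int kPointerSize = 8;  // assumption: x64

    // Receiver sits above all argc arguments: rsp + (argc + 1) * kPointerSize,
    // matching the old hand-written Operand(rsp, (argc_ + 1) * kPointerSize).
    ptrdiff_t ReceiverOffset(int argc) { return (argc + 1) * kPointerSize; }

    // With ARGUMENTS_DONT_CONTAIN_RECEIVER and a constant count, argument 0
    // is the first (deepest) argument, so GetArgumentOperand(i) resolves to:
    ptrdiff_t ArgumentOffset(int argc, int index) {
      return (argc - index) * kPointerSize;
    }
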
@@ -808,7 +758,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(not_zero, &shift_arguments);
// Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+ __ movq(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
@@ -837,7 +787,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
// Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ movq(rdi, args.GetReceiverOperand());
__ jmp(&patch_receiver, Label::kNear);
// Use the global receiver object from the called function as the
@@ -851,7 +801,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+ __ movq(args.GetArgumentOperand(1), rbx);
__ jmp(&shift_arguments);
}
@@ -868,7 +818,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+ __ movq(args.GetArgumentOperand(1), rdi);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -895,9 +845,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ cmpq(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
- __ pop(rdx); // return address
+ __ PopReturnAddressTo(rdx);
__ push(rdi); // re-add proxy object as additional argument
- __ push(rdx);
+ __ PushReturnAddressFrom(rdx);
__ incq(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -1113,9 +1063,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for InternalArray function");
+ __ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for InternalArray function");
+ __ Check(equal, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
@@ -1143,9 +1093,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
+ __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
+ __ Check(equal, kUnexpectedInitialMapForArrayFunction);
}
// Run the native code for the Array function called as a normal function.
@@ -1173,18 +1123,19 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
__ cmpq(rdi, rcx);
- __ Assert(equal, "Unexpected String function");
+ __ Assert(equal, kUnexpectedStringFunction);
}
// Load the first argument into rax and get rid of the rest
// (including the receiver).
+ StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
__ testq(rax, rax);
__ j(zero, &no_arguments);
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ pop(rcx);
+ __ movq(rbx, args.GetArgumentOperand(1));
+ __ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
__ movq(rax, rbx);
// Lookup the argument in the number to string cache.
@@ -1219,9 +1170,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
Immediate(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected string wrapper instance size");
+ __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
__ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
- __ Assert(equal, "Unexpected unused properties of string wrapper");
+ __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
}
__ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
@@ -1268,9 +1219,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex);
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
// At this point the argument is already a string. Call runtime to
@@ -1313,10 +1264,10 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ pop(rbp);
// Remove caller arguments from the stack.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
}
@@ -1407,32 +1358,46 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ // Lookup the function in the JavaScript frame.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Lookup and calculate pc offset.
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
+ __ Integer32ToSmi(rdx, rdx);
+
+ // Pass both function and pc offset as arguments.
__ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ push(rdx);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
}
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
Label skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
+ // If the code object is null, just return to the unoptimized code.
+ __ cmpq(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
+
+ // Load deoptimization data from the code object.
+ __ movq(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ __ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+
+ // Overwrite the return address on the stack.
+ __ movq(Operand(rsp, 0), rax);
+
+ // And "return" to the OSR entry point of the function.
+ __ ret(0);
}
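
The new OSR path no longer detours through the deoptimizer: it reads the OSR pc offset out of the code object's deoptimization data, computes the entry address, overwrites the saved return address, and rets into the optimized code. A sketch of the address computation, with kCodeHeaderSize as a placeholder for Code::kHeaderSize:

    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;    // V8's tagged-pointer low bit
    const intptr_t kCodeHeaderSize = 96;  // placeholder for Code::kHeaderSize

    intptr_t OsrEntryPoint(intptr_t tagged_code_obj, int32_t osr_pc_offset) {
      return tagged_code_obj - kHeapObjectTag  // strip the heap-object tag
           + kCodeHeaderSize                   // skip past the Code header
           + osr_pc_offset;                    // offset from the deopt data
    }
    // The stub stores this address over the saved return address at rsp[0]
    // and executes ret, "returning" into the optimized code's OSR entry.
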
diff --git a/chromium/v8/src/x64/code-stubs-x64.cc b/chromium/v8/src/x64/code-stubs-x64.cc
index 551a71690e7..51e1a5395cf 100644
--- a/chromium/v8/src/x64/code-stubs-x64.cc
+++ b/chromium/v8/src/x64/code-stubs-x64.cc
@@ -39,6 +39,17 @@ namespace v8 {
namespace internal {
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rbx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -246,17 +257,6 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
}
-void UnaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rax };
- descriptor->register_param_count_ = 1;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(UnaryOpIC_Miss);
-}
-
-
void StoreGlobalStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -306,140 +306,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ movq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
- rdi); // Initialize with undefined.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // rcx holds native context, rbx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first elements.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
- __ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. rdx holds an index.
- Label loop;
- Label restore;
- __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rdx, rdx);
- __ bind(&loop);
- // Do not double check first entry.
- __ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
- __ j(equal, &restore);
- __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
- __ cmpq(rcx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ movq(rdx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Now link a function into a list of optimized functions.
- __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
- // No need for write barrier as JSFunction (rax) is in the new space.
-
- __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
- // Store JSFunction (rax) into rdx before issuing write barrier as
- // it clobbers all the registers passed.
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- rdx,
- rbx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
@@ -448,7 +314,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
@@ -494,10 +361,10 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(1));
// Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(0));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
@@ -511,9 +378,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
__ cmpq(rcx, Immediate(0));
- __ Assert(equal, message);
+ __ Assert(equal, kExpected0AsASmiSentinel);
}
__ movq(rcx, GlobalObjectOperand());
__ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
@@ -572,7 +438,6 @@ class FloatingPointHelper : public AllStatic {
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
@@ -581,9 +446,6 @@ class FloatingPointHelper : public AllStatic {
static void LoadAsIntegers(MacroAssembler* masm,
Label* operand_conversion_failure,
Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
// Tries to convert two values to smis losslessly.
// This fails if either argument is not a Smi nor a HeapNumber,
@@ -695,13 +557,13 @@ void BinaryOpStub::Initialize() {}
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
+ __ PopReturnAddressTo(rcx);
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
__ Push(Smi::FromInt(MinorKey()));
- __ push(rcx); // Push return address.
+ __ PushReturnAddressFrom(rcx);
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
@@ -954,7 +816,7 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
// Set the map.
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
+ kHeapNumberMapRegisterClobbered);
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ cvtqsi2sd(xmm0, rbx);
@@ -974,8 +836,7 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
}
// No fall-through from this generated code.
if (FLAG_debug_code) {
- __ Abort("Unexpected fall-through in "
- "BinaryStub_GenerateFloatingPointCode.");
+ __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
}
}
@@ -984,10 +845,10 @@ static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
MacroAssembler* masm) {
// Push arguments, but ensure they are under the return address
// for a tail call.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ push(rdx);
__ push(rax);
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
}
@@ -1275,8 +1136,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
Label input_not_smi, loaded;
+
// Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
@@ -1337,7 +1200,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference::transcendental_cache_array_address(masm->isolate());
__ movq(rax, cache_array);
int cache_array_index =
- type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
+ type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
__ movq(rax, Operand(rax, cache_array_index));
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
@@ -1561,40 +1424,6 @@ void TranscendentalCacheStub::GenerateOperation(
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- DoubleToIStub stub1(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- DoubleToIStub stub2(rdx, rdx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- __ JumpIfNotSmi(rax, &rax_is_object);
-
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
@@ -1618,10 +1447,8 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in rcx.
- DoubleToIStub stub1(rdx, r8, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ // Get the untagged integer version of the rdx heap number in r8.
+ __ TruncateHeapNumberToI(r8, rdx);
// Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
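
TruncateHeapNumberToI() folds the old construct-a-DoubleToIStub-and-call-it dance into one macro-assembler call. The conversion it performs for these bit ops is the ECMAScript ToInt32 truncation: toward zero, then wrapped modulo 2^32. A host-side sketch of those semantics (a stand-in, not V8's implementation):

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;  // non-finite maps to 0
      double t = std::trunc(d);                      // truncate toward zero
      double m = std::fmod(t, 4294967296.0);         // wrap modulo 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
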
@@ -1641,9 +1468,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the rax heap number in rcx.
- DoubleToIStub stub2(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
- true);
- __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ TruncateHeapNumberToI(rcx, rax);
__ bind(&done);
__ movl(rax, r8);
@@ -1658,30 +1483,6 @@ void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
}
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -1809,8 +1610,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
- __ movq(base, Operand(rsp, 2 * kPointerSize));
- __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(base, args.GetArgumentOperand(0));
+ __ movq(exponent, args.GetArgumentOperand(1));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -1843,16 +1645,17 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
if (exponent_type_ != INTEGER) {
- Label fast_power;
+ Label fast_power, try_arithmetic_simplification;
// Detect integer exponents stored as double.
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x80000000u));
__ j(equal, &call_runtime);
- __ cvtlsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
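
The reordered exponent check lets DoubleToI handle the common integral-exponent case directly and falls back to the old cvttsd2si-plus-0x80000000-sentinel path (now under try_arithmetic_simplification) only for the remaining cases. Roughly the integral test involved, as a standalone stand-in rather than V8's implementation:

    #include <cmath>
    #include <cstdint>

    bool DoubleToInt32Exact(double d, int32_t* out) {
      double t = std::trunc(d);                  // NaN fails the test below
      if (t != d) return false;                  // fractional part present
      if (t < -2147483648.0 || t > 2147483647.0) return false;  // out of range
      *out = static_cast<int32_t>(t);            // note: -0.0 becomes 0 here
      return true;
    }
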
@@ -2155,10 +1958,10 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
+ __ PopReturnAddressTo(scratch);
__ push(receiver);
__ push(value);
- __ push(scratch); // return address
+ __ PushReturnAddressFrom(scratch);
ExternalReference ref =
ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
@@ -2224,9 +2027,9 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
__ bind(&slow);
- __ pop(rbx); // Return address.
+ __ PopReturnAddressTo(rbx);
__ push(rdx);
- __ push(rbx);
+ __ PushReturnAddressFrom(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
@@ -2243,7 +2046,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
- __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
// rbx = parameter count (untagged)
// Check if the calling frame is an arguments adaptor frame.
@@ -2265,7 +2069,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
@@ -2326,7 +2130,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -2377,7 +2181,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 1 * kPointerSize));
+ __ addq(r8, args.GetArgumentOperand(2));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movq(rdx, rdi);
@@ -2416,7 +2220,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label arguments_loop, arguments_test;
__ movq(r8, rbx);
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
@@ -2443,7 +2247,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+ __ movq(args.GetArgumentOperand(2), rcx); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -2462,12 +2266,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -2488,18 +2293,19 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(2));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ movq(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __ movq(args.GetArgumentOperand(1), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
@@ -2529,7 +2335,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -2540,7 +2346,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(1));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -2616,9 +2422,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Condition is_smi = masm->CheckSmi(rax);
__ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
+ kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// rax: RegExp data (FixedArray)
@@ -2984,7 +2790,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ testb(rbx, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
@@ -3023,7 +2829,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(r8, args.GetArgumentOperand(0));
__ JumpIfNotSmi(r8, &slowcase);
__ SmiToInteger32(rbx, r8);
__ cmpl(rbx, Immediate(kMaxInlineLength));
@@ -3061,11 +2868,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
+ __ movq(r8, args.GetArgumentOperand(2));
__ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
+ __ movq(r8, args.GetArgumentOperand(1));
__ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ __ movq(r8, args.GetArgumentOperand(0));
__ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
// Fill out the elements FixedArray.
@@ -3196,7 +3003,8 @@ void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
- __ movq(rbx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rbx, args.GetArgumentOperand(0));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
@@ -3448,7 +3256,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
+ __ Abort(kUnexpectedFallThroughFromStringComparison);
#endif
__ bind(&check_unequal_objects);
@@ -3486,7 +3294,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
// Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ push(rdx);
__ push(rax);
@@ -3499,8 +3307,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(Smi::FromInt(NegativeComparisonResult(cc)));
}
- // Restore return address on the stack.
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
@@ -3511,16 +3318,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
@@ -3582,6 +3379,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi);
__ push(rbx);
@@ -3592,6 +3390,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ pop(rbx);
__ pop(rdi);
__ pop(rax);
+ __ SmiToInteger32(rax, rax);
}
__ jmp(&done);
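
The added Integer32ToSmi/SmiToInteger32 pair matters because rax holds a raw argument count across a call that can allocate; pushed untagged, the GC could misread that word as a heap pointer, so it is tagged as a Smi for the duration. On x64 of this vintage the payload lives in the upper 32 bits, so tagging is a shift (a sketch of the representation, not V8's code):

    #include <cstdint>

    int64_t Integer32ToSmi(int32_t value) {
      uint64_t payload = static_cast<uint32_t>(value);  // avoid signed-shift UB
      return static_cast<int64_t>(payload << 32);       // payload in high half
    }

    int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);           // arithmetic shift back
    }
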
@@ -3608,6 +3407,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function;
+ StackArgumentsAccessor args(rsp, argc_);
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
@@ -3615,15 +3415,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (ReceiverMightBeImplicit()) {
Label call;
// Get the receiver from the stack.
- // +1 ~ return address
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+ __ movq(rax, args.GetReceiverOperand());
// Call as function is indicated with the hole.
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &call, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
__ movq(rcx, GlobalObjectOperand());
__ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
+ __ movq(args.GetReceiverOperand(), rcx);
__ bind(&call);
}
@@ -3669,9 +3468,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ push(rdi); // put proxy as additional argument under return address
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
@@ -3685,13 +3484,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ bind(&non_function);
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ movq(args.GetReceiverOperand(), rdi);
__ Set(rax, argc_);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
- Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
+ isolate->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
@@ -3746,7 +3545,7 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated() {
+bool CEntryStub::IsPregenerated(Isolate* isolate) {
#ifdef _WIN64
return result_size_ == 1;
#else
@@ -4220,12 +4019,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const unsigned int kWordBeforeResultValue = 0x458B4909;
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
- int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
+ int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
// Get the object - go slow case if it's a smi.
Label slow;
-
- __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
+ StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0));
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -4235,7 +4035,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
+ __ movq(rdx, args.GetArgumentOperand(1));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -4270,12 +4070,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
// Get return address and delta to inlined map check.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
- __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
}
__ movq(kScratchRegister,
Operand(kScratchRegister, kOffsetToMapCheckValue));
@@ -4311,17 +4111,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
__ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
__ Set(rax, 0);
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
@@ -4334,24 +4134,24 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Assert it is a 1-byte signed value.
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __ movq(kScratchRegister, StackOperandForReturnAddress(0));
+ __ subq(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
__ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
+ __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
}
}
- __ ret(2 * kPointerSize + extra_stack_space);
+ __ ret((2 + extra_argument_offset) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ pop(rax);
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
@@ -4404,7 +4204,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
Factory* factory = masm->isolate()->factory();
// Index is not a smi.
@@ -4454,7 +4254,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+ __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
@@ -4480,7 +4280,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -4492,7 +4292,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
+ __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
@@ -4501,8 +4301,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rax, args.GetArgumentOperand(0)); // First argument (left).
+ __ movq(rdx, args.GetArgumentOperand(1)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
// Otherwise, at least one of the arguments is definitely a string,
@@ -4822,10 +4623,10 @@ void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
Register temp) {
- __ pop(temp);
+ __ PopReturnAddressTo(temp);
__ pop(rdx);
__ pop(rax);
- __ push(temp);
+ __ PushReturnAddressFrom(temp);
}
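
The PopReturnAddressTo/PushReturnAddressFrom pairs introduced throughout this
patch replace bare pop/push of the return address around tail-call argument
shuffling. A minimal sketch of the equivalent expansion, assuming the helpers
are thin wrappers over pop/push on x64 (the real definitions live in
macro-assembler-x64):

    // On x64 the return address sits at rsp[0]; pop it into a scratch
    // register, push the runtime arguments, then push it back last so the
    // tail call still returns to the original caller.
    void MacroAssembler::PopReturnAddressTo(Register dst)    { pop(dst); }
    void MacroAssembler::PushReturnAddressFrom(Register src) { push(src); }
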
@@ -4843,7 +4644,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
__ j(below, &done);
// Check the number to string cache.
- Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
@@ -4851,22 +4651,9 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- &not_cached);
+ slow);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(zero, slow);
- __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ movq(Operand(rsp, stack_offset), arg);
-
__ bind(&done);
}
@@ -5040,7 +4827,7 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ cmpq(kScratchRegister, candidate);
- __ Assert(equal, "oddball in string table is not undefined or the hole");
+ __ Assert(equal, kOddballInStringTableIsNotUndefinedOrTheHole);
}
__ jmp(&next_probe[i]);
@@ -5509,8 +5296,9 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// rsp[8] : right string
// rsp[16] : left string
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(0)); // left
+ __ movq(rax, args.GetArgumentOperand(1)); // right
// Check for identity.
Label not_same;
@@ -5529,9 +5317,9 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Inline comparison of ASCII strings.
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
+ __ PushReturnAddressFrom(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
@@ -5800,10 +5588,10 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ pop(tmp1); // Return address.
+ __ PopReturnAddressTo(tmp1);
__ push(left);
__ push(right);
- __ push(tmp1);
+ __ PushReturnAddressFrom(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
@@ -6023,9 +5811,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
+ kPointerSize);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+ __ movq(scratch, args.GetArgumentOperand(1));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
@@ -6045,7 +5835,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+ __ cmpq(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
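
The masked-index computation above is quadratic probing. A self-contained
model of the probe schedule, assuming GetProbeOffset(i) returns i + i*i as
the comment states (mask is capacity - 1, capacity a power of two):

    // Probe sequence for a given hash: hash, hash+2, hash+6, hash+12, ...
    // all taken modulo the power-of-two capacity via the mask.
    static inline uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t mask) {
      return (hash + i + i * i) & mask;
    }
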
@@ -6095,8 +5885,6 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
{ REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
@@ -6133,7 +5921,7 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
#undef REG
-bool RecordWriteStub::IsPregenerated() {
+bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6397,8 +6185,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
// Get array literal index, array literal and its map.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rdx, args.GetArgumentOperand(1));
+ __ movq(rbx, args.GetArgumentOperand(0));
__ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
@@ -6411,16 +6200,14 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// the runtime.
__ bind(&slow_elements);
- __ pop(rdi); // Pop return address and remember to put back later for tail
- // call.
+ __ PopReturnAddressTo(rdi);
__ push(rbx);
__ push(rcx);
__ push(rax);
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ push(rdx);
- __ push(rdi); // Return return address so that tail call returns to right
- // place.
+ __ PushReturnAddressFrom(rdi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
@@ -6467,7 +6254,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ movq(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
@@ -6525,96 +6312,133 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
template<class T>
-static void CreateArrayDispatch(MacroAssembler* masm) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- T stub(kind);
+static void CreateArrayDispatch(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ T stub(GetInitialFastElementsKind(),
+ CONTEXT_CHECK_REQUIRED,
+ mode);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ T stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
-static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
- // rbx - type info cell
- // rdx - kind
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ // rbx - type info cell (if mode != DISABLE_ALLOCATION_SITES)
+ // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
// rax - number of arguments
// rdi - constructor?
// rsp[0] - return address
// rsp[8] - last argument
- ASSERT(FAST_SMI_ELEMENTS == 0);
- ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ASSERT(FAST_ELEMENTS == 2);
- ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ASSERT(FAST_DOUBLE_ELEMENTS == 4);
- ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
-
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
- // is the low bit set? If so, we are holey and that is good.
- __ testb(rdx, Immediate(1));
Label normal_sequence;
- __ j(not_zero, &normal_sequence);
+ if (mode == DONT_OVERRIDE) {
+ ASSERT(FAST_SMI_ELEMENTS == 0);
+ ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ ASSERT(FAST_ELEMENTS == 2);
+ ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+ // is the low bit set? If so, we are holey and that is good.
+ __ testb(rdx, Immediate(1));
+ __ j(not_zero, &normal_sequence);
+ }
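+  // Why testb(rdx, Immediate(1)) suffices: per the ASSERTs above, the fast
+  // kinds are numbered so every holey kind is odd and one past its packed
+  // counterpart (e.g. FAST_SMI_ELEMENTS == 0, FAST_HOLEY_SMI_ELEMENTS == 1),
+  // so bit 0 acts as the "holey" flag and the incl(rdx) below converts a
+  // packed kind to its holey variant in place.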
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
- // We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
- __ incl(rdx);
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &normal_sequence);
- __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &normal_sequence);
+ if (mode == DISABLE_ALLOCATION_SITES) {
+ ElementsKind initial = GetInitialFastElementsKind();
+ ElementsKind holey_initial = GetHoleyElementsKind(initial);
- // Save the resulting elements kind in type info
- __ Integer32ToSmi(rdx, rdx);
- __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
- __ SmiToInteger32(rdx, rdx);
+ ArraySingleArgumentConstructorStub stub_holey(holey_initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
+ __ TailCallStub(&stub_holey);
- __ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
- for (int i = 0; i <= last_index; ++i) {
- Label next;
- ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
- __ cmpl(rdx, Immediate(kind));
- __ j(not_equal, &next);
- ArraySingleArgumentConstructorStub stub(kind);
+ __ bind(&normal_sequence);
+ ArraySingleArgumentConstructorStub stub(initial,
+ CONTEXT_CHECK_REQUIRED,
+ DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
- __ bind(&next);
- }
+ } else if (mode == DONT_OVERRIDE) {
+ // We are going to create a holey array, but our kind is non-holey.
+ // Fix kind and retry (only if we have an allocation site in the cell).
+ __ incl(rdx);
+ __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ Assert(equal, kExpectedAllocationSiteInCell);
+ }
- // If we reached this point there is a problem.
- __ Abort("Unexpected ElementsKind in array constructor");
+ // Save the resulting elements kind in type info
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
+ __ SmiToInteger32(rdx, rdx);
+
+ __ bind(&normal_sequence);
+ int last_index = GetSequenceIndexFromFastElementsKind(
+ TERMINAL_FAST_ELEMENTS_KIND);
+ for (int i = 0; i <= last_index; ++i) {
+ Label next;
+ ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+ __ cmpl(rdx, Immediate(kind));
+ __ j(not_equal, &next);
+ ArraySingleArgumentConstructorStub stub(kind);
+ __ TailCallStub(&stub);
+ __ bind(&next);
+ }
+
+ // If we reached this point there is a problem.
+ __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ } else {
+ UNREACHABLE();
+ }
}
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+ ElementsKind initial_kind = GetInitialFastElementsKind();
+ ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
+
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
+ (!FLAG_track_allocation_sites &&
+ (kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -6647,6 +6471,34 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+ MacroAssembler* masm,
+ AllocationSiteOverrideMode mode) {
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm, mode);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -6668,9 +6520,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, "Unexpected initial map for Array function");
+ __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
+ __ Check(equal, kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in rbx or a valid cell
Label okay_here;
@@ -6678,54 +6530,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &okay_here);
__ Cmp(FieldOperand(rbx, 0), cell_map);
- __ Assert(equal, "Expected property cell in register rbx");
+ __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
__ bind(&okay_here);
}
- Label no_info, switch_ready;
- // Get the elements kind and case on that.
+ Label no_info;
+ // If the type cell is undefined, or contains anything other than an
+ // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
-
- // The type cell may have undefined in its value.
- __ Cmp(rdx, undefined_sentinel);
- __ j(equal, &no_info);
-
- // The type cell has either an AllocationSite or a JSFunction
__ Cmp(FieldOperand(rdx, 0),
Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
__ j(not_equal, &no_info);
__ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
- __ jmp(&switch_ready);
- __ bind(&no_info);
- __ movq(rdx, Immediate(GetInitialFastElementsKind()));
- __ bind(&switch_ready);
-
- if (argument_count_ == ANY) {
- Label not_zero_case, not_one_case;
- __ testq(rax, rax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm);
+ GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
- __ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else if (argument_count_ == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
- } else if (argument_count_ == ONE) {
- CreateArrayDispatchOneArgument(masm);
- } else if (argument_count_ == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
- } else {
- UNREACHABLE();
- }
+ __ bind(&no_info);
+ GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}
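
Net effect of the rewrite: the old single dispatch preceded by a no_info
default-kind load is split into two generated bodies. If rbx holds a cell
whose value is an AllocationSite, its transition info supplies the kind and
dispatch runs in DONT_OVERRIDE mode; anything else falls through to no_info
and dispatches in DISABLE_ALLOCATION_SITES mode. Roughly:

    // kind = site->transition_info(); per-kind dispatch, may update the site.
    GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
    // No valid AllocationSite: fixed initial kind, site tracking disabled.
    GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
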
@@ -6746,7 +6570,8 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movq(rcx, args.GetArgumentOperand(0));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
@@ -6783,9 +6608,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, "Unexpected initial map for Array function");
+ __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
__ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
+ __ Check(equal, kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
@@ -6804,7 +6629,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ j(equal, &done);
__ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
- "Invalid ElementsKind for InternalArray or InternalPackedArray");
+ kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
diff --git a/chromium/v8/src/x64/code-stubs-x64.h b/chromium/v8/src/x64/code-stubs-x64.h
index e430bf2c805..41678ecd20e 100644
--- a/chromium/v8/src/x64/code-stubs-x64.h
+++ b/chromium/v8/src/x64/code-stubs-x64.h
@@ -69,7 +69,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated() { return true; }
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -321,7 +321,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE;
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
diff --git a/chromium/v8/src/x64/codegen-x64.cc b/chromium/v8/src/x64/codegen-x64.cc
index a823bf2e6d0..24773c2595d 100644
--- a/chromium/v8/src/x64/codegen-x64.cc
+++ b/chromium/v8/src/x64/codegen-x64.cc
@@ -394,7 +394,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (FLAG_debug_code) {
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Assert(equal, "object found in smi-only array");
+ __ Assert(equal, kObjectFoundInSmiOnlyArray);
}
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
@@ -577,7 +577,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ testb(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
+ __ Assert(zero, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
@@ -744,6 +744,28 @@ void Code::PatchPlatformCodeAge(byte* sequence,
}
+Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
+ ASSERT(index >= 0);
+ ASSERT(base_reg_.is(rsp) || base_reg_.is(rbp));
+ int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
+ int displacement_to_last_argument = base_reg_.is(rsp) ?
+ kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
+ displacement_to_last_argument += extra_displacement_to_last_argument_;
+ if (argument_count_reg_.is(no_reg)) {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // (argument_count_immediate_ + receiver - 1) * kPointerSize.
+ ASSERT(argument_count_immediate_ + receiver > 0);
+ return Operand(base_reg_, displacement_to_last_argument +
+ (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
+ } else {
+ // argument[0] is at base_reg_ + displacement_to_last_argument +
+ // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
+ return Operand(base_reg_, argument_count_reg_, times_pointer_size,
+ displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
+ }
+}
+
+
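
Worked example for the immediate-count path, assuming 8-byte pointers
(kPointerSize == kPCOnStackSize == 8): with
StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER),
receiver is 0, so

    args.GetArgumentOperand(0);  // Operand(rsp, 8 + (2 + 0 - 1 - 0) * 8) = rsp + 16
    args.GetArgumentOperand(1);  // Operand(rsp, 8 + (2 + 0 - 1 - 1) * 8) = rsp + 8

which matches the Operand(rsp, 2 * kPointerSize) / Operand(rsp, 1 * kPointerSize)
pair this patch replaces in StringAddStub::Generate.
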
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/x64/codegen-x64.h b/chromium/v8/src/x64/codegen-x64.h
index 5747e0bc6f0..7d1f59ad5ff 100644
--- a/chromium/v8/src/x64/codegen-x64.h
+++ b/chromium/v8/src/x64/codegen-x64.h
@@ -44,8 +44,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
- CodeGenerator() {
- InitializeAstVisitor();
+ explicit CodeGenerator(Isolate* isolate) {
+ InitializeAstVisitor(isolate);
}
static bool MakeCode(CompilationInfo* info);
@@ -61,7 +61,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
- static bool ShouldGenerateLog(Expression* type);
+ static bool ShouldGenerateLog(Isolate* isolate, Expression* type);
static bool RecordPositions(MacroAssembler* masm,
int pos,
@@ -103,6 +103,73 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
+
+enum StackArgumentsAccessorReceiverMode {
+ ARGUMENTS_CONTAIN_RECEIVER,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER
+};
+
+
+class StackArgumentsAccessor BASE_EMBEDDED {
+ public:
+ StackArgumentsAccessor(
+ Register base_reg,
+ int argument_count_immediate,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(no_reg),
+ argument_count_immediate_(argument_count_immediate),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ Register argument_count_reg,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(argument_count_reg),
+ argument_count_immediate_(0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ StackArgumentsAccessor(
+ Register base_reg,
+ const ParameterCount& parameter_count,
+ StackArgumentsAccessorReceiverMode receiver_mode =
+ ARGUMENTS_CONTAIN_RECEIVER,
+ int extra_displacement_to_last_argument = 0)
+ : base_reg_(base_reg),
+ argument_count_reg_(parameter_count.is_reg() ?
+ parameter_count.reg() : no_reg),
+ argument_count_immediate_(parameter_count.is_immediate() ?
+ parameter_count.immediate() : 0),
+ receiver_mode_(receiver_mode),
+ extra_displacement_to_last_argument_(
+ extra_displacement_to_last_argument) { }
+
+ Operand GetArgumentOperand(int index);
+ Operand GetReceiverOperand() {
+ ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
+    return GetArgumentOperand(0);
+ }
+
+ private:
+ const Register base_reg_;
+ const Register argument_count_reg_;
+ const int argument_count_immediate_;
+ const StackArgumentsAccessorReceiverMode receiver_mode_;
+ const int extra_displacement_to_last_argument_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
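
Typical call sites from this patch, showing how the receiver mode changes the
meaning of index 0 (argc here excludes the receiver):

    // Default mode is ARGUMENTS_CONTAIN_RECEIVER: index 0 is the receiver,
    // at rsp + (argc + 1) * kPointerSize, exactly the old open-coded operand.
    StackArgumentsAccessor args(rsp, argc);
    __ movq(rdx, args.GetReceiverOperand());

    // With ARGUMENTS_DONT_CONTAIN_RECEIVER, index 0 is the first real argument.
    StackArgumentsAccessor no_receiver(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movq(rax, no_receiver.GetArgumentOperand(0));  // rsp + 2 * kPointerSize
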
diff --git a/chromium/v8/src/x64/cpu-x64.cc b/chromium/v8/src/x64/cpu-x64.cc
index 96c53308326..4fa290a8b5f 100644
--- a/chromium/v8/src/x64/cpu-x64.cc
+++ b/chromium/v8/src/x64/cpu-x64.cc
@@ -72,18 +72,6 @@ void CPU::FlushICache(void* start, size_t size) {
#endif
}
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid Visual Studio runtime support the following code can be used
- // instead
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/x64/debug-x64.cc b/chromium/v8/src/x64/debug-x64.cc
index a337b0d052f..6612242a037 100644
--- a/chromium/v8/src/x64/debug-x64.cc
+++ b/chromium/v8/src/x64/debug-x64.cc
@@ -48,11 +48,10 @@ bool BreakLocationIterator::IsDebugBreakAtReturn() {
// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
// for the precise return instructions sequence.
void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
+ ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
+ debug_info_->GetIsolate()->debug()->debug_break_return()->entry(),
+ Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
@@ -81,8 +80,8 @@ bool BreakLocationIterator::IsDebugBreakAtSlot() {
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
+ debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(),
+ Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
@@ -124,14 +123,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((object_regs & (1 << r)) != 0) {
__ push(reg);
}
- // Store the 64-bit value as two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
+ __ PushInt64AsTwoSmis(reg);
}
}
@@ -156,12 +149,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
+ __ PopInt64AsTwoSmis(reg);
}
}
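
PushInt64AsTwoSmis/PopInt64AsTwoSmis fold the open-coded sequences deleted
above into MacroAssembler helpers. A sketch of the equivalent expansion,
reconstructed from the removed lines (the real definitions live in
macro-assembler-x64 and may take an explicit scratch register):

    // Split a 64-bit value into two 32-bit halves, each tagged as a smi, so
    // the GC never sees a raw 64-bit integer on the stack.
    void MacroAssembler::PushInt64AsTwoSmis(Register src) {
      movq(kScratchRegister, src);
      Integer32ToSmi(src, src);                            // low half
      push(src);
      sar(kScratchRegister, Immediate(32));
      Integer32ToSmi(kScratchRegister, kScratchRegister);  // high half
      push(kScratchRegister);
    }

    // Inverse: pop the high half, shift it back up, and OR in the low half.
    void MacroAssembler::PopInt64AsTwoSmis(Register dst) {
      pop(kScratchRegister);
      SmiToInteger32(kScratchRegister, kScratchRegister);
      shl(kScratchRegister, Immediate(32));
      pop(dst);
      SmiToInteger32(dst, dst);
      or_(dst, kScratchRegister);
    }
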
diff --git a/chromium/v8/src/x64/deoptimizer-x64.cc b/chromium/v8/src/x64/deoptimizer-x64.cc
index b45e9663e2a..303b756cacd 100644
--- a/chromium/v8/src/x64/deoptimizer-x64.cc
+++ b/chromium/v8/src/x64/deoptimizer-x64.cc
@@ -42,7 +42,7 @@ const int Deoptimizer::table_entry_size_ = 10;
int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
+ return Assembler::kCallSequenceLength;
}
@@ -69,7 +69,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address call_address = instruction_start + deopt_data->Pc(i)->value();
// There is room enough to write a long call instruction because we pad
// LLazyBailout instructions with nops if necessary.
- CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
+ CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
RelocInfo::NONE64);
ASSERT(prev_call_address == NULL ||
@@ -105,12 +105,7 @@ static const byte kNopByteTwo = 0x90;
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -126,12 +121,7 @@ void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -146,195 +136,33 @@ void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
- ASSERT_EQ(interrupt_code->entry(),
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
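
The patch state is recognized purely from the bytes in front of the 32-bit
call-target field. Assuming kNopByteOne/kNopByteTwo form the two-byte
0x66 0x90 nop (kNopByteTwo is defined as 0x90 above) and kCallInstruction is
the 0xE8 call-rel32 opcode, the back-edge site looks like:

    // pc_after - 7: kJnsInstruction   or  kNopByteOne  (0x66)
    // pc_after - 6: kJnsOffset        or  kNopByteTwo  (0x90)
    // pc_after - 5: kCallInstruction  (0xE8)
    // pc_after - 4: imm32 call target -> InterruptCheck builtin, or the
    //               OnStackReplacement builtin once patched for OSR.

so nopping out the jns makes the interrupt call unconditional, and
retargeting the imm32 turns it into the OSR entry.
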
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
- "(fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
- output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- intptr_t pc = reinterpret_cast<intptr_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- PrintFunctionName();
- PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
- }
-}
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -531,9 +359,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(rbx, FrameDescription::state_offset()));
- }
+ __ push(Operand(rbx, FrameDescription::state_offset()));
__ push(Operand(rbx, FrameDescription::pc_offset()));
__ push(Operand(rbx, FrameDescription::continuation_offset()));
diff --git a/chromium/v8/src/x64/disasm-x64.cc b/chromium/v8/src/x64/disasm-x64.cc
index eefa70372ec..9984a46307d 100644
--- a/chromium/v8/src/x64/disasm-x64.cc
+++ b/chromium/v8/src/x64/disasm-x64.cc
@@ -332,10 +332,10 @@ class DisassemblerX64 {
private:
enum OperandSize {
- BYTE_SIZE = 0,
- WORD_SIZE = 1,
- DOUBLEWORD_SIZE = 2,
- QUADWORD_SIZE = 3
+ OPERAND_BYTE_SIZE = 0,
+ OPERAND_WORD_SIZE = 1,
+ OPERAND_DOUBLEWORD_SIZE = 2,
+ OPERAND_QUADWORD_SIZE = 3
};
const NameConverter& converter_;
@@ -369,10 +369,10 @@ class DisassemblerX64 {
bool rex_w() { return (rex_ & 0x08) != 0; }
OperandSize operand_size() {
- if (byte_size_operand_) return BYTE_SIZE;
- if (rex_w()) return QUADWORD_SIZE;
- if (operand_size_ != 0) return WORD_SIZE;
- return DOUBLEWORD_SIZE;
+ if (byte_size_operand_) return OPERAND_BYTE_SIZE;
+ if (rex_w()) return OPERAND_QUADWORD_SIZE;
+ if (operand_size_ != 0) return OPERAND_WORD_SIZE;
+ return OPERAND_DOUBLEWORD_SIZE;
}
char operand_size_code() {
@@ -562,19 +562,19 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
int64_t value;
int count;
switch (size) {
- case BYTE_SIZE:
+ case OPERAND_BYTE_SIZE:
value = *data;
count = 1;
break;
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<int16_t*>(data);
count = 2;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data);
count = 4;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data);
count = 4;
break;
@@ -682,7 +682,8 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
AppendToBuffer("%s%c ", mnem, operand_size_code());
int count = PrintRightOperand(data + 1);
AppendToBuffer(",0x");
- OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
+ OperandSize immediate_size =
+ byte_size_immediate ? OPERAND_BYTE_SIZE : operand_size();
count += PrintImmediate(data + 1 + count, immediate_size);
return 1 + count;
}
@@ -1153,6 +1154,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xC2) {
+ // Intel manual 2A, Table 3-18.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {
+ "cmpeqsd",
+ "cmpltsd",
+ "cmplesd",
+ "cmpunordsd",
+ "cmpneqsd",
+ "cmpnltsd",
+ "cmpnlesd",
+ "cmpordsd"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[current[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ current += 2;
} else {
UnimplementedInstruction();
}
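
A concrete decode under the standard SSE2 encoding (an illustrative example,
not taken from the patch): the byte sequence F2 0F C2 C8 01 is
cmpsd xmm1, xmm0, 1; get_modrm on 0xC8 yields regop = 1 and rm = 0, and
current[1] == 1 indexes "cmpltsd", so the branch above prints

    cmpltsd xmm1,xmm0
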
@@ -1229,8 +1249,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0xA2 || opcode == 0x31) {
- // RDTSC or CPUID
+ } else if (opcode == 0xA2) {
+ // CPUID
AppendToBuffer("%s", mnemonic);
} else if ((opcode & 0xF0) == 0x40) {
@@ -1294,14 +1314,14 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "nop";
case 0x2A: // F2/F3 prefix.
return "cvtsi2s";
- case 0x31:
- return "rdtsc";
case 0x51: // F2 prefix.
return "sqrtsd";
case 0x58: // F2 prefix.
return "addsd";
case 0x59: // F2 prefix.
return "mulsd";
+ case 0x5A: // F2 prefix.
+ return "cvtsd2ss";
case 0x5C: // F2 prefix.
return "subsd";
case 0x5E: // F2 prefix.
@@ -1398,15 +1418,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case MOVE_REG_INSTR: {
byte* addr = NULL;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
data += 9;
break;
@@ -1611,11 +1631,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov%c %s, ",
operand_size_code(),
NameOfCPURegister(reg));
- data += PrintImmediate(data, DOUBLEWORD_SIZE);
+ data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE);
} else {
AppendToBuffer("movb %s, ",
NameOfByteCPURegister(reg));
- data += PrintImmediate(data, BYTE_SIZE);
+ data += PrintImmediate(data, OPERAND_BYTE_SIZE);
}
break;
}
@@ -1644,7 +1664,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA1: // Fall through.
case 0xA3:
switch (operand_size()) {
- case DOUBLEWORD_SIZE: {
+ case OPERAND_DOUBLEWORD_SIZE: {
const char* memory_location = NameOfAddress(
reinterpret_cast<byte*>(
*reinterpret_cast<int32_t*>(data + 1)));
@@ -1656,7 +1676,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 5;
break;
}
- case QUADWORD_SIZE: {
+ case OPERAND_QUADWORD_SIZE: {
// New x64 instruction mov rax,(imm_64).
const char* memory_location = NameOfAddress(
*reinterpret_cast<byte**>(data + 1));
@@ -1682,15 +1702,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA9: {
int64_t value = 0;
switch (operand_size()) {
- case WORD_SIZE:
+ case OPERAND_WORD_SIZE:
value = *reinterpret_cast<uint16_t*>(data + 1);
data += 3;
break;
- case DOUBLEWORD_SIZE:
+ case OPERAND_DOUBLEWORD_SIZE:
value = *reinterpret_cast<uint32_t*>(data + 1);
data += 5;
break;
- case QUADWORD_SIZE:
+ case OPERAND_QUADWORD_SIZE:
value = *reinterpret_cast<int32_t*>(data + 1);
data += 5;
break;
diff --git a/chromium/v8/src/x64/full-codegen-x64.cc b/chromium/v8/src/x64/full-codegen-x64.cc
index bac4e793b27..c24512ecae3 100644
--- a/chromium/v8/src/x64/full-codegen-x64.cc
+++ b/chromium/v8/src/x64/full-codegen-x64.cc
@@ -280,8 +280,7 @@ void FullCodeGenerator::Generate() {
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -341,8 +340,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -388,8 +386,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(rax);
EmitProfilingCounterReset();
@@ -753,9 +751,9 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
- __ Check(not_equal, "Declaration in with context.");
+ __ Check(not_equal, kDeclarationInWithContext);
__ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
- __ Check(not_equal, "Declaration in catch context.");
+ __ Check(not_equal, kDeclarationInCatchContext);
}
}
@@ -1292,7 +1290,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
- __ Push(info);
+ __ Move(rbx, info);
__ CallStub(&stub);
} else {
__ push(rsi);
@@ -2192,7 +2190,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
- __ Abort("Generator failed to resume.");
+ __ Abort(kGeneratorFailedToResume);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
@@ -2456,7 +2454,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ movq(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ Check(equal, "Let binding re-initialization.");
+ __ Check(equal, kLetBindingReInitialization);
}
// Perform the assignment.
__ movq(location, rax);
@@ -2937,7 +2935,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
VisitForAccumulatorValue(args->at(0));
- Label materialize_true, materialize_false;
+ Label materialize_true, materialize_false, skip_lookup;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2951,7 +2949,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, if_true);
+ __ j(not_zero, &skip_lookup);
// Check for fast case object. Generate false result for slow case object.
__ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
@@ -2969,7 +2967,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmpq(rcx, Immediate(0));
__ j(equal, &done);
- __ LoadInstanceDescriptors(rbx, rbx);
+ __ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
@@ -2977,24 +2975,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx,
Operand(
- rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
+ r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
+ __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// internalized string "valueOf" the result is false.
__ jmp(&entry);
__ bind(&loop);
- __ movq(rdx, FieldOperand(rbx, 0));
+ __ movq(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(rbx, rcx);
+ __ cmpq(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
- // Reload map as register rbx was used as temporary above.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+ // Set the bit in the map to indicate that there is no local valueOf field.
+ __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+
+ __ bind(&skip_lookup);
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
@@ -3006,14 +3008,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
__ cmpq(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
context()->Plug(if_true, if_false);
}
@@ -3249,7 +3246,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
@@ -3398,14 +3395,14 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register index,
Register value,
uint32_t encoding_mask) {
- __ Check(masm()->CheckSmi(index), "Non-smi index");
- __ Check(masm()->CheckSmi(value), "Non-smi value");
+ __ Check(masm()->CheckSmi(index), kNonSmiIndex);
+ __ Check(masm()->CheckSmi(value), kNonSmiValue);
__ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
+ __ Check(less, kIndexIsTooLarge);
__ SmiCompare(index, Smi::FromInt(0));
- __ Check(greater_equal, "Index is negative");
+ __ Check(greater_equal, kIndexIsNegative);
__ push(value);
__ movq(value, FieldOperand(string, HeapObject::kMapOffset));
@@ -3413,7 +3410,7 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
__ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
__ cmpq(value, Immediate(encoding_mask));
- __ Check(equal, "Unexpected string type");
+ __ Check(equal, kUnexpectedStringType);
__ pop(value);
}
@@ -3777,7 +3774,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
+ __ Abort(kAttemptToUseUndefinedCache);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
context()->Plug(rax);
return;
@@ -3971,7 +3968,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
__ cmpq(index, array_length);
- __ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
+ __ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
__ movq(string, FieldOperand(elements,
@@ -4335,35 +4332,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
default:
UNREACHABLE();
}
}
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- UnaryOpStub stub(expr->op());
- // UnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -4819,7 +4793,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
- __ pop(rdx);
+ __ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
diff --git a/chromium/v8/src/x64/ic-x64.cc b/chromium/v8/src/x64/ic-x64.cc
index 6e238c76ece..4a7c68a53ca 100644
--- a/chromium/v8/src/x64/ic-x64.cc
+++ b/chromium/v8/src/x64/ic-x64.cc
@@ -570,10 +570,10 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ j(not_zero, &slow);
// Everything is fine, call runtime.
- __ pop(rcx);
+ __ PopReturnAddressTo(rcx);
__ push(rdx); // receiver
__ push(rax); // key
- __ push(rcx); // return address
+ __ PushReturnAddressFrom(rcx);
// Perform tail call to the entry.
__ TailCallExternalReference(
@@ -822,8 +822,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
extra_state,
Code::NORMAL,
argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- rax);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -859,8 +859,8 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
__ bind(&miss);
}
@@ -904,8 +904,8 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
// -----------------------------------
Label miss;
- // Get the receiver of the function from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
@@ -940,8 +940,8 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
__ IncrementCounter(counters->keyed_call_miss(), 1);
}
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Enter an internal frame.
{
@@ -965,7 +965,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// This can happen only for regular CallIC but not KeyedCallIC.
if (id == IC::kCallIC_Miss) {
Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
+ __ movq(rdx, args.GetReceiverOperand());
__ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
@@ -975,7 +975,7 @@ void CallICBase::GenerateMiss(MacroAssembler* masm,
// Patch the receiver on the stack.
__ bind(&global);
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
__ bind(&invoke);
}
@@ -1005,8 +1005,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
GenerateMiss(masm, argc, extra_ic_state);
}
@@ -1023,8 +1023,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Label do_call, slow_call, slow_load;
Label check_number_dictionary, check_name, lookup_monomorphic_cache;
@@ -1302,7 +1302,8 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
Label slow, notin;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
__ movq(rdi, mapped_location);
@@ -1331,7 +1332,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(
+ masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
GenerateMiss(masm);
@@ -1369,10 +1370,10 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->load_miss(), 1);
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rax); // receiver
__ push(rcx); // name
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
ExternalReference ref =
@@ -1388,10 +1389,10 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rax); // receiver
__ push(rcx); // name
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
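
Each of these miss and runtime stubs uses the same tail-call idiom: pop the return address into a scratch register, push the runtime arguments in its place, then push the return address back on top so the runtime call returns directly to the IC's caller. The resulting stack, sketched for the two-argument case:

// before                 after the re-push
// rsp[0]: return addr    rsp[0] : return addr (restored)
//                        rsp[8] : name
//                        rsp[16]: receiver

On x64 the new PopReturnAddressTo/PushReturnAddressFrom helpers reduce to a plain pop and push; naming them lets ports with a different return-address scheme substitute their own sequence.
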
@@ -1408,10 +1409,10 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->keyed_load_miss(), 1);
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rax); // name
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
@@ -1429,10 +1430,10 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rax); // name
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
@@ -1452,8 +1453,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1468,11 +1469,11 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // name
__ push(rax); // value
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
ExternalReference ref =
@@ -1512,13 +1513,13 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx);
__ push(rcx);
__ push(rax);
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode));
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
@@ -1534,13 +1535,13 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode)); // Strict mode.
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
@@ -1555,11 +1556,11 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
@@ -1575,11 +1576,11 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
@@ -1595,11 +1596,11 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// -- rsp[0] : return address
// -----------------------------------
- __ pop(rbx);
+ __ PopReturnAddressTo(rbx);
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
- __ push(rbx); // return address
+ __ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.cc b/chromium/v8/src/x64/lithium-codegen-x64.cc
index c2207317c18..483d537568b 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.cc
+++ b/chromium/v8/src/x64/lithium-codegen-x64.cc
@@ -32,6 +32,7 @@
#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -39,7 +40,7 @@ namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
+class SafepointGenerator V8_FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -47,13 +48,13 @@ class SafepointGenerator : public CallWrapper {
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
+ virtual ~SafepointGenerator() {}
- virtual void BeforeCall(int call_size) const {
+ virtual void BeforeCall(int call_size) const V8_OVERRIDE {
codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
}
- virtual void AfterCall() const {
+ virtual void AfterCall() const V8_OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@@ -96,7 +97,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LChunkBuilder::Abort(const char* reason) {
+void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -257,6 +258,21 @@ bool LCodeGen::GeneratePrologue() {
}
+void LCodeGen::GenerateOsrPrologue() {
+ // Generate the OSR entry prologue at the first unknown OSR value, or if there
+ // are none, at the OSR entrypoint instruction.
+ if (osr_pc_offset_ >= 0) return;
+
+ osr_pc_offset_ = masm()->pc_offset();
+
+ // Adjust the frame size, subsuming the unoptimized frame into the
+ // optimized frame.
+ int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+ ASSERT(slots >= 0);
+ __ subq(rsp, Immediate(slots * kPointerSize));
+}
+
+
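
GenerateOsrPrologue grows the optimized frame only by the difference between its spill area and the unoptimized frame already sitting on the stack. A worked example with assumed slot counts:

#include <cassert>
// 10 optimized spill slots, 4 inherited from the unoptimized frame:
// rsp drops by 6 slots, i.e. 48 bytes with 8-byte pointers.
int OsrStackAdjustmentBytes(int stack_slots, int unoptimized_slots) {
  int slots = stack_slots - unoptimized_slots;
  assert(slots >= 0);
  return slots * 8;
}
int main() { assert(OsrStackAdjustmentBytes(10, 4) == 48); }
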
bool LCodeGen::GenerateBody() {
ASSERT(is_generating());
bool emit_instructions = true;
@@ -278,6 +294,8 @@ bool LCodeGen::GenerateBody() {
instr->Mnemonic());
}
+ RecordAndUpdatePosition(instr->position());
+
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
@@ -331,6 +349,10 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
+
+ int pos = instructions_->at(code->instruction_index())->position();
+ RecordAndUpdatePosition(pos);
+
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@@ -351,6 +373,7 @@ bool LCodeGen::GenerateDeferredCode() {
}
code->Generate();
if (NeedsDeferredFrame()) {
+ __ bind(code->done());
Comment(";;; Destroy frame");
ASSERT(frame_is_built_);
frame_is_built_ = false;
@@ -444,7 +467,7 @@ ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
- return constant->handle();
+ return constant->handle(isolate());
}
@@ -497,37 +520,57 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
+ int object_index = 0;
+ int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
-
- // TODO(mstarzinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- if (value == NULL) {
- int arguments_count = environment->values()->length() - translation_size;
- translation->BeginArgumentsObject(arguments_count);
- for (int i = 0; i < arguments_count; ++i) {
- LOperand* value = environment->values()->at(translation_size + i);
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(translation_size + i),
- environment->HasUint32ValueAt(translation_size + i));
- }
- continue;
- }
-
- AddToTranslation(translation,
+ AddToTranslation(environment,
+ translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i));
+ environment->HasUint32ValueAt(i),
+ &object_index,
+ &dematerialized_index);
}
}
-void LCodeGen::AddToTranslation(Translation* translation,
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32) {
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer) {
+ if (op == LEnvironment::materialization_marker()) {
+ int object_index = (*object_index_pointer)++;
+ if (environment->ObjectIsDuplicateAt(object_index)) {
+ int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+ translation->DuplicateObject(dupe_of);
+ return;
+ }
+ int object_length = environment->ObjectLengthAt(object_index);
+ if (environment->ObjectIsArgumentsAt(object_index)) {
+ translation->BeginArgumentsObject(object_length);
+ } else {
+ translation->BeginCapturedObject(object_length);
+ }
+ int dematerialized_index = *dematerialized_index_pointer;
+ int env_offset = environment->translation_size() + dematerialized_index;
+ *dematerialized_index_pointer += object_length;
+ for (int i = 0; i < object_length; ++i) {
+ LOperand* value = environment->values()->at(env_offset + i);
+ AddToTranslation(environment,
+ translation,
+ value,
+ environment->HasTaggedValueAt(env_offset + i),
+ environment->HasUint32ValueAt(env_offset + i),
+ object_index_pointer,
+ dematerialized_index_pointer);
+ }
+ return;
+ }
+
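
The markers make the environment self-describing: the first translation_size values are the frame proper, and the field values of every dematerialized object are appended after them, consumed here by the recursive call. An index walk with assumed sizes:

// translation_size == 2, one captured object of length 3:
//   values()[0]    ordinary frame value
//   values()[1]    materialization marker for object #0
//   values()[2..4] object #0's fields (env_offset = 2 + 0)
// A later marker whose object duplicates #0 emits DuplicateObject(0)
// instead of re-serializing the three fields.
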
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -556,7 +599,7 @@ void LCodeGen::AddToTranslation(Translation* translation,
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
+ int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@@ -661,13 +704,13 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort("bailout was not prepared");
+ Abort(kBailoutWasNotPrepared);
return;
}
ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
- if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
+ if (info()->ShouldTrapOnDeopt()) {
Label done;
if (cc != no_condition) {
__ j(NegateCondition(cc), &done, Label::kNear);
@@ -859,6 +902,14 @@ void LCodeGen::RecordPosition(int position) {
}
+void LCodeGen::RecordAndUpdatePosition(int position) {
+ if (position >= 0 && position != old_position_) {
+ masm()->positions_recorder()->RecordPosition(position);
+ old_position_ = position;
+ }
+}
+
+
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -945,8 +996,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Record the address of the first unknown OSR value as the place to enter.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
@@ -1182,6 +1232,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpl(dividend, Immediate(0));
__ j(less, &negative, Label::kNear);
__ sarl(dividend, Immediate(power));
+ if (divisor < 0) __ negl(dividend);
__ jmp(&done, Label::kNear);
__ bind(&negative);
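
The added negl fixes division by a negative power of two on this non-negative path: the arithmetic shift produces the quotient for the divisor's magnitude, which must then be negated. In portable terms (a sketch for an evenly dividing, non-negative dividend):

#include <cassert>
int DivideByNegativePowerOfTwo(int dividend, int power) {
  return -(dividend >> power);  // shift for |divisor|, then the new negl
}
int main() { assert(DivideByNegativePowerOfTwo(8, 2) == -2); }
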
@@ -1264,13 +1315,17 @@ void LCodeGen::DoMulI(LMulI* instr) {
LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movl(kScratchRegister, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ movq(kScratchRegister, left);
+ } else {
+ __ movl(kScratchRegister, left);
+ }
}
bool can_overflow =
instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (right->IsConstantOperand()) {
- int right_value = ToInteger32(LConstantOperand::cast(right));
+ int32_t right_value = ToInteger32(LConstantOperand::cast(right));
if (right_value == -1) {
__ negl(left);
} else if (right_value == 0) {
@@ -1312,14 +1367,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger32(left, left);
+ __ SmiToInteger64(left, left);
__ imul(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ SmiToInteger32(left, left);
+ __ SmiToInteger64(left, left);
__ imul(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
@@ -1333,9 +1388,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
- __ testl(left, left);
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ testq(left, left);
+ } else {
+ __ testl(left, left);
+ }
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
+ // The constant can't be represented as a Smi due to the immediate size limit.
+ ASSERT(!instr->hydrogen_value()->representation().IsSmi());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
@@ -1343,11 +1404,19 @@ void LCodeGen::DoMulI(LMulI* instr) {
DeoptimizeIf(less, instr->environment());
}
} else if (right->IsStackSlot()) {
- __ orl(kScratchRegister, ToOperand(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToOperand(right));
+ } else {
+ __ orl(kScratchRegister, ToOperand(right));
+ }
DeoptimizeIf(sign, instr->environment());
} else {
// Test the non-zero operand for negative sign.
- __ orl(kScratchRegister, ToRegister(right));
+ if (instr->hydrogen_value()->representation().IsSmi()) {
+ __ or_(kScratchRegister, ToRegister(right));
+ } else {
+ __ orl(kScratchRegister, ToRegister(right));
+ }
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
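
The bailout works because an integer product of zero stands for -0 exactly when an operand was negative; OR-ing the saved left operand into the right operand moves that fact into the sign flag. The same predicate in plain C++ (illustrative only):

#include <cassert>
#include <cstdint>
bool ProducesMinusZero(int32_t a, int32_t b) {
  if (static_cast<int64_t>(a) * b != 0) return false;  // non-zero result
  return ((a | b) & 0x80000000u) != 0;                 // either sign bit set
}
int main() {
  assert(ProducesMinusZero(-3, 0));
  assert(!ProducesMinusZero(3, 0));
}
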
@@ -1362,7 +1431,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
+ int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
switch (instr->op()) {
case Token::BIT_AND:
__ andl(ToRegister(left), Immediate(right_operand));
@@ -1371,7 +1440,11 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ orl(ToRegister(left), Immediate(right_operand));
break;
case Token::BIT_XOR:
- __ xorl(ToRegister(left), Immediate(right_operand));
+ if (right_operand == int32_t(~0)) {
+ __ notl(ToRegister(left));
+ } else {
+ __ xorl(ToRegister(left), Immediate(right_operand));
+ }
break;
default:
UNREACHABLE();
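
The new BIT_XOR special case exploits x ^ ~0 == ~x, so notl (which needs no immediate operand) replaces xorl with a four-byte immediate:

#include <cstdint>
static_assert((0x1234u ^ 0xFFFFFFFFu) == ~0x1234u,
              "xor with all-ones is bitwise not");
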
@@ -1442,7 +1515,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
}
} else {
- int value = ToInteger32(LConstantOperand::cast(right));
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::ROR:
@@ -1541,7 +1614,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
+ Handle<Object> value = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ LoadObject(ToRegister(instr->result()), value);
}
@@ -1642,7 +1715,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
+ __ Check(equal, kUnexpectedStringType);
__ pop(value);
}
@@ -1656,13 +1729,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToRegister(instr->value()));
CallRuntime(Runtime::kThrow, 1, instr);
@@ -1869,6 +1935,13 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+ int false_block = instr->FalseDestination(chunk_);
+ __ j(cc, chunk_->GetAssemblyLabel(false_block));
+}
+
+
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
__ int3();
}
@@ -2142,6 +2215,28 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+ if (instr->hydrogen()->representation().IsTagged()) {
+ Register input_reg = ToRegister(instr->object());
+ __ Cmp(input_reg, factory()->the_hole_value());
+ EmitBranch(instr, equal);
+ return;
+ }
+
+ XMMRegister input_reg = ToDoubleRegister(instr->object());
+ __ ucomisd(input_reg, input_reg);
+ EmitFalseBranch(instr, parity_odd);
+
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(MemOperand(rsp, 0), input_reg);
+ __ addq(rsp, Immediate(kDoubleSize));
+
+ int offset = sizeof(kHoleNanUpper32);
+ __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
+ EmitBranch(instr, equal);
+}
+
+
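
DoCmpHoleAndBranch first rules out ordinary doubles with ucomisd (a NaN is unordered against itself, which sets parity), then tells the hole apart from other NaNs by its payload: the hole is one specific NaN whose upper 32 bits equal kHoleNanUpper32. A standalone sketch (the payload is passed in rather than assumed; the real constant is defined with the heap constants):

#include <cstdint>
#include <cstring>
bool IsHoleNan(double d, uint32_t hole_nan_upper32) {
  if (d == d) return false;  // not a NaN at all
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}
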
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
@@ -2406,15 +2501,15 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
@@ -2574,7 +2669,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// The argument count parameter is a smi
__ SmiToInteger32(reg, reg);
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
- __ pop(return_addr_reg);
+ __ PopReturnAddressTo(return_addr_reg);
__ shl(reg, Immediate(kPointerSizeLog2));
__ addq(rsp, reg);
__ jmp(return_addr_reg);
@@ -2733,111 +2828,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ movq(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstant()) {
- Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
- __ LoadObject(result, constant);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstant();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ Move(rcx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
@@ -2904,8 +2894,8 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
if (instr->length()->IsConstantOperand() &&
instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+ int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
int index = (const_length - const_index) + 1;
__ movq(result, Operand(arguments, index * kPointerSize));
} else {
@@ -3092,9 +3082,9 @@ Operand LCodeGen::BuildFastArrayOperand(
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
+ int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
+ Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
((constant_value + additional_index) << shift_size)
@@ -3432,16 +3422,27 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
}
+void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
+ Register input_reg = ToRegister(instr->value());
+ __ testq(input_reg, input_reg);
+ Label is_positive;
+ __ j(not_sign, &is_positive, Label::kNear);
+ __ neg(input_reg); // Sets flags.
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
+}
+
+
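
EmitSmiMathAbs leans on neg setting flags: every negative input except the most negative one becomes positive, while the minimum value wraps around and leaves the sign flag set, which is the DeoptimizeIf(negative) case. With x64 smis occupying the full 64-bit word:

#include <cstdint>
// The one input whose absolute value is unrepresentable after `neg`.
bool SmiAbsMustDeopt(int64_t raw_smi) { return raw_smi == INT64_MIN; }
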
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@@ -3457,15 +3458,15 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
__ andpd(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
+ } else if (r.IsSmi()) {
+ EmitSmiMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- EmitIntegerMathAbs(instr);
- __ Integer32ToSmi(input_reg, input_reg);
+ EmitSmiMathAbs(instr);
__ bind(deferred->exit());
}
}
@@ -3666,90 +3667,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
- // Choose the right register for the first argument depending on
- // calling convention.
-#ifdef _WIN64
- ASSERT(ToRegister(instr->global_object()).is(rcx));
- Register global_object = rcx;
-#else
- ASSERT(ToRegister(instr->global_object()).is(rdi));
- Register global_object = rdi;
-#endif
-
+ // Assert that register size is twice the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
- __ movq(global_object,
- FieldOperand(global_object, GlobalObject::kNativeContextOffset));
+ // Load the native context.
+ Register global_object = ToRegister(instr->global_object());
+ Register native_context = global_object;
+ __ movq(native_context, FieldOperand(
+ global_object, GlobalObject::kNativeContextOffset));
+
+ // Load state (the FixedArray holding the native context's random seeds).
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the native context's random seeds
+ Register state = native_context;
+ __ movq(state, FieldOperand(native_context, kRandomSeedOffset));
// Load state[0].
- __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ testl(rax, rax);
- __ j(zero, deferred->entry());
+ Register state0 = ToRegister(instr->scratch());
+ __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
// Load state[1].
- __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
+ Register state1 = ToRegister(instr->scratch2());
+ __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- // Only operate on the lower 32 bit of rax.
- __ movzxwl(rdx, rax);
- __ imull(rdx, rdx, Immediate(18273));
- __ shrl(rax, Immediate(16));
- __ addl(rax, rdx);
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ movzxwl(scratch3, state0);
+ __ imull(scratch3, scratch3, Immediate(18273));
+ __ shrl(state0, Immediate(16));
+ __ addl(state0, scratch3);
// Save state[0].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzxwl(rdx, rcx);
- __ imull(rdx, rdx, Immediate(36969));
- __ shrl(rcx, Immediate(16));
- __ addl(rcx, rdx);
+ __ movzxwl(scratch3, state1);
+ __ imull(scratch3, scratch3, Immediate(36969));
+ __ shrl(state1, Immediate(16));
+ __ addl(state1, scratch3);
// Save state[1].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
+ __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shll(rax, Immediate(14));
- __ andl(rcx, Immediate(0x3FFFF));
- __ addl(rax, rcx);
+ Register random = state0;
+ __ shll(random, Immediate(14));
+ __ andl(state1, Immediate(0x3FFFF));
+ __ addl(random, state1);
- __ bind(deferred->exit());
// Convert the 32 random bits in the random register to 0.(32 random bits)
// in a double by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- __ movq(rcx, V8_INT64_C(0x4130000000000000),
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // We use xmm0 as a fixed scratch register here.
+ XMMRegister scratch4 = xmm0;
+ __ movq(scratch3, V8_INT64_C(0x4130000000000000),
RelocInfo::NONE64); // 1.0 x 2^20 as double
- __ movq(xmm2, rcx);
- __ movd(xmm1, rax);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Return value is in rax.
+ __ movq(scratch4, scratch3);
+ __ movd(result, random);
+ __ xorps(result, scratch4);
+ __ subsd(result, scratch4);
}
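
The inlined generator runs two 16-bit multiply-with-carry streams, combines them into 32 bits, and converts to a double in [0, 1) without an int-to-float instruction: xorps against 1.0 x 2^20 acts as an OR (the bit ranges don't overlap), planting the random bits in the mantissa, and subtracting the base leaves random * 2^-32. The same arithmetic in C++ (a sketch, not the runtime's implementation):

#include <cstdint>
#include <cstring>
double NextRandom(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273 * (*state0 & 0xFFFF) + (*state0 >> 16);
  *state1 = 36969 * (*state1 & 0xFFFF) + (*state1 >> 16);
  uint32_t random = (*state0 << 14) + (*state1 & 0x3FFFF);
  uint64_t bits = 0x4130000000000000ULL | random;  // 1.0 x 2^20 | mantissa
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // subtract 2^20; random * 2^-32 remains
}
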
@@ -3939,6 +3914,14 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+ Register function = ToRegister(instr->function());
+ Register code_object = ToRegister(instr->code_object());
+ __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
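
DoStoreCodeEntry stores the raw entry point rather than the Code object: the entry is the first instruction, Code::kHeaderSize bytes past the object's start, and FieldOperand folds in the heap-object tag adjustment:

// entry = code_object_address + Code::kHeaderSize - kHeapObjectTag
// (exactly what the lea above computes before the movq stores it)
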
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -4089,7 +4072,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ AssertZeroExtended(reg);
}
if (instr->index()->IsConstantOperand()) {
- int constant_index =
+ int32_t constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
if (instr->hydrogen()->length()->representation().IsSmi()) {
__ Cmp(reg, Smi::FromInt(constant_index));
@@ -4106,7 +4089,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
Operand length = ToOperand(instr->length());
if (instr->index()->IsConstantOperand()) {
- int constant_index =
+ int32_t constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
if (instr->hydrogen()->length()->representation().IsSmi()) {
__ Cmp(length, Smi::FromInt(constant_index));
@@ -4356,12 +4339,14 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
+ class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharCodeAt(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -4393,7 +4378,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
__ Push(Smi::FromInt(const_index));
} else {
Register index = ToRegister(instr->index());
@@ -4408,12 +4393,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
+ class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStringCharFromCode(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -4492,6 +4479,22 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ LOperand* output = instr->result();
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange() ||
+ instr->hydrogen()->value()->range()->upper() == kMaxInt) {
+ // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+ // interval, so we treat kMaxInt as a sentinel for this entire interval.
+ __ testl(ToRegister(input), Immediate(0x80000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Integer32ToSmi(ToRegister(output), ToRegister(input));
+}
+
+
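
A uint32 can be tagged on this fast path only if it also fits in int32, so testing bit 31 rejects the (kMaxInt, kMaxUint32] range that the sentinel convention above cannot rule out. The equivalent predicate:

#include <cstdint>
bool Uint32FitsSmiFastPath(uint32_t v) {
  return (v & 0x80000000u) == 0;  // same test as `testl reg, 0x80000000`
}
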
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
@@ -4502,14 +4505,14 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
+ class DeferredNumberTagU V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
+ virtual void Generate() V8_OVERRIDE {
codegen()->DoDeferredNumberTagU(instr_);
}
- virtual LInstruction* instr() { return instr_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@@ -4565,12 +4568,14 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
+ class DeferredNumberTagD V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredNumberTagD(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -4579,36 +4584,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ ucomisd(input_reg, input_reg);
- __ j(parity_odd, &no_special_nan_handling);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_reg);
- __ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ addq(rsp, Immediate(kDoubleSize));
- __ Move(reg, factory()->the_hole_value());
- __ jmp(&done);
- __ bind(&canonicalize);
- __ addq(rsp, Immediate(kDoubleSize));
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(input_reg, kScratchRegister);
- }
-
- __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
@@ -4617,8 +4592,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
}
__ bind(deferred->exit());
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-
- __ bind(&done);
}
@@ -4662,22 +4635,20 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
- bool allow_undefined_as_nan,
+ bool can_convert_undefined_to_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
- STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
- NUMBER_CANDIDATE_IS_ANY_TAGGED);
- if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- if (!allow_undefined_as_nan) {
+ if (!can_convert_undefined_to_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@@ -4685,10 +4656,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
- __ j(equal, &convert, Label::kNear);
- __ CompareRoot(input_reg, Heap::kTheHoleValueRootIndex);
- }
DeoptimizeIf(not_equal, env);
__ bind(&convert);
@@ -4722,60 +4689,47 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+ Label heap_number;
Register input_reg = ToRegister(instr->value());
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
if (instr->truncating()) {
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
__ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr->environment());
__ Set(input_reg, 0);
- __ jmp(&done, Label::kNear);
+ __ jmp(done);
__ bind(&heap_number);
-
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2siq(input_reg, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(input_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
-
+ Label bailout;
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
- __ cvtlsi2sd(xmm_temp, input_reg);
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- }
+ __ TaggedToI(input_reg, input_reg, xmm_temp,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(done);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
}
- __ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
+ class DeferredTaggedToI V8_FINAL : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredTaggedToI(instr_, done());
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@@ -4801,19 +4755,12 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- mode = NUMBER_CANDIDATE_IS_SMI;
- } else if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
- }
- }
+ NumberUntagDMode mode = value->representation().IsSmi()
+ ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->allow_undefined_as_nan(),
+ instr->hydrogen()->can_convert_undefined_to_nan(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment(),
mode);
@@ -4830,34 +4777,16 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
- __ cmpq(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
+ __ TruncateDoubleToI(result_reg, input_reg);
} else {
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
+ Label bailout, done;
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
}
}
@@ -4867,31 +4796,19 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
- CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
- Label done;
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ Label bailout, done;
+ __ DoubleToI(result_reg, input_reg, xmm0,
+ instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
+
+ __ jmp(&done, Label::kNear);
+ __ bind(&bailout);
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&done);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
__ Integer32ToSmi(result_reg, result_reg);
DeoptimizeIf(overflow, instr->environment());
}
@@ -4959,39 +4876,72 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
}
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- __ CmpHeapObject(reg, target);
+ Handle<HeapObject> object = instr->hydrogen()->object();
+ __ CmpHeapObject(reg, object);
DeoptimizeIf(not_equal, instr->environment());
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- LInstruction* instr) {
- Label success;
- __ CompareMap(reg, map, &success);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&success);
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+ __ testq(rax, Immediate(kSmiTagMask));
+ }
+ DeoptimizeIf(zero, instr->environment());
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+ public:
+ DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+ : LDeferredCode(codegen), instr_(instr), object_(object) {
+ SetExit(check_maps());
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredInstanceMigration(instr_, object_);
+ }
+ Label* check_maps() { return &check_maps_; }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LCheckMaps* instr_;
+ Label check_maps_;
+ Register object_;
+ };
+
if (instr->hydrogen()->CanOmitMapChecks()) return;
+
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
+
+ DeferredCheckMaps* deferred = NULL;
+ if (instr->hydrogen()->has_migration_target()) {
+ deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+ __ bind(deferred->check_maps());
+ }
+
+ Label success;
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
+
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, instr);
+ __ CompareMap(reg, map, &success);
+ if (instr->hydrogen()->has_migration_target()) {
+ __ j(not_equal, deferred->entry());
+ } else {
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
__ bind(&success);
}
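
With a migration target in the map set, a failing check no longer deopts immediately: the deferred code calls Runtime::kMigrateInstance and loops back to check_maps, deopting only when the runtime reports failure by returning a smi. The control flow as a self-contained sketch:

#include <functional>
// Callbacks stand in for the emitted map comparisons, the runtime
// migration call, and the deoptimization exit.
void CheckMapsWithMigration(bool has_migration_target,
                            const std::function<bool()>& map_matches,
                            const std::function<bool()>& try_migrate,
                            const std::function<void()>& deoptimize) {
  while (!map_matches()) {
    if (!has_migration_target || !try_migrate()) {
      deoptimize();
      return;
    }
  }
}
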
@@ -5045,29 +4995,15 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
- Register reg = ToRegister(instr->temp());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), instr);
- }
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
+ class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredAllocate(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@@ -5218,7 +5154,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
- __ Push(instr->hydrogen()->shared_info());
+ __ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
@@ -5400,6 +5336,8 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
+
+ Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(no_condition, instr->environment(), type);
}
@@ -5421,12 +5359,14 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
+ class DeferredStackCheck V8_FINAL : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredStackCheck(instr_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@@ -5440,8 +5380,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
@@ -5477,9 +5418,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- // Normally we record the first unknown OSR value as the entrypoint to the OSR
- // code, but if there were none, record the entrypoint here.
- if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
+ GenerateOsrPrologue();
}
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.h b/chromium/v8/src/x64/lithium-codegen-x64.h
index 4286d07de74..f994645019d 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.h
+++ b/chromium/v8/src/x64/lithium-codegen-x64.h
@@ -44,7 +44,7 @@ namespace internal {
class LDeferredCode;
class SafepointGenerator;
-class LCodeGen BASE_EMBEDDED {
+class LCodeGen V8_FINAL BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
@@ -67,7 +67,8 @@ class LCodeGen BASE_EMBEDDED {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
+ expected_safepoint_kind_(Safepoint::kSimple),
+ old_position_(RelocInfo::kNoPosition) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -102,7 +103,6 @@ class LCodeGen BASE_EMBEDDED {
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
- int ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
@@ -123,17 +123,15 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagU(LNumberTagU* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map, LInstruction* instr);
+ void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -179,7 +177,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -192,6 +190,9 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateJumpTable();
bool GenerateSafepointTable();
+ // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+ void GenerateOsrPrologue();
+
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS
@@ -237,7 +238,6 @@ class LCodeGen BASE_EMBEDDED {
CallKind call_kind,
RDIState rdi_state);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
int argc);
@@ -248,10 +248,14 @@ class LCodeGen BASE_EMBEDDED {
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
- void AddToTranslation(Translation* translation,
+
+ void AddToTranslation(LEnvironment* environment,
+ Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32);
+ bool is_uint32,
+ int* object_index_pointer,
+ int* dematerialized_index_pointer);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -268,6 +272,7 @@ class LCodeGen BASE_EMBEDDED {
uint32_t additional_index = 0);
void EmitIntegerMathAbs(LMathAbs* instr);
+ void EmitSmiMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -280,11 +285,14 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
+ void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
+ template<class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(
Register input,
XMMRegister result,
@@ -320,12 +328,6 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
@@ -382,7 +384,9 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- class PushSafepointRegistersScope BASE_EMBEDDED {
+ int old_position_;
+
+ class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
@@ -418,13 +422,14 @@ class LDeferredCode: public ZoneObject {
codegen->AddDeferredCode(this);
}
- virtual ~LDeferredCode() { }
+ virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
protected:
@@ -435,6 +440,7 @@ class LDeferredCode: public ZoneObject {
LCodeGen* codegen_;
Label entry_;
Label exit_;
+ Label done_;
Label* external_exit_;
int instruction_index_;
};
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.h b/chromium/v8/src/x64/lithium-gap-resolver-x64.h
index d828455921a..f218455b675 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/chromium/v8/src/x64/lithium-gap-resolver-x64.h
@@ -38,7 +38,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
-class LGapResolver BASE_EMBEDDED {
+class LGapResolver V8_FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
diff --git a/chromium/v8/src/x64/lithium-x64.cc b/chromium/v8/src/x64/lithium-x64.cc
index 897af2bca14..a0e853d4fc3 100644
--- a/chromium/v8/src/x64/lithium-x64.cc
+++ b/chromium/v8/src/x64/lithium-x64.cc
@@ -32,6 +32,7 @@
#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
+#include "hydrogen-osr.h"
namespace v8 {
namespace internal {
@@ -263,6 +264,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+ stream->Add(" = ");
+ function()->PrintTo(stream);
+ stream->Add(".code_entry = ");
+ code_object()->PrintTo(stream);
+}
+
+
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -431,6 +440,15 @@ LPlatformChunk* LChunkBuilder::Build() {
chunk_ = new(zone()) LPlatformChunk(info(), graph());
LPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
+
+ // If compiling for OSR, reserve space for the unoptimized frame,
+ // which will be subsumed into this frame.
+ if (graph()->has_osr()) {
+ for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+ chunk_->GetNextSpillIndex(false);
+ }
+ }
+
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
@@ -443,7 +461,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(const char* reason) {
+void LCodeGen::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -601,8 +619,10 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
+ ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
+ &argument_index_accumulator,
+ &objects_to_materialize));
return instr;
}
@@ -654,7 +674,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
- Abort("Out of virtual registers while trying to allocate temp register.");
+ Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
vreg = 0;
}
operand->set_virtual_register(vreg);
@@ -732,12 +752,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
}
}
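
The deleted loop above is folded into HValue::CheckUsesForFlag, which evidently returns true only when every use of the value carries the requested flag; the shift then deopts as soon as a single non-truncating use exists. A minimal standalone sketch of that contract, with hypothetical stand-in types rather than the real Hydrogen classes:

    #include <vector>

    enum Flag { kTruncatingToInt32 };

    struct Use {
      bool has_flag;  // whether this use truncates its input to int32
    };

    // Stand-in for HValue::CheckUsesForFlag: true iff all uses have the flag.
    static bool CheckUsesForFlag(const std::vector<Use>& uses, Flag /*flag*/) {
      for (const Use& use : uses) {
        if (!use.has_flag) return false;
      }
      return true;
    }

    int main() {
      std::vector<Use> uses = {{true}, {false}};
      bool does_deopt = !CheckUsesForFlag(uses, kTruncatingToInt32);
      return does_deopt ? 0 : 1;  // deopts: the second use is not truncating
    }
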
@@ -882,6 +897,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
+ instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -897,11 +913,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
+ argument_index_accumulator,
+ objects_to_materialize);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@@ -916,16 +934,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
- bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
+ int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
+ LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- needs_arguments_object_materialization = true;
- op = NULL;
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -936,15 +954,33 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
- if (needs_arguments_object_materialization) {
- HArgumentsObject* arguments = hydrogen_env->entry() == NULL
- ? graph()->GetArgumentsObject()
- : hydrogen_env->entry()->arguments_object();
- ASSERT(arguments->IsLinked());
- for (int i = 1; i < arguments->arguments_count(); ++i) {
- HValue* value = arguments->arguments_values()->at(i);
- ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
- LOperand* op = UseAny(value);
+ for (int i = object_index; i < objects_to_materialize->length(); ++i) {
+ HValue* object_to_materialize = objects_to_materialize->at(i);
+ int previously_materialized_object = -1;
+ for (int prev = 0; prev < i; ++prev) {
+ if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
+ previously_materialized_object = prev;
+ break;
+ }
+ }
+ int length = object_to_materialize->OperandCount();
+ bool is_arguments = object_to_materialize->IsArgumentsObject();
+ if (previously_materialized_object >= 0) {
+ result->AddDuplicateObject(previously_materialized_object);
+ continue;
+ } else {
+ result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+ }
+ for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+ LOperand* op;
+ HValue* value = object_to_materialize->OperandAt(i);
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+ objects_to_materialize->Add(value, zone());
+ op = LEnvironment::materialization_marker();
+ } else {
+ ASSERT(!value->IsPushArgument());
+ op = UseAny(value);
+ }
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
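
The bookkeeping added above queues every captured or arguments object for materialization and, before emitting a fresh AddNewObject entry, scans the queue for an earlier occurrence of the same HValue so aliased objects become AddDuplicateObject back-references. A toy model of that scheme, using plain pointers in place of HValue*:

    #include <cassert>
    #include <vector>

    // Toy mirror of the AddNewObject/AddDuplicateObject bookkeeping:
    // each queued object is either new (-1) or an alias of an earlier slot.
    struct Env {
      std::vector<int> duplicate_of;
      void AddNewObject() { duplicate_of.push_back(-1); }
      void AddDuplicateObject(int prev) { duplicate_of.push_back(prev); }
    };

    static int FindPrevious(const std::vector<const void*>& queue, int i) {
      for (int prev = 0; prev < i; ++prev) {
        if (queue[prev] == queue[i]) return prev;  // same captured object
      }
      return -1;
    }

    int main() {
      int a = 0, b = 0;
      std::vector<const void*> queue = {&a, &b, &a};  // &a is captured twice
      Env env;
      for (int i = 0; i < static_cast<int>(queue.size()); ++i) {
        int prev = FindPrevious(queue, i);
        if (prev >= 0) {
          env.AddDuplicateObject(prev);
        } else {
          env.AddNewObject();
        }
      }
      assert(env.duplicate_of[2] == 0);  // third entry aliases the first
      return 0;
    }
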
@@ -1065,6 +1101,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+ HStoreCodeEntry* store_code_entry) {
+ LOperand* function = UseRegister(store_code_entry->function());
+ LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+ return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
@@ -1321,16 +1365,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1579,9 +1613,13 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), arg_reg_1);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ LOperand* global_object = UseTempRegister(instr->global_object());
+ LOperand* scratch = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LOperand* scratch3 = TempRegister();
+ LRandom* result = new(zone()) LRandom(
+ global_object, scratch, scratch2, scratch3);
+ return DefineFixedDouble(result, xmm1);
}
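
Rewriting DoRandom from a MarkAsCall runtime stub into an inline sequence with three scratch temps also changes what the register allocator sees: as the LInstruction changes later in this diff show, a call-marked instruction reports ClobbersRegisters()/ClobbersTemps()/ClobbersDoubleRegisters(), while the inline form only claims its declared temps. A toy illustration of that flag's effect:

    #include <cassert>

    // Toy model: call-marked instructions clobber everything, so values
    // live across them must be spilled; inline instructions only consume
    // the temps they declare (here, LRandom's three scratch registers).
    struct Instr {
      bool is_call;
      bool ClobbersRegisters() const { return is_call; }
    };

    int main() {
      Instr old_random = {true};   // former runtime call
      Instr new_random = {false};  // inline with explicit scratch temps
      assert(old_random.ClobbersRegisters());
      assert(!new_random.ClobbersRegisters());
      return 0;
    }
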
@@ -1599,9 +1637,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(
- instr->right()->representation()));
+ ASSERT(instr->left()->representation().Equals(r));
+ ASSERT(instr->right()->representation().Equals(r));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1631,6 +1668,13 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+ HCompareHoleAndBranch* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return new(zone()) LCmpHoleAndBranch(object);
+}
+
+
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
@@ -1745,17 +1789,6 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
}
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = Use(instr->length());
@@ -1770,13 +1803,6 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), rax);
return MarkAsCall(new(zone()) LThrow(value), instr);
@@ -1824,8 +1850,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->type().IsSmi() || val->representation().IsSmi()) {
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
@@ -1872,10 +1899,18 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = NULL;
+ if (val->CheckFlag(HInstruction::kUint32)) {
+ result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange() &&
+ val->range()->upper() != kMaxInt) {
+ return result;
+ }
+ } else {
+ result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return result;
+ }
}
return AssignEnvironment(result);
} else {
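
The extra upper() != kMaxInt guard on the kUint32 path exists because ranges are tracked as signed 32-bit intervals: a uint32 value whose upper bound saturates at kMaxInt may really be anywhere up to 2^32 - 1, so the Smi conversion keeps its deopt environment in that case. A two-line demonstration of the underlying representation problem:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A uint32 above INT32_MAX flips negative when viewed as int32, which
      // is why signed range analysis cannot prove it fits in a Smi.
      uint32_t big = 0x80000000u;  // INT32_MAX + 1
      int32_t as_signed = static_cast<int32_t>(big);
      std::printf("%u -> %d\n", big, as_signed);  // 2147483648 -> -2147483648
      return 0;
    }
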
@@ -1920,27 +1955,24 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = NULL;
- if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- if (instr->CanOmitPrototypeChecks()) return result;
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckValue(value));
}
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
+ if (!instr->CanOmitMapChecks()) {
+ value = UseRegisterAtStart(instr->value());
+ if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
+ }
LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (instr->CanOmitMapChecks()) return result;
- return AssignEnvironment(result);
+ if (!instr->CanOmitMapChecks()) {
+ AssignEnvironment(result);
+ if (instr->has_migration_target()) return AssignPointerMap(result);
+ }
+ return result;
}
@@ -2060,23 +2092,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), rax);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, rax), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), rax);
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
@@ -2233,7 +2248,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_external_location = instr->access().IsExternalMemory() &&
instr->access().offset() == 0;
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
+ bool needs_write_barrier_for_map = instr->has_transition() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
@@ -2366,10 +2381,18 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
+ // Use an index that corresponds to the location in the unoptimized frame,
+ // which the optimized frame will subsume.
+ int env_index = instr->index();
+ int spill_index = 0;
+ if (instr->environment()->is_parameter_index(env_index)) {
+ spill_index = chunk()->GetParameterStackSlot(env_index);
+ } else {
+ spill_index = env_index - instr->environment()->first_local_index();
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+ Abort(kTooManySpillSlotsNeededForOSR);
+ spill_index = 0;
+ }
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
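
Together with the slot reservation added to LChunkBuilder::Build() earlier in this file, this makes OSR values land in the stack slots they already occupy in the unoptimized frame: parameters map through GetParameterStackSlot(), and locals map to their offset from the first local index. A toy version of that mapping, with a simplified stand-in for the parameter lookup:

    #include <cassert>

    // Toy mirror of DoUnknownOSRValue's index computation; treating the
    // first param_count environment indices as parameters is a
    // simplification of is_parameter_index()/GetParameterStackSlot().
    static int SpillIndex(int env_index, int param_count, int first_local_index) {
      if (env_index < param_count) return env_index;  // parameter slot
      return env_index - first_local_index;           // unoptimized local slot
    }

    int main() {
      // With 2 parameters and locals starting at environment index 3,
      // the first local is assigned spill slot 0.
      assert(SpillIndex(3, /* param_count */ 2, /* first_local_index */ 3) == 0);
      return 0;
    }
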
@@ -2390,6 +2413,14 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+ instr->ReplayEnvironment(current_block_->last_environment());
+
+ // There are no real uses of a captured object.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
@@ -2431,20 +2462,7 @@ LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
+ instr->ReplayEnvironment(current_block_->last_environment());
  // If there is an instruction with a pending deoptimization environment,
  // create a lazy bailout instruction to capture the environment.
diff --git a/chromium/v8/src/x64/lithium-x64.h b/chromium/v8/src/x64/lithium-x64.h
index 31e54370e0a..4942c10526e 100644
--- a/chromium/v8/src/x64/lithium-x64.h
+++ b/chromium/v8/src/x64/lithium-x64.h
@@ -50,7 +50,6 @@ class LCodeGen;
V(ArithmeticD) \
V(ArithmeticT) \
V(BitI) \
- V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -63,19 +62,19 @@ class LCodeGen;
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
- V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckValue) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
+ V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@@ -128,7 +127,6 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@@ -162,6 +160,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -183,18 +182,23 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual Opcode opcode() const V8_FINAL V8_OVERRIDE { \
+ return LInstruction::k##type; \
+ } \
+ virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE; \
+ virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE { \
+ return mnemonic; \
+ } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
}
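
V8_FINAL and V8_OVERRIDE come from the new include/v8config.h and expand to the C++11 final and override specifiers where the toolchain supports them, and to nothing otherwise; with them in the macro, every concrete instruction's virtuals are checked at compile time. What the expansion amounts to under a C++11 compiler:

    // Equivalent of the macro's output once V8_FINAL/V8_OVERRIDE expand:
    // 'override' rejects signature mismatches with the base class and
    // 'final' rejects further overriding in subclasses.
    struct LInstructionBase {
      virtual ~LInstructionBase() {}
      virtual const char* Mnemonic() const = 0;
    };

    struct LGotoExample : LInstructionBase {
      virtual const char* Mnemonic() const final override { return "goto"; }
    };

    int main() { return LGotoExample().Mnemonic() != nullptr ? 0 : 1; }
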
@@ -204,14 +208,16 @@ class LCodeGen;
}
-class LInstruction: public ZoneObject {
+class LInstruction : public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ bit_field_(IsCallBits::encode(false)) {
+ set_position(RelocInfo::kNoPosition);
+ }
- virtual ~LInstruction() { }
+ virtual ~LInstruction() {}
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
@@ -249,20 +255,30 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
+ // The 31-bit PositionBits field stores the int position value, which may
+ // be RelocInfo::kNoPosition (-1). The accessors below add or subtract 1 so
+ // that the value encoded in bit_field_ is always >= 0 and therefore fits
+ // in the 31-bit PositionBits field.
+ void set_position(int pos) {
+ bit_field_ = PositionBits::update(bit_field_, pos + 1);
+ }
+ int position() { return PositionBits::decode(bit_field_) - 1; }
+
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- void MarkAsCall() { is_call_ = true; }
+ void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+ bool IsCall() const { return IsCallBits::decode(bit_field_); }
// Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersRegisters() const { return IsCall(); }
+ bool ClobbersDoubleRegisters() const { return IsCall(); }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
virtual LOperand* result() const = 0;
@@ -286,10 +302,13 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
+ class IsCallBits: public BitField<bool, 0, 1> {};
+ class PositionBits: public BitField<int, 1, 31> {};
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- bool is_call_;
+ int bit_field_;
};
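
Packing is_call_ and the source position into one bit_field_ works because positions start at RelocInfo::kNoPosition, which is -1: storing pos + 1 keeps the encoded value non-negative, so it fits the unsigned 31-bit PositionBits slot next to IsCallBits. A self-contained round-trip sketch, with a simplified stand-in for V8's BitField template:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for V8's BitField<Type, shift, size> helper.
    template <int kShift, int kSize>
    struct BitFieldStandIn {
      static const uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static uint32_t update(uint32_t previous, uint32_t value) {
        return (previous & ~kMask) | ((value << kShift) & kMask);
      }
      static uint32_t decode(uint32_t value) { return (value & kMask) >> kShift; }
    };

    typedef BitFieldStandIn<0, 1> IsCallBits;     // bit 0
    typedef BitFieldStandIn<1, 31> PositionBits;  // bits 1..31

    int main() {
      const int kNoPosition = -1;  // RelocInfo::kNoPosition
      uint32_t bit_field = 0;
      // set_position(): store pos + 1 so kNoPosition encodes as 0.
      bit_field = PositionBits::update(bit_field, kNoPosition + 1);
      // position(): decode and subtract 1 to recover the original value.
      assert(static_cast<int>(PositionBits::decode(bit_field)) - 1 == kNoPosition);
      bit_field = IsCallBits::update(bit_field, 1);  // MarkAsCall()
      assert(PositionBits::decode(bit_field) == 0);  // position is untouched
      return 0;
    }
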
@@ -297,11 +316,13 @@ class LInstruction: public ZoneObject {
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LTemplateInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0 && result() != NULL; }
+ virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+ return R != 0 && result() != NULL;
+ }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
@@ -312,15 +333,15 @@ class LTemplateInstruction: public LInstruction {
private:
// Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
+ virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
+ virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
+ virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
+ virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
};
-class LGap: public LTemplateInstruction<0, 0, 0> {
+class LGap : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -331,8 +352,8 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@@ -369,11 +390,11 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
-class LInstructionGap: public LGap {
+class LInstructionGap V8_FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const {
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
return !IsRedundant();
}
@@ -381,14 +402,14 @@ class LInstructionGap: public LGap {
};
-class LGoto: public LTemplateInstruction<0, 0, 0> {
+class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const;
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_id_; }
@@ -397,7 +418,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> {
};
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -413,7 +434,7 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDummyUse(LOperand* value) {
inputs_[0] = value;
@@ -422,22 +443,24 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
};
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
-class LLabel: public LGap {
+class LLabel V8_FINAL : public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -453,14 +476,16 @@ class LLabel: public LGap {
};
-class LParameter: public LTemplateInstruction<1, 0, 0> {
+class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -471,19 +496,21 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> {
};
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
- virtual bool IsControl() const { return true; }
+ virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -522,7 +549,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
inputs_[0] = receiver;
@@ -536,7 +563,7 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
@@ -557,7 +584,7 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
};
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
@@ -571,11 +598,11 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -587,14 +614,14 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
};
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
-class LModI: public LTemplateInstruction<1, 2, 1> {
+class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -611,7 +638,7 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
};
-class LDivI: public LTemplateInstruction<1, 2, 1> {
+class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
@@ -630,7 +657,7 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
@@ -649,7 +676,7 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 0> {
+class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -664,7 +691,7 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
};
-class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -683,11 +710,11 @@ class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
@@ -700,7 +727,7 @@ class LMathFloor: public LTemplateInstruction<1, 1, 0> {
};
-class LMathRound: public LTemplateInstruction<1, 1, 0> {
+class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathRound(LOperand* value) {
inputs_[0] = value;
@@ -713,7 +740,7 @@ class LMathRound: public LTemplateInstruction<1, 1, 0> {
};
-class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
@@ -726,7 +753,7 @@ class LMathAbs: public LTemplateInstruction<1, 1, 0> {
};
-class LMathLog: public LTemplateInstruction<1, 1, 0> {
+class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
@@ -738,7 +765,7 @@ class LMathLog: public LTemplateInstruction<1, 1, 0> {
};
-class LMathSin: public LTemplateInstruction<1, 1, 0> {
+class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
@@ -750,7 +777,7 @@ class LMathSin: public LTemplateInstruction<1, 1, 0> {
};
-class LMathCos: public LTemplateInstruction<1, 1, 0> {
+class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
@@ -762,7 +789,7 @@ class LMathCos: public LTemplateInstruction<1, 1, 0> {
};
-class LMathTan: public LTemplateInstruction<1, 1, 0> {
+class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
@@ -774,7 +801,7 @@ class LMathTan: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
+class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
@@ -791,7 +818,7 @@ class LMathExp: public LTemplateInstruction<1, 1, 2> {
};
-class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
@@ -803,7 +830,7 @@ class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
};
-class LMathPowHalf: public LTemplateInstruction<1, 1, 0> {
+class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathPowHalf(LOperand* value) {
inputs_[0] = value;
@@ -815,7 +842,7 @@ class LMathPowHalf: public LTemplateInstruction<1, 1, 0> {
};
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -825,12 +852,24 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+ public:
+ explicit LCmpHoleAndBranch(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -841,11 +880,11 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -858,7 +897,7 @@ class LIsNumberAndBranch: public LControlInstruction<1, 0> {
};
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
+class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -871,11 +910,11 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
+class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -886,11 +925,11 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -904,11 +943,11 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> {
public:
explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -922,13 +961,13 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Token::Value op() const { return hydrogen()->token(); }
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -940,11 +979,11 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -957,7 +996,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
+class LHasCachedArrayIndexAndBranch V8_FINAL
+ : public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -969,11 +1009,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
@@ -989,11 +1029,11 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1010,7 +1050,7 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
+class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1024,7 +1064,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1042,7 +1082,8 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+ virtual void SetDeferredLazyDeoptimizationEnvironment(
+ LEnvironment* env) V8_OVERRIDE {
lazy_deopt_env_ = env;
}
@@ -1051,7 +1092,7 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInstanceSize(LOperand* object) {
inputs_[0] = object;
@@ -1064,7 +1105,7 @@ class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
};
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
@@ -1079,7 +1120,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2, 0> {
+class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LBitI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1096,7 +1137,7 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
};
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
+class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -1117,7 +1158,7 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
};
-class LSubI: public LTemplateInstruction<1, 2, 0> {
+class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1132,7 +1173,7 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
+class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1141,7 +1182,7 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantS: public LTemplateInstruction<1, 0, 0> {
+class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1150,7 +1191,7 @@ class LConstantS: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
+class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
@@ -1165,7 +1206,7 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
};
-class LConstantE: public LTemplateInstruction<1, 0, 0> {
+class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1176,16 +1217,18 @@ class LConstantE: public LTemplateInstruction<1, 0, 0> {
};
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
+class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
- Handle<Object> value() const { return hydrogen()->handle(); }
+ Handle<Object> value(Isolate* isolate) const {
+ return hydrogen()->handle(isolate);
+ }
};
-class LBranch: public LControlInstruction<1, 0> {
+class LBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1196,17 +1239,17 @@ class LBranch: public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
};
-class LCmpMapAndBranch: public LControlInstruction<1, 0> {
+class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1221,7 +1264,7 @@ class LCmpMapAndBranch: public LControlInstruction<1, 0> {
};
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
@@ -1233,7 +1276,7 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
};
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+class LElementsKind V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
@@ -1246,7 +1289,7 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 0> {
+class LValueOf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LValueOf(LOperand* value) {
inputs_[0] = value;
@@ -1259,7 +1302,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 0> {
};
-class LDateField: public LTemplateInstruction<1, 1, 0> {
+class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LDateField(LOperand* date, Smi* index) : index_(index) {
inputs_[0] = date;
@@ -1276,7 +1319,7 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
@@ -1300,7 +1343,7 @@ class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1312,19 +1355,7 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
+class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1344,7 +1375,7 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1359,7 +1390,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
};
-class LPower: public LTemplateInstruction<1, 2, 0> {
+class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1374,20 +1405,29 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
-class LRandom: public LTemplateInstruction<1, 1, 0> {
+class LRandom V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
- explicit LRandom(LOperand* global_object) {
+ LRandom(LOperand* global_object,
+ LOperand* scratch,
+ LOperand* scratch2,
+ LOperand* scratch3) {
inputs_[0] = global_object;
+ temps_[0] = scratch;
+ temps_[1] = scratch2;
+ temps_[2] = scratch3;
}
LOperand* global_object() { return inputs_[0]; }
+ LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
+ LOperand* scratch3() const { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1399,16 +1439,18 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticD;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1420,16 +1462,18 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
+ virtual Opcode opcode() const V8_OVERRIDE {
+ return LInstruction::kArithmeticT;
+ }
+ virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
+ virtual const char* Mnemonic() const V8_OVERRIDE;
private:
Token::Value op_;
};
-class LReturn: public LTemplateInstruction<0, 2, 0> {
+class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LReturn(LOperand* value, LOperand* parameter_count) {
inputs_[0] = value;
@@ -1452,7 +1496,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1465,20 +1509,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1492,7 +1523,7 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
@@ -1505,7 +1536,8 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
+class LLoadExternalArrayPointer V8_FINAL
+ : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
@@ -1518,7 +1550,7 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1533,7 +1565,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
@@ -1541,7 +1573,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1555,14 +1587,14 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1577,7 +1609,7 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1592,7 +1624,7 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
};
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
@@ -1611,7 +1643,7 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
@@ -1624,11 +1656,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
@@ -1645,11 +1677,11 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
+class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1661,7 +1693,7 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
+class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
@@ -1674,7 +1706,24 @@ class LDrop: public LTemplateInstruction<0, 0, 0> {
};
-class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+ inputs_[0] = function;
+ temps_[0] = code_object;
+ }
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* code_object() { return temps_[0]; }
+
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+ DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
@@ -1683,28 +1732,28 @@ class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> {
LOperand* base_object() { return inputs_[0]; }
int offset() { return hydrogen()->offset(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object")
DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject)
};
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
-class LContext: public LTemplateInstruction<1, 0, 0> {
+class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
DECLARE_HYDROGEN_ACCESSOR(Context)
};
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
@@ -1716,20 +1765,20 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+class LGlobalObject V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
+class LGlobalReceiver V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
@@ -1741,7 +1790,7 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
};
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
@@ -1753,7 +1802,7 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
@@ -1764,13 +1813,13 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
+class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
@@ -1781,25 +1830,25 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
LOperand* key() { return inputs_[0]; }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* function) {
inputs_[0] = function;
@@ -1813,30 +1862,30 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> {
};
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
+class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1847,13 +1896,13 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNewArray(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1864,13 +1913,13 @@ class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1880,7 +1929,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1892,7 +1941,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1905,7 +1954,7 @@ class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
+class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1919,7 +1968,20 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
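A note on the new LUint32ToSmi: with the 32-bit smi payload used on x64 at this revision (kSmiShift == 32), a uint32 fits only when its top bit is clear, so the conversion needs a range check unless the value's range is already known — an assumption about the accompanying codegen, which is not part of this hunk. The arithmetic involved:

    // A uint32 is a valid smi payload only if its sign bit is clear:
    inline bool UInteger32FitsSmi(uint32_t value) {
      return (value & 0x80000000u) == 0;          // i.e. value <= INT32_MAX
    }
    // Tagging (assumes kSmiShift == 32): payload up, tag bits zero.
    inline int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }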
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1931,7 +1993,7 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagU(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1945,7 +2007,7 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
};
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -1961,7 +2023,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
@@ -1976,7 +2038,7 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
};
-class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LDoubleToSmi(LOperand* value) {
inputs_[0] = value;
@@ -1990,7 +2052,7 @@ class LDoubleToSmi: public LTemplateInstruction<1, 1, 0> {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -2007,7 +2069,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
+class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -2019,7 +2081,7 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -2032,7 +2094,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -2049,7 +2111,7 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
@@ -2064,16 +2126,16 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> transition() const { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition_map(); }
Representation representation() const {
return hydrogen()->field_representation();
}
};
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
@@ -2086,14 +2148,14 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2110,13 +2172,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
@@ -2131,13 +2193,13 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
+class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
@@ -2155,7 +2217,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
@@ -2164,7 +2226,7 @@ class LTransitionElementsKind: public LTemplateInstruction<0, 1, 2> {
};
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LTrapAllocationMemento(LOperand* object,
LOperand* temp) {
@@ -2180,7 +2242,7 @@ class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -2195,7 +2257,7 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
@@ -2210,7 +2272,7 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
@@ -2223,20 +2285,20 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckFunction(LOperand* value) {
+ explicit LCheckValue(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+ DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+ DECLARE_HYDROGEN_ACCESSOR(CheckValue)
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -2249,7 +2311,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
@@ -2262,25 +2324,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2292,7 +2336,7 @@ class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2304,7 +2348,7 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
@@ -2316,7 +2360,7 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped,
LOperand* temp_xmm) {
@@ -2331,7 +2375,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
@@ -2344,7 +2388,7 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocate: public LTemplateInstruction<1, 1, 1> {
+class LAllocate V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LAllocate(LOperand* size, LOperand* temp) {
inputs_[0] = size;
@@ -2359,21 +2403,21 @@ class LAllocate: public LTemplateInstruction<1, 1, 1> {
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
@@ -2386,7 +2430,7 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -2398,7 +2442,7 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -2411,11 +2455,11 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
- virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
};
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
@@ -2429,16 +2473,18 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
- virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+ virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+ return false;
+ }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
@@ -2450,7 +2496,7 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
@@ -2462,7 +2508,7 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
@@ -2478,7 +2524,7 @@ class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
};
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
@@ -2492,7 +2538,7 @@ class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
};
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
@@ -2507,7 +2553,7 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
-class LPlatformChunk: public LChunk {
+class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
@@ -2517,7 +2563,7 @@ class LPlatformChunk: public LChunk {
};
-class LChunkBuilder BASE_EMBEDDED {
+class LChunkBuilder V8_FINAL BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
@@ -2573,7 +2619,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(const char* reason);
+ void Abort(BailoutReason reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2659,7 +2705,8 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
+ int* argument_index_accumulator,
+ ZoneList<HValue*>* objects_to_materialize);
void VisitInstruction(HInstruction* current);
diff --git a/chromium/v8/src/x64/macro-assembler-x64.cc b/chromium/v8/src/x64/macro-assembler-x64.cc
index 13d7ddaa685..69abc5454f0 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/macro-assembler-x64.cc
@@ -155,7 +155,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
}
}
// Size of movq(destination, src);
- return 10;
+ return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
}
@@ -285,16 +285,17 @@ void MacroAssembler::InNewSpace(Register object,
cmpq(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+ ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+ reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+ and_(scratch,
+ Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
}
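
The replacement of the legacy HEAP macro with isolate()->heap() is part of removing process-global state; the pointer test itself is unchanged. What the emitted sequence computes, read back into plain C++ (the cc == equal case selects the in-new-space branch):

    // Subtract the space's start address and mask with NewSpaceMask();
    // a zero result means the address lies inside the new space.
    bool InNewSpace(uintptr_t object, uintptr_t start, uintptr_t mask) {
      return ((object - start) & mask) == 0;
    }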
@@ -449,8 +450,8 @@ void MacroAssembler::RecordWrite(Register object,
}
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
+void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+ if (emit_debug_code()) Check(cc, reason);
}
@@ -466,16 +467,16 @@ void MacroAssembler::AssertFastElements(Register elements) {
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
j(equal, &ok, Label::kNear);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cc, BailoutReason reason) {
Label L;
j(cc, &L, Label::kNear);
- Abort(msg);
+ Abort(reason);
// Control will not return here.
bind(&L);
}
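
Assert/Check/Abort now take a BailoutReason enum instead of a raw C string, which deduplicates the message table and lets callers switch on the reason. A sketch of the presumed definition (the enumerator names come from this diff; the list macro and its exact location are assumptions):

    #define ERROR_MESSAGES_LIST(V)                          \
      V(kNoReason, "no reason")                             \
      V(kSmiAdditionOverflow, "Smi addition overflow")      \
      V(kOperandIsNotANumber, "Operand is not a number")

    enum BailoutReason {
    #define ERROR_MESSAGES_CONSTANTS(C, T) C,
      ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS)
    #undef ERROR_MESSAGES_CONSTANTS
      kLastErrorMessage
    };

    const char* GetBailoutReason(BailoutReason reason);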
@@ -508,12 +509,13 @@ void MacroAssembler::NegativeZeroTest(Register result,
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
// We want to pass the msg string like a smi to avoid GC
// problems; however, msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
+ const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
// Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
@@ -523,7 +525,13 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
+
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE64);
push(kScratchRegister);
@@ -678,22 +686,8 @@ static int Offset(ExternalReference ref0, ExternalReference ref1) {
}
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space,
- bool returns_handle) {
-#if defined(_WIN64) && !defined(__MINGW64__)
- if (!returns_handle) {
- EnterApiExitFrame(arg_stack_space);
- return;
- }
- // We need to prepare a slot for result handle on stack and put
- // a pointer to it into 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
+void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
EnterApiExitFrame(arg_stack_space);
-#endif
}
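
PrepareCallApiFunction loses its returns_handle parameter because this branch only supports new-style API callbacks, which return void and publish their result through a ReturnValue slot that the stub later reads back at return_value_offset. The shape of such a callback, as a sketch against the v8.h of this branch:

    void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      // No v8::Handle<Value> return; the result goes through the info object.
      info.GetReturnValue().Set(v8::Integer::New(42));
    }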
@@ -701,7 +695,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset) {
Label prologue;
Label promote_scheduled_exception;
@@ -774,23 +767,6 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
PopSafepointRegisters();
}
- // Can skip the result check for new-style callbacks
- // TODO(dcarney): may need to pass this information down
- // as some function_addresses might not have been registered
- if (returns_handle) {
- Label empty_result;
-#if defined(_WIN64) && !defined(__MINGW64__)
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
- jmp(&prologue);
- bind(&empty_result);
- }
// Load the value from ReturnValue
movq(rax, Operand(rbp, return_value_offset * kPointerSize));
bind(&prologue);
@@ -838,7 +814,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
CompareRoot(return_value, Heap::kNullValueRootIndex);
j(equal, &ok, Label::kNear);
- Abort("API call returned invalid object");
+ Abort(kAPICallReturnedInvalidObject);
bind(&ok);
#endif
@@ -983,7 +959,10 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
-bool MacroAssembler::IsUnsafeInt(const int x) {
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+bool MacroAssembler::IsUnsafeInt(const int32_t x) {
static const int kMaxBits = 17;
return !is_intn(x, kMaxBits);
}
@@ -991,7 +970,7 @@ bool MacroAssembler::IsUnsafeInt(const int x) {
void MacroAssembler::SafeMove(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -1003,7 +982,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
+ ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
@@ -1014,9 +993,6 @@ void MacroAssembler::SafePush(Smi* src) {
}
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -1038,7 +1014,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
RelocInfo::NONE64);
cmpq(dst, kSmiConstantRegister);
if (allow_stub_calls()) {
- Assert(equal, "Uninitialized kSmiConstantRegister");
+ Assert(equal, kUninitializedKSmiConstantRegister);
} else {
Label ok;
j(equal, &ok, Label::kNear);
@@ -1106,7 +1082,7 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
Label ok;
j(zero, &ok, Label::kNear);
if (allow_stub_calls()) {
- Abort("Integer32ToSmiField writing to non-smi location");
+ Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
} else {
int3();
}
@@ -1689,12 +1665,12 @@ void MacroAssembler::SmiAdd(Register dst,
if (emit_debug_code()) {
movq(kScratchRegister, src1);
addq(kScratchRegister, src2);
- Check(no_overflow, "Smi addition overflow");
+ Check(no_overflow, kSmiAdditionOverflow);
}
lea(dst, Operand(src1, src2, times_1, 0));
} else {
addq(dst, src2);
- Assert(no_overflow, "Smi addition overflow");
+ Assert(no_overflow, kSmiAdditionOverflow);
}
}
@@ -1726,7 +1702,7 @@ void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
movq(dst, src1);
}
subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
+ Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -1758,7 +1734,7 @@ void MacroAssembler::SmiSub(Register dst,
movq(dst, src1);
}
subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
+ Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -2155,7 +2131,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
#ifdef DEBUG
if (allow_stub_calls()) { // Check contains a stub call.
Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
}
#endif
STATIC_ASSERT(kSmiTag == 0);
@@ -2221,6 +2197,49 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
}
+void MacroAssembler::Push(Smi* source) {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Register constant = GetSmiConstant(source);
+ push(constant);
+ }
+}
+
+
+void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
+ movq(scratch, src);
+ // High bits.
+ shr(src, Immediate(64 - kSmiShift));
+ shl(src, Immediate(kSmiShift));
+ push(src);
+ // Low bits.
+ shl(scratch, Immediate(kSmiShift));
+ push(scratch);
+}
+
+
+void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
+ pop(scratch);
+ // Low bits.
+ shr(scratch, Immediate(kSmiShift));
+ pop(dst);
+ shr(dst, Immediate(kSmiShift));
+ // High bits.
+ shl(dst, Immediate(64 - kSmiShift));
+ or_(dst, scratch);
+}
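The two-smi trick keeps a raw 64-bit value on the stack without the GC mistaking it for a heap pointer: each 32-bit half is shifted into smi payload position, so both stack slots look like smis. The round trip in plain C++, assuming kSmiShift == 32:

    uint64_t v = 0x0123456789ABCDEFULL;
    uint64_t high_smi = (v >> 32) << 32;   // high half in payload position
    uint64_t low_smi  = v << 32;           // low half likewise, pushed last
    // Pop order: low half first, then recombine.
    uint64_t restored = (low_smi >> 32) | ((high_smi >> 32) << 32);
    // restored == v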
+
+
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+}
+
+
+// ----------------------------------------------------------------------------
+
+
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2460,17 +2479,6 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
}
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addq(rsp, Immediate(stack_elements * kPointerSize));
@@ -2478,11 +2486,6 @@ void MacroAssembler::Drop(int stack_elements) {
}
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
void MacroAssembler::TestBit(const Operand& src, int bits) {
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
@@ -2510,8 +2513,8 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
int MacroAssembler::CallSize(ExternalReference ext) {
// Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
- const int kCallInstructionSize = 3;
- return LoadAddressSize(ext) + kCallInstructionSize;
+ return LoadAddressSize(ext) +
+ Assembler::kCallScratchRegisterInstructionLength;
}
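
The magic numbers become named Assembler constants. The byte accounting behind them, per the opcode comments in these hunks (the exact declarations are an assumption):

    // movq(kScratchRegister, imm64): REX.W + B8+r + 8-byte immediate = 10 bytes
    static const int kMoveAddressIntoScratchRegisterInstructionLength =
        2 + kPointerSize;
    // call(kScratchRegister): Rex.B FF D4 = 3 bytes
    static const int kCallScratchRegisterInstructionLength = 3;
    // Full sequence: load the address, then call through the scratch register.
    static const int kCallSequenceLength =
        kMoveAddressIntoScratchRegisterInstructionLength +
        kCallScratchRegisterInstructionLength;  // 13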
@@ -2798,9 +2801,9 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
- pop(scratch);
+ PopReturnAddressTo(scratch);
addq(rsp, Immediate(bytes_dropped));
- push(scratch);
+ PushReturnAddressFrom(scratch);
ret(0);
}
}
@@ -2984,12 +2987,123 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
XMMRegister scratch) {
if (FLAG_debug_code) {
cmpq(src, Immediate(0xffffffff));
- Assert(below_equal, "input GPR is expected to have upper32 cleared");
+ Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
}
cvtqsi2sd(dst, src);
}
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(input_reg, result_reg, offset, true);
+ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done;
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2siq(result_reg, xmm0);
+ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ // Slow case.
+ if (input_reg.is(result_reg)) {
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), xmm0);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
+ Label done;
+ cvttsd2siq(result_reg, input_reg);
+ movq(kScratchRegister,
+ V8_INT64_C(0x8000000000000000),
+ RelocInfo::NONE64);
+ cmpq(result_reg, kScratchRegister);
+ j(not_equal, &done, Label::kNear);
+
+ subq(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), input_reg);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addq(rsp, Immediate(kDoubleSize));
+
+ bind(&done);
+}
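cvttsd2siq produces the "integer indefinite" value 0x8000000000000000 for NaN and out-of-range inputs, which is why that bit pattern doubles as the slow-path sentinel; DoubleToIStub then computes the ECMAScript ToInt32 result. Roughly what the slow path must produce, as portable C++ (a sketch of the semantics, not the stub's actual algorithm):

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32(double d) {
      if (!std::isfinite(d) || d == 0) return 0;           // NaN, Inf, +/-0 -> 0
      double m = std::fmod(std::trunc(d), 4294967296.0);   // truncate, mod 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }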
+
+
+void MacroAssembler::DoubleToI(Register result_reg,
+ XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* conversion_failed,
+ Label::Distance dst) {
+ cvttsd2si(result_reg, input_reg);
+ cvtlsi2sd(xmm0, result_reg);
+ ucomisd(xmm0, input_reg);
+ j(not_equal, conversion_failed, dst);
+ j(parity_even, conversion_failed, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to conversion_failed.
+ andl(result_reg, Immediate(1));
+ j(not_zero, conversion_failed, dst);
+ bind(&done);
+ }
+}
+
+
+void MacroAssembler::TaggedToI(Register result_reg,
+ Register input_reg,
+ XMMRegister temp,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision,
+ Label::Distance dst) {
+ Label done;
+ ASSERT(!temp.is(xmm0));
+
+ // Heap number map check.
+ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ j(not_equal, lost_precision, dst);
+
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2si(result_reg, xmm0);
+ cvtlsi2sd(temp, result_reg);
+ ucomisd(xmm0, temp);
+ RecordComment("Deferred TaggedToI: lost precision");
+ j(not_equal, lost_precision, dst);
+ RecordComment("Deferred TaggedToI: NaN");
+ j(parity_even, lost_precision, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, xmm0);
+ andl(result_reg, Immediate(1));
+ j(not_zero, lost_precision, dst);
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3033,7 +3147,7 @@ void MacroAssembler::AssertNumber(Register object) {
j(is_smi, &ok, Label::kNear);
Cmp(FieldOperand(object, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- Check(equal, "Operand is not a number");
+ Check(equal, kOperandIsNotANumber);
bind(&ok);
}
}
@@ -3042,7 +3156,7 @@ void MacroAssembler::AssertNumber(Register object) {
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(NegateCondition(is_smi), "Operand is a smi");
+ Check(NegateCondition(is_smi), kOperandIsASmi);
}
}
@@ -3050,7 +3164,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
+ Check(is_smi, kOperandIsNotASmi);
}
}
@@ -3058,7 +3172,7 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertSmi(const Operand& object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
+ Check(is_smi, kOperandIsNotASmi);
}
}
@@ -3068,7 +3182,7 @@ void MacroAssembler::AssertZeroExtended(Register int32_register) {
ASSERT(!int32_register.is(kScratchRegister));
movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
cmpq(kScratchRegister, int32_register);
- Check(above_equal, "32 bit value in register is not zero-extended");
+ Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
}
}
@@ -3076,12 +3190,12 @@ void MacroAssembler::AssertZeroExtended(Register int32_register) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
+ Check(not_equal, kOperandIsASmiAndNotAString);
push(object);
movq(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
pop(object);
- Check(below, "Operand is not a string");
+ Check(below, kOperandIsNotAString);
}
}
@@ -3089,24 +3203,24 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a name");
+ Check(not_equal, kOperandIsASmiAndNotAName);
push(object);
movq(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
pop(object);
- Check(below_equal, "Operand is not a name");
+ Check(below_equal, kOperandIsNotAName);
}
}
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
- const char* message) {
+ BailoutReason reason) {
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
cmpq(src, kScratchRegister);
- Check(equal, message);
+ Check(equal, reason);
}
}
@@ -3457,7 +3571,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, "code object not properly patched");
+ Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3466,7 +3580,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
- Check(equal, "stack frame types must match");
+ Check(equal, kStackFrameTypesMustMatch);
}
movq(rsp, rbp);
pop(rbp);
@@ -3567,8 +3681,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// from the caller stack.
lea(rsp, Operand(r15, 1 * kPointerSize));
- // Push the return address to get ready to return.
- push(rcx);
+ PushReturnAddressFrom(rcx);
LeaveExitFrameEpilogue();
}
@@ -3612,7 +3725,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
cmpq(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
+ Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
int offset =
@@ -3624,7 +3737,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
}
// Check if both contexts are the same.
@@ -3643,12 +3756,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movq(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
+ Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
// Read the first word and compare to native_context_map().
movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
- Check(equal, "JSGlobalObject::native_context should be a native context.");
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
pop(holder_reg);
}
@@ -3794,7 +3907,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
cmpq(result, top_operand);
- Check(equal, "Unexpected allocation top");
+ Check(equal, kUnexpectedAllocationTop);
#endif
return;
}
@@ -3815,7 +3928,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
AllocationFlags flags) {
if (emit_debug_code()) {
testq(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
+ Check(zero, kUnalignedAllocationInNewSpace);
}
ExternalReference allocation_top =
@@ -3859,10 +3972,10 @@ void MacroAssembler::Allocate(int object_size,
LoadAllocationTopHelper(result, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
+ Check(zero, kAllocationIsNotDoubleAligned);
}
// Calculate new top and bail out if new space is exhausted.
@@ -3938,10 +4051,10 @@ void MacroAssembler::Allocate(Register object_size,
LoadAllocationTopHelper(result, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
+ // safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
+ Check(zero, kAllocationIsNotDoubleAligned);
}
// Calculate new top and bail out if new space is exhausted.
@@ -3975,7 +4088,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
cmpq(object, top_operand);
- Check(below, "Undo allocation of non allocated memory");
+ Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movq(top_operand, object);
}
@@ -4165,7 +4278,7 @@ void MacroAssembler::CopyBytes(Register destination,
ASSERT(min_length >= 0);
if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
- Assert(greater_equal, "Invalid min_length");
+ Assert(greater_equal, kInvalidMinLength);
}
Label loop, done, short_string, short_loop;
@@ -4249,7 +4362,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (emit_debug_code()) {
CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
Heap::kWithContextMapRootIndex);
- Check(not_equal, "Variable resolved to with context.");
+ Check(not_equal, kVariableResolvedToWithContext);
}
}
@@ -4340,7 +4453,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
diff --git a/chromium/v8/src/x64/macro-assembler-x64.h b/chromium/v8/src/x64/macro-assembler-x64.h
index e611c8ae279..09c8a800cca 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/x64/macro-assembler-x64.h
@@ -375,6 +375,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
+ // Support for constant splitting.
+ bool IsUnsafeInt(const int32_t x);
+ void SafeMove(Register dst, Smi* src);
+ void SafePush(Smi* src);
+
void InitializeSmiConstantRegister() {
movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
@@ -720,6 +725,14 @@ class MacroAssembler: public Assembler {
}
void Push(Smi* smi);
+
+ // Save away a 64-bit integer on the stack as two 32-bit integers
+ // masquerading as smis so that the garbage collector skips visiting them.
+ void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
+ // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
+ // smis on the top of stack.
+ void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);
+
void Test(const Operand& dst, Smi* source);
@@ -774,11 +787,6 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Support for constant splitting.
- bool IsUnsafeInt(const int x);
- void SafeMove(Register dst, Smi* src);
- void SafePush(Smi* src);
-
// Bit-field support.
void TestBit(const Operand& dst, int bit_index);
@@ -823,6 +831,10 @@ class MacroAssembler: public Assembler {
void Drop(int stack_elements);
void Call(Label* target) { call(target); }
+ void Push(Register src) { push(src); }
+ void Pop(Register dst) { pop(dst); }
+ void PushReturnAddressFrom(Register src) { push(src); }
+ void PopReturnAddressTo(Register dst) { pop(dst); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -837,7 +849,7 @@ class MacroAssembler: public Assembler {
// The size of the code generated for different call instructions.
int CallSize(Address destination, RelocInfo::Mode rmode) {
- return kCallInstructionLength;
+ return kCallSequenceLength;
}
int CallSize(ExternalReference ext);
int CallSize(Handle<Code> code_object) {
@@ -963,6 +975,20 @@ class MacroAssembler: public Assembler {
XMMRegister temp_xmm_reg,
Register result_reg);
+ void SlowTruncateToI(Register result_reg, Register input_reg,
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag);
+
+ void TruncateHeapNumberToI(Register result_reg, Register input_reg);
+ void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
+
+ void DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch, MinusZeroMode minus_zero_mode,
+ Label* conversion_failed, Label::Distance dst = Label::kFar);
+
+ void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
+ MinusZeroMode minus_zero_mode, Label* lost_precision,
+ Label::Distance dst = Label::kFar);
+
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1002,7 +1028,7 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
- const char* message);
+ BailoutReason reason);
// ---------------------------------------------------------------------------
// Exception handling
@@ -1238,7 +1264,7 @@ class MacroAssembler: public Assembler {
// rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
// context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
// inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space, bool returns_handle);
+ void PrepareCallApiFunction(int arg_stack_space);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Clobbers r14, r15, rbx and
@@ -1248,7 +1274,6 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- bool returns_handle,
int return_value_offset_from_rbp);
// Before calling a C-function from generated code, align arguments on stack.
@@ -1319,15 +1344,15 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
+ void Assert(Condition cc, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
+ void Check(Condition cc, BailoutReason reason);
// Print a message to stdout and abort execution.
- void Abort(const char* msg);
+ void Abort(BailoutReason msg);
// Check that the stack is aligned.
void CheckStackAlignment();
diff --git a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
index 106ffb76da5..ca834e2771f 100644
--- a/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -397,7 +397,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
// closed before in the reg-exp).
- __ Check(greater_equal, "Invalid capture referenced");
+ __ Check(greater_equal, kInvalidCaptureReferenced);
// Succeed on empty capture (including non-participating capture)
__ j(equal, &fallthrough);
@@ -761,7 +761,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
-#ifdef WIN32
+#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
@@ -771,7 +771,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ movq(register_location(i), rax); // One write every page.
}
-#endif // WIN32
+#endif // V8_OS_WIN
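The raw WIN32 guard becomes V8_OS_WIN from the new v8config.h, keeping platform detection in one place. Condensed from that header (an approximation):

    #if defined(_WIN32)     // also defined for 64-bit Windows builds
    # define V8_OS_WIN 1
    #endif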
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -998,7 +998,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_.GetCode(&code_desc);
- Isolate* isolate = ISOLATE;
+ Isolate* isolate = this->isolate();
Handle<Code> code = isolate->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP),
masm_.CodeObject());
@@ -1188,7 +1188,6 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
diff --git a/chromium/v8/src/x64/stub-cache-x64.cc b/chromium/v8/src/x64/stub-cache-x64.cc
index 39ff656ec45..365bd385799 100644
--- a/chromium/v8/src/x64/stub-cache-x64.cc
+++ b/chromium/v8/src/x64/stub-cache-x64.cc
@@ -29,6 +29,7 @@
#if V8_TARGET_ARCH_X64
+#include "arguments.h"
#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"
@@ -106,38 +107,34 @@ static void ProbeTable(Isolate* isolate,
}
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be unique and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register r0,
- Register r1) {
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name,
+ Register scratch0,
+ Register scratch1) {
ASSERT(name->IsUniqueName());
+ ASSERT(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
- __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movq(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(r0, Map::kBitFieldOffset),
+ __ testb(FieldOperand(scratch0, Map::kBitFieldOffset),
Immediate(kInterceptorOrAccessCheckNeededMask));
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
- Register properties = r0;
+ Register properties = scratch0;
__ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
@@ -151,7 +148,7 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
&done,
properties,
name,
- r1);
+ scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
@@ -366,6 +363,11 @@ static void PushInterceptorArguments(MacroAssembler* masm,
Register holder,
Register name,
Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
__ push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
@@ -373,8 +375,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(kScratchRegister);
__ push(receiver);
__ push(holder);
- __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
}
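
The STATIC_ASSERTs pin the pushed words to the layout the runtime side unpacks; the data and isolate words are gone, shrinking the interceptor frame from six words to four. The constants they check, as declared on StubCache (names and values all taken from this diff, sketched here for reference):

    class StubCache {
     public:
      // Order of the words pushed by PushInterceptorArguments:
      static const int kInterceptorArgsNameIndex = 0;
      static const int kInterceptorArgsInfoIndex = 1;
      static const int kInterceptorArgsThisIndex = 2;
      static const int kInterceptorArgsHolderIndex = 3;
      static const int kInterceptorArgsLength = 4;
    };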
@@ -389,7 +389,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ Set(rax, 6);
+ __ Set(rax, StubCache::kInterceptorArgsLength);
__ LoadAddress(rbx, ref);
CEntryStub stub(1);
@@ -414,8 +414,10 @@ static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
__ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
__ movq(StackOperandForReturnAddress(0), scratch);
__ Move(scratch, Smi::FromInt(0));
- for (int i = 1; i <= kFastApiCallArguments; i++) {
- __ movq(Operand(rsp, i * kPointerSize), scratch);
+ StackArgumentsAccessor args(rsp, kFastApiCallArguments,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ movq(args.GetArgumentOperand(i), scratch);
}
}
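
StackArgumentsAccessor replaces hand-computed "i * kPointerSize" offsets with named argument slots; whether the receiver occupies a slot is stated once at construction instead of being baked into every index. Typical use, mirroring the hunk above (the class is declared elsewhere in the x64 port and treated here as given):

    StackArgumentsAccessor args(rsp, kFastApiCallArguments,
                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
    for (int i = 0; i < kFastApiCallArguments; i++) {
      __ movq(args.GetArgumentOperand(i), scratch);  // scratch holds Smi 0
    }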
@@ -464,23 +466,26 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ LoadHeapObject(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ int api_call_argc = argc + kFastApiCallArguments;
+ StackArgumentsAccessor args(rsp, api_call_argc);
+
// Pass the additional arguments.
- __ movq(Operand(rsp, 2 * kPointerSize), rdi);
+ __ movq(args.GetArgumentOperand(api_call_argc - 1), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+ __ movq(args.GetArgumentOperand(api_call_argc - 2), rbx);
} else {
- __ Move(Operand(rsp, 3 * kPointerSize), call_data);
+ __ Move(args.GetArgumentOperand(api_call_argc - 2), call_data);
}
__ movq(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 3), kScratchRegister);
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, 5 * kPointerSize), kScratchRegister);
- __ movq(Operand(rsp, 6 * kPointerSize), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 4), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 5), kScratchRegister);
// Prepare arguments.
STATIC_ASSERT(kFastApiCallArguments == 6);
@@ -488,16 +493,10 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
-#if defined(__MINGW64__)
+#if defined(__MINGW64__) || defined(_WIN64)
Register arguments_arg = rcx;
Register callback_arg = rdx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register arguments_arg = returns_handle ? rdx : rcx;
- Register callback_arg = returns_handle ? r8 : rdx;
#else
Register arguments_arg = rdi;
Register callback_arg = rsi;
@@ -507,7 +506,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kApiStackSpace);
__ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
__ addq(rbx, Immediate(argc * kPointerSize));
@@ -519,19 +518,49 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeInvocationCallback)
- : FUNCTION_ADDR(&InvokeFunctionCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
callback_arg,
- argc + kFastApiCallArguments + 1,
- returns_handle,
+ api_call_argc + 1,
kFastApiCallArguments + 1);
}
+// Generate call to api function.
+static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Register receiver,
+ Register scratch,
+ int argc,
+ Register* values) {
+ ASSERT(optimization.is_simple_api_call());
+ ASSERT(!receiver.is(scratch));
+
+ const int stack_space = kFastApiCallArguments + argc + 1;
+  // Copy the return address.
+ __ movq(scratch, Operand(rsp, 0));
+ // Assign stack space for the call arguments.
+ __ subq(rsp, Immediate(stack_space * kPointerSize));
+ // Move the return address on top of the stack.
+ __ movq(Operand(rsp, 0), scratch);
+  // Write holder to stack frame (for simple API calls the receiver
+  // doubles as the holder).
+ __ movq(Operand(rsp, 1 * kPointerSize), receiver);
+ // Write receiver to stack frame.
+ int index = stack_space;
+ __ movq(Operand(rsp, index-- * kPointerSize), receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ ASSERT(!receiver.is(values[i]));
+ ASSERT(!scratch.is(values[i]));
+ __ movq(Operand(rsp, index-- * kPointerSize), values[i]);
+ }
+
+ GenerateFastApiCall(masm, optimization, argc);
+}
+
+
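
The new six-parameter GenerateFastApiCall overload above only rearranges the stack before delegating to the three-parameter version, which then fills the implicit API slots (function, call data, isolate, two undefined values). A sketch of the frame it builds, modeled as a plain array with slot 0 holding the relocated return address; treating the receiver as the holder in slot 1 is an assumption that follows the simple-API-call fast path:

#include <cstdint>
#include <vector>

std::vector<intptr_t> LayOutFastApiFrame(intptr_t return_address,
                                         intptr_t receiver,
                                         const std::vector<intptr_t>& values) {
  const int kFastApiCallArguments = 6;
  const int argc = static_cast<int>(values.size());
  const int stack_space = kFastApiCallArguments + argc + 1;
  std::vector<intptr_t> frame(stack_space + 1, 0);
  frame[0] = return_address;   // moved back on top after the subq
  frame[1] = receiver;         // holder slot; slots 2..6 are filled later
  int index = stack_space;
  frame[index--] = receiver;   // receiver slot, far end of the frame
  for (int i = 0; i < argc; ++i) {
    frame[index--] = values[i];  // extra values, top down
  }
  return frame;
}
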
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -690,7 +719,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ StubCache::kInterceptorArgsLength);
// Restore the name_ register.
__ pop(name_);
@@ -740,16 +769,13 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
}
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<PropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
+ JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, cell);
__ Cmp(FieldOperand(scratch, Cell::kValueOffset),
@@ -766,7 +792,7 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
Label* miss) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
- masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss, holder_reg, name, scratch1(), scratch2());
@@ -830,11 +856,11 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
- __ pop(scratch1); // Return address.
+ __ PopReturnAddressTo(scratch1);
__ push(receiver_reg);
__ Push(transition);
__ push(value_reg);
- __ push(scratch1);
+ __ PushReturnAddressFrom(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
masm->isolate()),
@@ -1017,19 +1043,17 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
+void StubCompiler::GenerateCheckPropertyCells(MacroAssembler* masm,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
Handle<JSObject> current = object;
while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
+ if (current->IsJSGlobalObject()) {
GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
+ Handle<JSGlobalObject>::cast(current),
name,
scratch,
miss);
@@ -1075,7 +1099,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int depth = 0;
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
+ __ movq(Operand(rsp, kPCOnStackSize), object_reg);
}
// Check the maps in the prototype chain.
@@ -1135,7 +1159,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
+ __ movq(Operand(rsp, kPCOnStackSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1195,7 +1219,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> holder,
Handle<Name> name,
Label* success,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<Object> callback) {
Label miss;
Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
@@ -1240,26 +1264,6 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<Name> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- HandlerFrontendFooter(name, success, &miss);
-}
-
-
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
PropertyIndex field,
@@ -1280,42 +1284,50 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
void BaseLoadStubCompiler::GenerateLoadCallback(
+ const CallOptimization& call_optimization) {
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 0, NULL);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch4().is(reg));
- __ pop(scratch4()); // Get return address to place it below.
-
+ __ PopReturnAddressTo(scratch4());
+
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
__ push(receiver()); // receiver
- __ push(reg); // holder
if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch1(), callback);
- __ push(FieldOperand(scratch1(),
+ ASSERT(!scratch2().is(reg));
+ __ Move(scratch2(), callback);
+ __ push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
+ ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ push(kScratchRegister); // return value
__ push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ push(reg); // holder
__ push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const ExecutableAccessorInfo& to the C++ callback.
Address getter_address = v8::ToCData<Address>(callback->getter());
- bool returns_handle =
- !CallbackTable::ReturnsVoid(isolate(), getter_address);
-#if defined(__MINGW64__)
+#if defined(__MINGW64__) || defined(_WIN64)
Register getter_arg = r8;
Register accessor_info_arg = rdx;
Register name_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses first register--rcx--for returned value.
- Register getter_arg = returns_handle ? r9 : r8;
- Register accessor_info_arg = returns_handle ? r8 : rdx;
- Register name_arg = returns_handle ? rdx : rcx;
#else
Register getter_arg = rdx;
Register accessor_info_arg = rsi;
@@ -1324,7 +1336,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
ASSERT(!name_arg.is(scratch4()));
__ movq(name_arg, rsp);
- __ push(scratch4()); // Restore return address.
+ __ PushReturnAddressFrom(scratch4());
// v8::Arguments::values_ and handler for name.
const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -1332,7 +1344,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ PrepareCallApiFunction(kArgStackSpace, returns_handle);
+ __ PrepareCallApiFunction(kArgStackSpace);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
__ lea(rax, Operand(name_arg, 6 * kPointerSize));
@@ -1343,16 +1355,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
// could be used to pass arguments.
__ lea(accessor_info_arg, StackSpaceOperand(0));
- Address thunk_address = returns_handle
- ? FUNCTION_ADDR(&InvokeAccessorGetter)
- : FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
__ CallApiFunctionAndReturn(getter_address,
thunk_address,
getter_arg,
kStackSpace,
- returns_handle,
- 5);
+ 6);
}
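
The reordered pushes above build the PropertyCallbackArguments array back to front, with the holder now pushed last so it lands at the lowest index; the STATIC_ASSERTs pin the layout. Restated as a sketch (indices are relative to the "this" slot; kArgsLength is 6, and the property name takes the extra slot counted in kStackSpace above):

enum PropertyCallbackSlot {
  kThisIndex = 0,
  kDataIndex = -1,
  kReturnValueOffset = -2,
  kReturnValueDefaultValueIndex = -3,
  kIsolateIndex = -4,
  kHolderIndex = -5
};
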
@@ -1444,14 +1453,14 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- __ pop(scratch2()); // save old return address
+ __ PopReturnAddressTo(scratch2());
PushInterceptorArguments(masm(), receiver(), holder_reg,
this->name(), interceptor_holder);
- __ push(scratch2()); // restore old return address
+ __ PushReturnAddressFrom(scratch2());
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
}
}
@@ -1470,11 +1479,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
Label* miss) {
ASSERT(holder->IsGlobalObject());
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the maps haven't changed.
@@ -1538,9 +1544,8 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1561,7 +1566,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -1591,11 +1596,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
// Check that function is still array
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1647,9 +1652,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1688,7 +1693,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(1));
__ JumpIfNotSmi(rcx, &with_write_barrier);
// Save new length.
@@ -1723,7 +1728,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ cmpl(rax, rcx);
__ j(greater, &call_builtin);
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ movq(rcx, args.GetArgumentOperand(1));
__ StoreNumberToDoubleElements(
rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
@@ -1800,7 +1805,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&call_builtin);
}
- __ movq(rbx, Operand(rsp, argc * kPointerSize));
+ __ movq(rbx, args.GetArgumentOperand(1));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
@@ -1849,7 +1854,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
@@ -1898,9 +1903,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
Label miss, return_undefined, call_builtin;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss);
@@ -1978,6 +1983,7 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
Label miss;
Label name_miss;
@@ -2003,9 +2009,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
Register receiver = rbx;
Register index = rdi;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -2059,6 +2065,8 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
+
Label miss;
Label name_miss;
Label index_out_of_range;
@@ -2084,9 +2092,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
Register index = rdi;
Register scratch = rdx;
Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(receiver, args.GetReceiverOperand());
if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ __ movq(index, args.GetArgumentOperand(1));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
@@ -2139,13 +2147,14 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(argc - 1));
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2158,7 +2167,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Load the char code argument.
Register code = rbx;
- __ movq(code, Operand(rsp, 1 * kPointerSize));
+ __ movq(code, args.GetArgumentOperand(argc));
// Check the code is a smi.
Label slow;
@@ -2200,8 +2209,123 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
Handle<JSFunction> function,
Handle<String> name,
Code::StubType type) {
- // TODO(872): implement this.
- return Handle<Code>::null();
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+  // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  // -- ...
+  // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) {
+ return Handle<Code>::null();
+ }
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell.is_null()) {
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rdx, &miss);
+
+ CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
+ name, &miss);
+ } else {
+ ASSERT(cell->value() == *function);
+ GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
+ &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into rax.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(rax, &smi);
+
+ // Check if the argument is a heap number and load its value into xmm0.
+ Label slow;
+ __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Check if the argument is strictly positive. Note this also discards NaN.
+ __ xorpd(xmm1, xmm1);
+ __ ucomisd(xmm0, xmm1);
+ __ j(below_equal, &slow);
+
+ // Do a truncating conversion.
+ __ cvttsd2si(rax, xmm0);
+
+  // Check for 0x80000000, which signals a failed conversion.
+ Label conversion_failure;
+ __ cmpl(rax, Immediate(0x80000000));
+ __ j(equal, &conversion_failure);
+
+ // Smi tag and return.
+ __ Integer32ToSmi(rax, rax);
+ __ bind(&smi);
+ __ ret(2 * kPointerSize);
+
+ // Check if the argument is < 2^kMantissaBits.
+ Label already_round;
+ __ bind(&conversion_failure);
+  int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
+ __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ ucomisd(xmm0, xmm1);
+ __ j(above_equal, &already_round);
+
+ // Save a copy of the argument.
+ __ movaps(xmm2, xmm0);
+
+ // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
+ __ addsd(xmm0, xmm1);
+ __ subsd(xmm0, xmm1);
+
+ // Compare the argument and the tentative result to get the right mask:
+ // if xmm2 < xmm0:
+ // xmm2 = 1...1
+ // else:
+ // xmm2 = 0...0
+ __ cmpltsd(xmm2, xmm0);
+
+ // Subtract 1 if the argument was less than the tentative result.
+ int64_t kOne = V8_INT64_C(0x3ff0000000000000);
+ __ movq(rbx, kOne, RelocInfo::NONE64);
+ __ movq(xmm1, rbx);
+ __ andpd(xmm1, xmm2);
+ __ subsd(xmm0, xmm1);
+
+ // Return a new heap number.
+ __ AllocateHeapNumber(rax, rbx, &slow);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(2 * kPointerSize);
+
+ // Return the argument (when it's an already round heap number).
+ __ bind(&already_round);
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ ret(2 * kPointerSize);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ ParameterCount expected(function);
+ __ InvokeFunction(function, expected, arguments(),
+ JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // rcx: function name.
+ GenerateMissBranch();
+
+ // Return the generated code.
+ return GetCode(type, name);
}
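
The CompileMathFloorCall body falls back on the classic 2^52 trick when cvttsd2si reports an out-of-range conversion: 0x4330000000000000 is the bit pattern of 2^52, and adding then subtracting it flushes the fraction bits of any smaller positive double. A portable sketch of the numeric core, assuming the default round-to-nearest-even mode that the stub also assumes (smi handling, NaN/non-positive bailouts, and heap-number allocation omitted):

double FloorViaTwoFiftyTwo(double x) {
  const double kTwo52 = 4503599627370496.0;  // 2^52
  if (!(x > 0.0)) return x;                  // stub's slow path
  if (x >= kTwo52) return x;                 // "already_round" case
  double rounded = (x + kTwo52) - kTwo52;    // rounds to a whole number
  if (rounded > x) rounded -= 1.0;           // cmpltsd/andpd/subsd step
  return rounded;                            // e.g. 2.7 -> 2.0, 3.0 -> 3.0
}
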
@@ -2223,13 +2347,14 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, argc);
if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
Label miss;
GenerateNameCheck(name, &miss);
if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rdx, args.GetArgumentOperand(argc - 1));
__ JumpIfSmi(rdx, &miss);
CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
name, &miss);
@@ -2240,32 +2365,31 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rax, args.GetArgumentOperand(argc));
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(rax, &not_smi);
- __ SmiToInteger32(rax, rax);
+  // Branchless abs implementation; see:
+ // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set rbx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
- __ movl(rbx, rax);
- __ sarl(rbx, Immediate(kBitsPerInt - 1));
+ __ movq(rbx, rax);
+ __ sar(rbx, Immediate(kBitsPerPointer - 1));
// Do bitwise not or do nothing depending on rbx.
- __ xorl(rax, rbx);
+ __ xor_(rax, rbx);
// Add 1 or do nothing depending on rbx.
- __ subl(rax, rbx);
+ __ subq(rax, rbx);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ j(negative, &slow);
- // Smi case done.
- __ Integer32ToSmi(rax, rax);
__ ret(2 * kPointerSize);
// Check if the argument is a heap number and load its value.
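
The rewritten smi fast path above is the standard branchless absolute value from the cited bithacks page, widened from 32 to 64 bits so the smi tag never has to come off. A minimal sketch (assuming arithmetic right shift of signed values, as the stub does):

#include <cstdint>

int64_t BranchlessAbs(int64_t v) {
  int64_t mask = v >> 63;    // 0...0 if v >= 0, 1...1 (== -1) if v < 0
  // Like the stub, the single most negative input stays negative and is
  // caught by the j(negative, &slow) check.
  return (v ^ mask) - mask;  // identity, or two's-complement negation
}
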
@@ -2331,9 +2455,9 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
Label miss, miss_before_stack_reserved;
GenerateNameCheck(name, &miss_before_stack_reserved);
- // Get the receiver from the stack.
const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, argc);
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss_before_stack_reserved);
@@ -2385,9 +2509,8 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
Label miss;
GenerateNameCheck(name, &miss);
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
@@ -2411,7 +2534,7 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
break;
@@ -2531,21 +2654,20 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
// Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ StackArgumentsAccessor args(rsp, arguments());
+ __ movq(rdx, args.GetReceiverOperand());
CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
&miss);
// Restore receiver.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, args.GetReceiverOperand());
// Check that the function really is a function.
__ JumpIfSmi(rax, &miss);
@@ -2556,7 +2678,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
// necessary.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Invoke the function.
@@ -2603,15 +2725,14 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Label miss;
GenerateNameCheck(name, &miss);
- // Get the number of arguments.
- const int argc = arguments().immediate();
+ StackArgumentsAccessor args(rsp, arguments());
GenerateGlobalReceiverCheck(object, holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
__ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ movq(args.GetReceiverOperand(), rdx);
}
// Set up the context (function already in rdi).
@@ -2650,12 +2771,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
HandlerFrontend(object, receiver(), holder, name, &success);
__ bind(&success);
- __ pop(scratch1()); // remove the return address
+ __ PopReturnAddressTo(scratch1());
__ push(receiver());
__ Push(callback); // callback info
__ Push(name);
__ push(value());
- __ push(scratch1()); // restore return address
+ __ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
@@ -2667,6 +2788,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
+Handle<Code> StoreStubCompiler::CompileStoreCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+
+ Register values[] = { value() };
+ GenerateFastApiCall(
+ masm(), call_optimization, receiver(), scratch3(), 1, values);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::CALLBACKS, name);
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -2717,12 +2856,12 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- __ pop(scratch1()); // remove the return address
+ __ PopReturnAddressTo(scratch1());
__ push(receiver());
__ push(this->name());
__ push(value());
__ Push(Smi::FromInt(strict_mode()));
- __ push(scratch1()); // restore return address
+ __ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
@@ -2734,48 +2873,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<PropertyCell> cell,
- Handle<Name> name) {
- Label miss;
-
- // Check that the map of the global has not changed.
- __ Cmp(FieldOperand(receiver(), HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ Move(scratch1(), cell);
- Operand cell_operand =
- FieldOperand(scratch1(), PropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ movq(cell_operand, value());
- // Cells are always rescanned, so no write barrier here.
-
- // Return the value (register rax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetICCode(kind(), Code::NORMAL, name);
-}
-
-
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -2815,7 +2912,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
- Handle<GlobalObject> global) {
+ Handle<JSGlobalObject> global) {
Label success;
NonexistentHandlerFrontend(object, last, name, &success, global);
@@ -2938,7 +3035,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ j(equal, &miss);
} else if (FLAG_debug_code) {
__ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, "DontDelete cells can't contain the hole");
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
}
HandlerFrontendFooter(name, &success, &miss);
@@ -3038,484 +3135,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi or a heap number containing a smi and branch
- // if the check fails.
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- masm->isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
- __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, xmm_scratch0);
- __ cvtlsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- __ Integer32ToSmi(key, scratch);
- __ bind(&key_ok);
-}
-
-
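
The deleted GenerateSmiKeyCheck accepted a key that is either a smi or a heap number whose value survives a round trip through int32 truncation; the parity branch rejected NaN. The same predicate in plain C++ (the range guard stands in for cvttsd2si failing on out-of-range inputs):

#include <cstdint>

bool HeapNumberKeyIsSmiConvertible(double key) {
  if (!(key >= -2147483648.0 && key <= 2147483647.0)) return false;
  int32_t truncated = static_cast<int32_t>(key);   // cvttsd2si
  return static_cast<double>(truncated) == key;    // false for NaN too
}
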
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- Label check_heap_number;
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Float to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(rax, &slow);
- } else {
- __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
- }
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- { // Clamp the value to [0..255].
- Label done;
- __ testl(rdx, Immediate(0xFFFFFF00));
- __ j(zero, &done, Label::kNear);
- __ setcc(negative, rdx); // 1 if negative, 0 if positive.
- __ decb(rdx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2sd(xmm0, rdx);
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
- // Fast path: use machine instruction to convert to int64. If that
- // fails (out-of-range), go into the runtime.
- __ cvttsd2siq(r8, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(r8, kScratchRegister);
- __ j(equal, &slow);
-
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), r8);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), r8);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), r8);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
-
- // Miss case: call runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-}
-
-
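
The EXTERNAL_PIXEL_ELEMENTS case of the deleted store stub clamps to [0..255] with the testl/setcc/decb sequence rather than two compares. A sketch of the same mapping:

#include <cstdint>

uint8_t ClampToPixel(int32_t value) {
  // In-range values pass the testl against ~0xFF untouched.
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);
  // setcc(negative) yields 1 for negative inputs, 0 for too-large ones;
  // the byte decrement maps that to 0x00 and 0xFF respectively.
  uint8_t is_negative = value < 0 ? 1 : 0;
  return static_cast<uint8_t>(is_negative - 1);
}
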
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store, grow;
- Label check_capacity, slow;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- } else {
- // Do the store and update the write barrier.
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ SmiToInteger32(rcx, rcx);
- __ lea(rcx,
- FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Make sure to preserve the value in register rax.
- __ movq(rbx, rax);
- __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime. Flags are already set by the previous
-    // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_array_map());
- __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
- }
-
- // Store the element at index zero.
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ ret(0);
-
- __ bind(&check_capacity);
- // Check for cow elements, in general they are not handled by this stub.
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &miss_force_generic);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessStoreMode store_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity, restore_key_transition_elements_kind;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rdi);
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (IsGrowStoreMode(store_mode)) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- // Handle smi values specially
- __ bind(&finish_store);
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
-
- __ bind(&restore_key_transition_elements_kind);
- // Restore smi-tagging of rcx.
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&transition_elements_kind);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
-
- if (is_js_array && IsGrowStoreMode(store_mode)) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime. Flags are already set by the previous
-    // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(rax, &value_is_smi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
-    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
-    // efficiency; they are guaranteed to be initialized before use.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_double_array_map());
- __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
-
- // Increment the length of the array.
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
-
- __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
- }
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
- }
-}
-
-
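
The double-array grow path above seeds the fresh backing store with kHoleNanInt64, a quiet-NaN bit pattern FixedDoubleArray uses to mark absent elements, chosen so ordinary arithmetic results do not collide with it. A sketch of the BitCast being materialized into r8 (the exact constant is V8-internal and assumed here for illustration):

#include <cstdint>
#include <cstring>

double HoleNaN() {
  const uint64_t kHoleNanInt64 = 0x7FF7FFFFFFFFFFFFull;  // assumed value
  double hole;
  std::memcpy(&hole, &kHoleNanInt64, sizeof(hole));      // the BitCast
  return hole;
}
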
#undef __
} } // namespace v8::internal
diff --git a/chromium/v8/src/zone-inl.h b/chromium/v8/src/zone-inl.h
index 49e7626f74e..f257382a2db 100644
--- a/chromium/v8/src/zone-inl.h
+++ b/chromium/v8/src/zone-inl.h
@@ -109,6 +109,12 @@ void* ZoneList<T>::operator new(size_t size, Zone* zone) {
}
+template <typename T>
+void* ZoneSplayTree<T>::operator new(size_t size, Zone* zone) {
+ return zone->New(static_cast<int>(size));
+}
+
+
} } // namespace v8::internal
#endif // V8_ZONE_INL_H_
diff --git a/chromium/v8/src/zone.h b/chromium/v8/src/zone.h
index a12ed793123..bd7cc39b0c4 100644
--- a/chromium/v8/src/zone.h
+++ b/chromium/v8/src/zone.h
@@ -177,6 +177,7 @@ struct ZoneAllocationPolicy {
explicit ZoneAllocationPolicy(Zone* zone) : zone_(zone) { }
INLINE(void* New(size_t size));
INLINE(static void Delete(void *pointer)) { }
+ Zone* zone() { return zone_; }
private:
Zone* zone_;
@@ -201,7 +202,7 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
ZoneList(const ZoneList<T>& other, Zone* zone)
: List<T, ZoneAllocationPolicy>(other.length(),
ZoneAllocationPolicy(zone)) {
- AddAll(other, ZoneAllocationPolicy(zone));
+ AddAll(other, zone);
}
// We add some convenience wrappers so that we can pass in a Zone
@@ -209,8 +210,7 @@ class ZoneList: public List<T, ZoneAllocationPolicy> {
INLINE(void Add(const T& element, Zone* zone)) {
List<T, ZoneAllocationPolicy>::Add(element, ZoneAllocationPolicy(zone));
}
- INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other,
- Zone* zone)) {
+ INLINE(void AddAll(const List<T, ZoneAllocationPolicy>& other, Zone* zone)) {
List<T, ZoneAllocationPolicy>::AddAll(other, ZoneAllocationPolicy(zone));
}
INLINE(void AddAll(const Vector<T>& other, Zone* zone)) {
@@ -246,6 +246,11 @@ class ZoneSplayTree: public SplayTree<Config, ZoneAllocationPolicy> {
explicit ZoneSplayTree(Zone* zone)
: SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
~ZoneSplayTree();
+
+ INLINE(void* operator new(size_t size, Zone* zone));
+
+ void operator delete(void* pointer) { UNREACHABLE(); }
+ void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
};
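
The zone-inl.h and zone.h hunks give ZoneSplayTree the zone-placement operator new that ZoneList already has, with both deletes declared UNREACHABLE() because zone memory is only reclaimed wholesale when the zone dies. A self-contained sketch of the pattern (not V8's Zone; alignment and segment growth omitted):

#include <cstddef>
#include <cstdlib>

class Arena {  // stand-in for v8::internal::Zone
 public:
  explicit Arena(size_t capacity)
      : buffer_(static_cast<char*>(std::malloc(capacity))), used_(0) {}
  ~Arena() { std::free(buffer_); }  // one wholesale release, no per-object frees
  void* New(size_t size) {
    void* result = buffer_ + used_;
    used_ += size;
    return result;
  }
 private:
  char* buffer_;
  size_t used_;
};

struct Tree {
  int root;
  void* operator new(size_t size, Arena* arena) { return arena->New(size); }
  void operator delete(void*, Arena*) {}  // only reached if a ctor throws
};

int main() {
  Arena arena(1024);
  Tree* t = new (&arena) Tree{7};  // lives exactly as long as the arena
  return t->root == 7 ? 0 : 1;
}
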
diff --git a/chromium/v8/test/cctest/cctest.gyp b/chromium/v8/test/cctest/cctest.gyp
index 9df5c7bccc3..ee7ffad6d35 100644
--- a/chromium/v8/test/cctest/cctest.gyp
+++ b/chromium/v8/test/cctest/cctest.gyp
@@ -55,7 +55,9 @@
'test-bignum-dtoa.cc',
'test-circular-queue.cc',
'test-compiler.cc',
+ 'test-condition-variable.cc',
'test-conversions.cc',
+ 'test-cpu.cc',
'test-cpu-profiler.cc',
'test-dataflow.cc',
'test-date.cc',
@@ -79,28 +81,32 @@
'test-heap-profiler.cc',
'test-list.cc',
'test-liveedit.cc',
- 'test-lock.cc',
'test-lockers.cc',
'test-log.cc',
'test-mark-compact.cc',
+ 'test-mutex.cc',
'test-object-observe.cc',
'test-parsing.cc',
'test-platform.cc',
'test-platform-tls.cc',
'test-profile-generator.cc',
'test-random.cc',
+ 'test-random-number-generator.cc',
'test-regexp.cc',
'test-reloc-info.cc',
+ 'test-semaphore.cc',
'test-serialize.cc',
- 'test-sockets.cc',
+ 'test-socket.cc',
'test-spaces.cc',
'test-strings.cc',
'test-symbols.cc',
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
+ 'test-time.cc',
'test-types.cc',
'test-unbound-queue.cc',
+ 'test-unique.cc',
'test-utils.cc',
'test-version.cc',
'test-weakmaps.cc',
@@ -113,6 +119,7 @@
'test-assembler-ia32.cc',
'test-code-stubs.cc',
'test-code-stubs-ia32.cc',
+ 'test-cpu-ia32.cc',
'test-disasm-ia32.cc',
'test-log-stack-tracer.cc'
],
@@ -122,6 +129,7 @@
'test-assembler-x64.cc',
'test-code-stubs.cc',
'test-code-stubs-x64.cc',
+ 'test-cpu-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'
],
@@ -129,6 +137,8 @@
['v8_target_arch=="arm"', {
'sources': [
'test-assembler-arm.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-arm.cc',
'test-disasm-arm.cc'
],
}],
diff --git a/chromium/v8/tools/SourceMap.js b/chromium/v8/tools/SourceMap.js
new file mode 100644
index 00000000000..9cbd37a7355
--- /dev/null
+++ b/chromium/v8/tools/SourceMap.js
@@ -0,0 +1,371 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is a copy from blink dev tools, see:
+// http://src.chromium.org/viewvc/blink/trunk/Source/devtools/front_end/SourceMap.js
+// revision: 153407
+
+// Added to make the file work without dev tools
+WebInspector = {};
+WebInspector.ParsedURL = {};
+WebInspector.ParsedURL.completeURL = function(){};
+// start of original file content
+
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Implements Source Map V3 model. See http://code.google.com/p/closure-compiler/wiki/SourceMaps
+ * for format description.
+ * @constructor
+ * @param {string} sourceMappingURL
+ * @param {SourceMapV3} payload
+ */
+WebInspector.SourceMap = function(sourceMappingURL, payload)
+{
+ if (!WebInspector.SourceMap.prototype._base64Map) {
+ const base64Digits = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ WebInspector.SourceMap.prototype._base64Map = {};
+ for (var i = 0; i < base64Digits.length; ++i)
+ WebInspector.SourceMap.prototype._base64Map[base64Digits.charAt(i)] = i;
+ }
+
+ this._sourceMappingURL = sourceMappingURL;
+ this._reverseMappingsBySourceURL = {};
+ this._mappings = [];
+ this._sources = {};
+ this._sourceContentByURL = {};
+ this._parseMappingPayload(payload);
+}
+
+/**
+ * @param {string} sourceMapURL
+ * @param {string} compiledURL
+ * @param {function(WebInspector.SourceMap)} callback
+ */
+WebInspector.SourceMap.load = function(sourceMapURL, compiledURL, callback)
+{
+ NetworkAgent.loadResourceForFrontend(WebInspector.resourceTreeModel.mainFrame.id, sourceMapURL, undefined, contentLoaded.bind(this));
+
+ /**
+ * @param {?Protocol.Error} error
+ * @param {number} statusCode
+ * @param {NetworkAgent.Headers} headers
+ * @param {string} content
+ */
+ function contentLoaded(error, statusCode, headers, content)
+ {
+ if (error || !content || statusCode >= 400) {
+ console.error("Could not load content for " + sourceMapURL + " : " + (error || ("HTTP status code: " + statusCode)));
+ callback(null);
+ return;
+ }
+
+ if (content.slice(0, 3) === ")]}")
+ content = content.substring(content.indexOf('\n'));
+ try {
+ var payload = /** @type {SourceMapV3} */ (JSON.parse(content));
+ var baseURL = sourceMapURL.startsWith("data:") ? compiledURL : sourceMapURL;
+ callback(new WebInspector.SourceMap(baseURL, payload));
+ } catch(e) {
+ console.error(e.message);
+ callback(null);
+ }
+ }
+}
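
load() strips an optional ")]}" XSSI guard before handing the payload to JSON.parse: servers prepend it so a source map cannot be pulled in as cross-site script, and everything up to the first newline is discarded. The same step sketched in C++:

#include <string>

std::string StripXssiGuard(const std::string& content) {
  if (content.compare(0, 3, ")]}") == 0) {
    // Mirrors content.substring(content.indexOf('\n')) above: the
    // newline itself is kept, and JSON parsers skip it as whitespace.
    std::string::size_type newline = content.find('\n');
    if (newline != std::string::npos) return content.substr(newline);
  }
  return content;
}
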
+
+WebInspector.SourceMap.prototype = {
+ /**
+ * @return {Array.<string>}
+ */
+ sources: function()
+ {
+ return Object.keys(this._sources);
+ },
+
+ /**
+ * @param {string} sourceURL
+ * @return {string|undefined}
+ */
+ sourceContent: function(sourceURL)
+ {
+ return this._sourceContentByURL[sourceURL];
+ },
+
+ /**
+ * @param {string} sourceURL
+ * @param {WebInspector.ResourceType} contentType
+ * @return {WebInspector.ContentProvider}
+ */
+ sourceContentProvider: function(sourceURL, contentType)
+ {
+ var lastIndexOfDot = sourceURL.lastIndexOf(".");
+ var extension = lastIndexOfDot !== -1 ? sourceURL.substr(lastIndexOfDot + 1) : "";
+ var mimeType = WebInspector.ResourceType.mimeTypesForExtensions[extension.toLowerCase()];
+ var sourceContent = this.sourceContent(sourceURL);
+ if (sourceContent)
+ return new WebInspector.StaticContentProvider(contentType, sourceContent, mimeType);
+ return new WebInspector.CompilerSourceMappingContentProvider(sourceURL, contentType, mimeType);
+ },
+
+ /**
+ * @param {SourceMapV3} mappingPayload
+ */
+ _parseMappingPayload: function(mappingPayload)
+ {
+ if (mappingPayload.sections)
+ this._parseSections(mappingPayload.sections);
+ else
+ this._parseMap(mappingPayload, 0, 0);
+ },
+
+ /**
+ * @param {Array.<SourceMapV3.Section>} sections
+ */
+ _parseSections: function(sections)
+ {
+ for (var i = 0; i < sections.length; ++i) {
+ var section = sections[i];
+ this._parseMap(section.map, section.offset.line, section.offset.column);
+ }
+ },
+
+ /**
+ * @param {number} lineNumber in compiled resource
+ * @param {number} columnNumber in compiled resource
+ * @return {?Array}
+ */
+ findEntry: function(lineNumber, columnNumber)
+ {
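+        // Binary search over mappings, which are sorted by compiled
+        // (line, column): narrow [first, first + count) down to the last
+        // entry at or before the requested position.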
+ var first = 0;
+ var count = this._mappings.length;
+ while (count > 1) {
+ var step = count >> 1;
+ var middle = first + step;
+ var mapping = this._mappings[middle];
+ if (lineNumber < mapping[0] || (lineNumber === mapping[0] && columnNumber < mapping[1]))
+ count = step;
+ else {
+ first = middle;
+ count -= step;
+ }
+ }
+ var entry = this._mappings[first];
+ if (!first && entry && (lineNumber < entry[0] || (lineNumber === entry[0] && columnNumber < entry[1])))
+ return null;
+ return entry;
+ },
+
+ /**
+ * @param {string} sourceURL of the originating resource
+ * @param {number} lineNumber in the originating resource
+ * @return {Array}
+ */
+ findEntryReversed: function(sourceURL, lineNumber)
+ {
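+        // Source lines are mapped sparsely: scan forward from the requested
+        // line to the nearest line that has a mapping, falling back to the
+        // first mapping of the compiled script.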
+ var mappings = this._reverseMappingsBySourceURL[sourceURL];
+ for ( ; lineNumber < mappings.length; ++lineNumber) {
+ var mapping = mappings[lineNumber];
+ if (mapping)
+ return mapping;
+ }
+ return this._mappings[0];
+ },
+
+    /**
+     * @param {SourceMapV3} map
+     * @param {number} lineNumber base line offset in the compiled resource
+     * @param {number} columnNumber base column offset in the compiled resource
+     */
+ _parseMap: function(map, lineNumber, columnNumber)
+ {
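+        // map.mappings is a string in which ";" separates generated lines and
+        // "," separates segments within a line; each segment holds up to five
+        // VLQ deltas: [column, sourceIndex, sourceLine, sourceColumn, nameIndex].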
+ var sourceIndex = 0;
+ var sourceLineNumber = 0;
+ var sourceColumnNumber = 0;
+ var nameIndex = 0;
+
+ var sources = [];
+ var originalToCanonicalURLMap = {};
+ for (var i = 0; i < map.sources.length; ++i) {
+ var originalSourceURL = map.sources[i];
+ var sourceRoot = map.sourceRoot || "";
+ if (sourceRoot && !sourceRoot.endsWith("/"))
+ sourceRoot += "/";
+ var href = sourceRoot + originalSourceURL;
+ var url = WebInspector.ParsedURL.completeURL(this._sourceMappingURL, href) || href;
+ originalToCanonicalURLMap[originalSourceURL] = url;
+ sources.push(url);
+ this._sources[url] = true;
+
+ if (map.sourcesContent && map.sourcesContent[i])
+ this._sourceContentByURL[url] = map.sourcesContent[i];
+ }
+
+ var stringCharIterator = new WebInspector.SourceMap.StringCharIterator(map.mappings);
+ var sourceURL = sources[sourceIndex];
+
+ while (true) {
+ if (stringCharIterator.peek() === ",")
+ stringCharIterator.next();
+ else {
+ while (stringCharIterator.peek() === ";") {
+ lineNumber += 1;
+ columnNumber = 0;
+ stringCharIterator.next();
+ }
+ if (!stringCharIterator.hasNext())
+ break;
+ }
+
+ columnNumber += this._decodeVLQ(stringCharIterator);
+ if (this._isSeparator(stringCharIterator.peek())) {
+ this._mappings.push([lineNumber, columnNumber]);
+ continue;
+ }
+
+ var sourceIndexDelta = this._decodeVLQ(stringCharIterator);
+ if (sourceIndexDelta) {
+ sourceIndex += sourceIndexDelta;
+ sourceURL = sources[sourceIndex];
+ }
+ sourceLineNumber += this._decodeVLQ(stringCharIterator);
+ sourceColumnNumber += this._decodeVLQ(stringCharIterator);
+ if (!this._isSeparator(stringCharIterator.peek()))
+ nameIndex += this._decodeVLQ(stringCharIterator);
+
+ this._mappings.push([lineNumber, columnNumber, sourceURL, sourceLineNumber, sourceColumnNumber]);
+ }
+
+ for (var i = 0; i < this._mappings.length; ++i) {
+ var mapping = this._mappings[i];
+ var url = mapping[2];
+ if (!url)
+ continue;
+ if (!this._reverseMappingsBySourceURL[url])
+ this._reverseMappingsBySourceURL[url] = [];
+ var reverseMappings = this._reverseMappingsBySourceURL[url];
+ var sourceLine = mapping[3];
+ if (!reverseMappings[sourceLine])
+ reverseMappings[sourceLine] = [mapping[0], mapping[1]];
+ }
+ },
+
+ /**
+ * @param {string} char
+ * @return {boolean}
+ */
+ _isSeparator: function(char)
+ {
+ return char === "," || char === ";";
+ },
+
+ /**
+ * @param {WebInspector.SourceMap.StringCharIterator} stringCharIterator
+ * @return {number}
+ */
+ _decodeVLQ: function(stringCharIterator)
+ {
+ // Read unsigned value.
+ var result = 0;
+ var shift = 0;
+ do {
+ var digit = this._base64Map[stringCharIterator.next()];
+ result += (digit & this._VLQ_BASE_MASK) << shift;
+ shift += this._VLQ_BASE_SHIFT;
+ } while (digit & this._VLQ_CONTINUATION_MASK);
+
+ // Fix the sign.
+ var negative = result & 1;
+ result >>= 1;
+ return negative ? -result : result;
+ },
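+
+    // Worked example (illustrative): with the base64 alphabet above, "2H"
+    // decodes as '2' -> 54 (0b110110): continuation bit set, low five bits
+    // 22; then 'H' -> 7: no continuation, so 22 + (7 << 5) = 246. Bit 0 is
+    // the sign bit (0 here), giving 246 >> 1 = 123.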
+
+ _VLQ_BASE_SHIFT: 5,
+ _VLQ_BASE_MASK: (1 << 5) - 1,
+ _VLQ_CONTINUATION_MASK: 1 << 5
+}
+
+/**
+ * @constructor
+ * @param {string} string
+ */
+WebInspector.SourceMap.StringCharIterator = function(string)
+{
+ this._string = string;
+ this._position = 0;
+}
+
+WebInspector.SourceMap.StringCharIterator.prototype = {
+ /**
+ * @return {string}
+ */
+ next: function()
+ {
+ return this._string.charAt(this._position++);
+ },
+
+ /**
+ * @return {string}
+ */
+ peek: function()
+ {
+ return this._string.charAt(this._position);
+ },
+
+ /**
+ * @return {boolean}
+ */
+ hasNext: function()
+ {
+ return this._position < this._string.length;
+ }
+}
diff --git a/chromium/v8/tools/gcmole/Makefile b/chromium/v8/tools/gcmole/Makefile
index 23c029c2dc3..764245caf61 100644
--- a/chromium/v8/tools/gcmole/Makefile
+++ b/chromium/v8/tools/gcmole/Makefile
@@ -40,4 +40,4 @@ libgcmole.so: gcmole.cc
-shared -o libgcmole.so gcmole.cc
clean:
- rm libgcmole.so
+ rm -f libgcmole.so
diff --git a/chromium/v8/tools/gcmole/bootstrap.sh b/chromium/v8/tools/gcmole/bootstrap.sh
new file mode 100755
index 00000000000..baa0b1f5f54
--- /dev/null
+++ b/chromium/v8/tools/gcmole/bootstrap.sh
@@ -0,0 +1,126 @@
+#!/usr/bin/env bash
+
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script will build libgcmole.so.
+
+CLANG_RELEASE=2.9
+
+THIS_DIR="$(dirname "${0}")"
+LLVM_DIR="${THIS_DIR}/../../third_party/llvm"
+CLANG_DIR="${LLVM_DIR}/tools/clang"
+
+LLVM_REPO_URL=${LLVM_URL:-https://llvm.org/svn/llvm-project}
+
+# Die if any command dies.
+set -e
+
+OS="$(uname -s)"
+
+# Xcode and clang don't get along when predictive compilation is enabled.
+# http://crbug.com/96315
+if [[ "${OS}" = "Darwin" ]] && xcodebuild -version | grep -q 'Xcode 3.2' ; then
+ XCONF=com.apple.Xcode
+ if [[ "${GYP_GENERATORS}" != "make" ]] && \
+ [ "$(defaults read "${XCONF}" EnablePredictiveCompilation)" != "0" ]; then
+ echo
+ echo " HEARKEN!"
+ echo "You're using Xcode3 and you have 'Predictive Compilation' enabled."
+ echo "This does not work well with clang (http://crbug.com/96315)."
+ echo "Disable it in Preferences->Building (lower right), or run"
+ echo " defaults write ${XCONF} EnablePredictiveCompilation -boolean NO"
+ echo "while Xcode is not running."
+ echo
+ fi
+
+ SUB_VERSION=$(xcodebuild -version | sed -Ene 's/Xcode 3\.2\.([0-9]+)/\1/p')
+ if [[ "${SUB_VERSION}" < 6 ]]; then
+ echo
+ echo " YOUR LD IS BUGGY!"
+ echo "Please upgrade Xcode to at least 3.2.6."
+ echo
+ fi
+fi
+
+echo Getting LLVM r"${CLANG_RELEASE}" in "${LLVM_DIR}"
+if ! svn co --force \
+ "${LLVM_REPO_URL}/llvm/branches/release_${CLANG_RELEASE/./}" \
+ "${LLVM_DIR}"; then
+ echo Checkout failed, retrying
+ rm -rf "${LLVM_DIR}"
+ svn co --force \
+ "${LLVM_REPO_URL}/llvm/branches/release_${CLANG_RELEASE/./}" \
+ "${LLVM_DIR}"
+fi
+
+echo Getting clang r"${CLANG_RELEASE}" in "${CLANG_DIR}"
+svn co --force \
+ "${LLVM_REPO_URL}/cfe/branches/release_${CLANG_RELEASE/./}" \
+ "${CLANG_DIR}"
+
+# Echo all commands
+set -x
+
+NUM_JOBS=3
+if [[ "${OS}" = "Linux" ]]; then
+ NUM_JOBS="$(grep -c "^processor" /proc/cpuinfo)"
+elif [ "${OS}" = "Darwin" ]; then
+ NUM_JOBS="$(sysctl -n hw.ncpu)"
+fi
+
+# Build clang.
+cd "${LLVM_DIR}"
+if [[ ! -f ./config.status ]]; then
+ ../llvm/configure \
+ --enable-optimized \
+ --disable-threads \
+ --disable-pthreads \
+ --without-llvmgcc \
+ --without-llvmgxx
+fi
+
+MACOSX_DEPLOYMENT_TARGET=10.5 make -j"${NUM_JOBS}"
+STRIP_FLAGS=
+if [ "${OS}" = "Darwin" ]; then
+ # See http://crbug.com/256342
+ STRIP_FLAGS=-x
+fi
+strip ${STRIP_FLAGS} Release/bin/clang
+cd -
+
+# Build libgcmole.so
+make -C "${THIS_DIR}" clean
+make -C "${THIS_DIR}" LLVM_SRC_ROOT="${LLVM_DIR}" libgcmole.so
+
+set +x
+
+echo
+echo You can now run gcmole using this command:
+echo
+echo CLANG_BIN=\"third_party/llvm/Release/bin\" lua tools/gcmole/gcmole.lua
+echo
diff --git a/chromium/v8/tools/gcmole/gcmole.lua b/chromium/v8/tools/gcmole/gcmole.lua
index 66aff942a49..aa9324756ac 100644
--- a/chromium/v8/tools/gcmole/gcmole.lua
+++ b/chromium/v8/tools/gcmole/gcmole.lua
@@ -103,7 +103,10 @@ local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
.. " -triple " .. triple
.. " -D" .. arch_define
.. " -DENABLE_DEBUGGER_SUPPORT"
+ .. " -DV8_I18N_SUPPORT"
.. " -Isrc"
+ .. " -Ithird_party/icu/source/common"
+ .. " -Ithird_party/icu/source/i18n"
end
function InvokeClangPluginForEachFile(filenames, cfg, func)
diff --git a/chromium/v8/tools/grokdump.py b/chromium/v8/tools/grokdump.py
index 9719376d7f4..317a7d6a911 100755
--- a/chromium/v8/tools/grokdump.py
+++ b/chromium/v8/tools/grokdump.py
@@ -40,6 +40,7 @@ import re
import struct
import sys
import types
+import v8heapconst
USAGE="""usage: %prog [OPTIONS] [DUMP-FILE]
@@ -163,6 +164,11 @@ def FullDump(reader, heap):
reader.ForEachMemoryRegion(dump_region)
+# Heap constants generated by 'make grokdump' in the v8heapconst module.
+INSTANCE_TYPES = v8heapconst.INSTANCE_TYPES
+KNOWN_MAPS = v8heapconst.KNOWN_MAPS
+KNOWN_OBJECTS = v8heapconst.KNOWN_OBJECTS
+
# Set of structures and constants that describe the layout of minidump
# files. Based on MSDN and Google Breakpad.
@@ -754,6 +760,14 @@ class MinidumpReader(object):
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.esp
+ def ExceptionFP(self):
+ if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+ return self.exception_context.rbp
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ return None
+ elif self.arch == MD_CPU_ARCHITECTURE_X86:
+ return self.exception_context.ebp
+
def FormatIntPtr(self, value):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return "%016x" % value
@@ -834,262 +848,6 @@ class MinidumpReader(object):
return "%s+0x%x" % (symbol.name, diff)
-
-# List of V8 instance types. Obtained by adding the code below to any .cc file.
-#
-# #define DUMP_TYPE(T) printf(" %d: \"%s\",\n", T, #T);
-# struct P {
-# P() {
-# printf("INSTANCE_TYPES = {\n");
-# INSTANCE_TYPE_LIST(DUMP_TYPE)
-# printf("}\n");
-# }
-# };
-# static P p;
-INSTANCE_TYPES = {
- 0: "STRING_TYPE",
- 4: "ASCII_STRING_TYPE",
- 1: "CONS_STRING_TYPE",
- 5: "CONS_ASCII_STRING_TYPE",
- 3: "SLICED_STRING_TYPE",
- 2: "EXTERNAL_STRING_TYPE",
- 6: "EXTERNAL_ASCII_STRING_TYPE",
- 10: "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE",
- 18: "SHORT_EXTERNAL_STRING_TYPE",
- 22: "SHORT_EXTERNAL_ASCII_STRING_TYPE",
- 26: "SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE",
- 64: "INTERNALIZED_STRING_TYPE",
- 68: "ASCII_INTERNALIZED_STRING_TYPE",
- 65: "CONS_INTERNALIZED_STRING_TYPE",
- 69: "CONS_ASCII_INTERNALIZED_STRING_TYPE",
- 66: "EXTERNAL_INTERNALIZED_STRING_TYPE",
- 70: "EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
- 74: "EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE",
- 82: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
- 86: "SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
- 90: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE",
- 128: "SYMBOL_TYPE",
- 129: "MAP_TYPE",
- 130: "CODE_TYPE",
- 131: "ODDBALL_TYPE",
- 132: "JS_GLOBAL_PROPERTY_CELL_TYPE",
- 133: "HEAP_NUMBER_TYPE",
- 134: "FOREIGN_TYPE",
- 135: "BYTE_ARRAY_TYPE",
- 136: "FREE_SPACE_TYPE",
- 137: "EXTERNAL_BYTE_ARRAY_TYPE",
- 138: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
- 139: "EXTERNAL_SHORT_ARRAY_TYPE",
- 140: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
- 141: "EXTERNAL_INT_ARRAY_TYPE",
- 142: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
- 143: "EXTERNAL_FLOAT_ARRAY_TYPE",
- 145: "EXTERNAL_PIXEL_ARRAY_TYPE",
- 147: "FILLER_TYPE",
- 148: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 149: "DECLARED_ACCESSOR_INFO_TYPE",
- 150: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 151: "ACCESSOR_PAIR_TYPE",
- 152: "ACCESS_CHECK_INFO_TYPE",
- 153: "INTERCEPTOR_INFO_TYPE",
- 154: "CALL_HANDLER_INFO_TYPE",
- 155: "FUNCTION_TEMPLATE_INFO_TYPE",
- 156: "OBJECT_TEMPLATE_INFO_TYPE",
- 157: "SIGNATURE_INFO_TYPE",
- 158: "TYPE_SWITCH_INFO_TYPE",
- 159: "ALLOCATION_SITE_INFO_TYPE",
- 160: "SCRIPT_TYPE",
- 161: "CODE_CACHE_TYPE",
- 162: "POLYMORPHIC_CODE_CACHE_TYPE",
- 163: "TYPE_FEEDBACK_INFO_TYPE",
- 164: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 167: "FIXED_ARRAY_TYPE",
- 146: "FIXED_DOUBLE_ARRAY_TYPE",
- 168: "SHARED_FUNCTION_INFO_TYPE",
- 169: "JS_MESSAGE_OBJECT_TYPE",
- 172: "JS_VALUE_TYPE",
- 173: "JS_DATE_TYPE",
- 174: "JS_OBJECT_TYPE",
- 175: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 176: "JS_MODULE_TYPE",
- 177: "JS_GLOBAL_OBJECT_TYPE",
- 178: "JS_BUILTINS_OBJECT_TYPE",
- 179: "JS_GLOBAL_PROXY_TYPE",
- 180: "JS_ARRAY_TYPE",
- 171: "JS_PROXY_TYPE",
- 183: "JS_WEAK_MAP_TYPE",
- 184: "JS_WEAK_SET_TYPE",
- 185: "JS_REGEXP_TYPE",
- 186: "JS_FUNCTION_TYPE",
- 170: "JS_FUNCTION_PROXY_TYPE",
- 165: "DEBUG_INFO_TYPE",
- 166: "BREAK_POINT_INFO_TYPE",
-}
-
-
-# List of known V8 maps. Used to determine the instance type and name
-# for maps that are part of the root-set and hence on the first page of
-# the map-space. Obtained by adding the code below to an IA32 release
-# build with enabled snapshots to the end of the Isolate::Init method.
-#
-# #define ROOT_LIST_CASE(type, name, camel_name) \
-# if (o == heap_.name()) n = #camel_name;
-# #define STRUCT_LIST_CASE(upper_name, camel_name, name) \
-# if (o == heap_.name##_map()) n = #camel_name "Map";
-# HeapObjectIterator it(heap_.map_space());
-# printf("KNOWN_MAPS = {\n");
-# for (Object* o = it.Next(); o != NULL; o = it.Next()) {
-# Map* m = Map::cast(o);
-# const char* n = "";
-# intptr_t p = reinterpret_cast<intptr_t>(m) & 0xfffff;
-# int t = m->instance_type();
-# ROOT_LIST(ROOT_LIST_CASE)
-# STRUCT_LIST(STRUCT_LIST_CASE)
-# printf(" 0x%05x: (%d, \"%s\"),\n", p, t, n);
-# }
-# printf("}\n");
-KNOWN_MAPS = {
- 0x08081: (135, "ByteArrayMap"),
- 0x080a9: (129, "MetaMap"),
- 0x080d1: (131, "OddballMap"),
- 0x080f9: (68, "AsciiInternalizedStringMap"),
- 0x08121: (167, "FixedArrayMap"),
- 0x08149: (133, "HeapNumberMap"),
- 0x08171: (136, "FreeSpaceMap"),
- 0x08199: (147, "OnePointerFillerMap"),
- 0x081c1: (147, "TwoPointerFillerMap"),
- 0x081e9: (132, "GlobalPropertyCellMap"),
- 0x08211: (168, "SharedFunctionInfoMap"),
- 0x08239: (167, "NativeContextMap"),
- 0x08261: (130, "CodeMap"),
- 0x08289: (167, "ScopeInfoMap"),
- 0x082b1: (167, "FixedCOWArrayMap"),
- 0x082d9: (146, "FixedDoubleArrayMap"),
- 0x08301: (167, "HashTableMap"),
- 0x08329: (128, "SymbolMap"),
- 0x08351: (0, "StringMap"),
- 0x08379: (4, "AsciiStringMap"),
- 0x083a1: (1, "ConsStringMap"),
- 0x083c9: (5, "ConsAsciiStringMap"),
- 0x083f1: (3, "SlicedStringMap"),
- 0x08419: (7, "SlicedAsciiStringMap"),
- 0x08441: (2, "ExternalStringMap"),
- 0x08469: (10, "ExternalStringWithAsciiDataMap"),
- 0x08491: (6, "ExternalAsciiStringMap"),
- 0x084b9: (18, "ShortExternalStringMap"),
- 0x084e1: (26, "ShortExternalStringWithAsciiDataMap"),
- 0x08509: (64, "InternalizedStringMap"),
- 0x08531: (65, "ConsInternalizedStringMap"),
- 0x08559: (69, "ConsAsciiInternalizedStringMap"),
- 0x08581: (66, "ExternalInternalizedStringMap"),
- 0x085a9: (74, "ExternalInternalizedStringWithAsciiDataMap"),
- 0x085d1: (70, "ExternalAsciiInternalizedStringMap"),
- 0x085f9: (82, "ShortExternalInternalizedStringMap"),
- 0x08621: (90, "ShortExternalInternalizedStringWithAsciiDataMap"),
- 0x08649: (86, "ShortExternalAsciiInternalizedStringMap"),
- 0x08671: (22, "ShortExternalAsciiStringMap"),
- 0x08699: (0, "UndetectableStringMap"),
- 0x086c1: (4, "UndetectableAsciiStringMap"),
- 0x086e9: (145, "ExternalPixelArrayMap"),
- 0x08711: (137, "ExternalByteArrayMap"),
- 0x08739: (138, "ExternalUnsignedByteArrayMap"),
- 0x08761: (139, "ExternalShortArrayMap"),
- 0x08789: (140, "ExternalUnsignedShortArrayMap"),
- 0x087b1: (141, "ExternalIntArrayMap"),
- 0x087d9: (142, "ExternalUnsignedIntArrayMap"),
- 0x08801: (143, "ExternalFloatArrayMap"),
- 0x08829: (144, "ExternalDoubleArrayMap"),
- 0x08851: (167, "NonStrictArgumentsElementsMap"),
- 0x08879: (167, "FunctionContextMap"),
- 0x088a1: (167, "CatchContextMap"),
- 0x088c9: (167, "WithContextMap"),
- 0x088f1: (167, "BlockContextMap"),
- 0x08919: (167, "ModuleContextMap"),
- 0x08941: (167, "GlobalContextMap"),
- 0x08969: (169, "JSMessageObjectMap"),
- 0x08991: (134, "ForeignMap"),
- 0x089b9: (174, "NeanderMap"),
- 0x089e1: (159, "AllocationSiteInfoMap"),
- 0x08a09: (162, "PolymorphicCodeCacheMap"),
- 0x08a31: (160, "ScriptMap"),
- 0x08a59: (174, ""),
- 0x08a81: (174, "ExternalMap"),
- 0x08aa9: (148, "DeclaredAccessorDescriptorMap"),
- 0x08ad1: (149, "DeclaredAccessorInfoMap"),
- 0x08af9: (150, "ExecutableAccessorInfoMap"),
- 0x08b21: (151, "AccessorPairMap"),
- 0x08b49: (152, "AccessCheckInfoMap"),
- 0x08b71: (153, "InterceptorInfoMap"),
- 0x08b99: (154, "CallHandlerInfoMap"),
- 0x08bc1: (155, "FunctionTemplateInfoMap"),
- 0x08be9: (156, "ObjectTemplateInfoMap"),
- 0x08c11: (157, "SignatureInfoMap"),
- 0x08c39: (158, "TypeSwitchInfoMap"),
- 0x08c61: (161, "CodeCacheMap"),
- 0x08c89: (163, "TypeFeedbackInfoMap"),
- 0x08cb1: (164, "AliasedArgumentsEntryMap"),
- 0x08cd9: (165, "DebugInfoMap"),
- 0x08d01: (166, "BreakPointInfoMap"),
-}
-
-
-# List of known V8 objects. Used to determine name for objects that are
-# part of the root-set and hence on the first page of various old-space
-# paged. Obtained by adding the code below to an IA32 release build with
-# enabled snapshots to the end of the Isolate::Init method.
-#
-# #define ROOT_LIST_CASE(type, name, camel_name) \
-# if (o == heap_.name()) n = #camel_name;
-# OldSpaces spit(heap());
-# printf("KNOWN_OBJECTS = {\n");
-# for (PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
-# HeapObjectIterator it(s);
-# const char* sname = AllocationSpaceName(s->identity());
-# for (Object* o = it.Next(); o != NULL; o = it.Next()) {
-# const char* n = NULL;
-# intptr_t p = reinterpret_cast<intptr_t>(o) & 0xfffff;
-# ROOT_LIST(ROOT_LIST_CASE)
-# if (n != NULL) {
-# printf(" (\"%s\", 0x%05x): \"%s\",\n", sname, p, n);
-# }
-# }
-# }
-# printf("}\n");
-KNOWN_OBJECTS = {
- ("OLD_POINTER_SPACE", 0x08081): "NullValue",
- ("OLD_POINTER_SPACE", 0x08091): "UndefinedValue",
- ("OLD_POINTER_SPACE", 0x080a1): "InstanceofCacheMap",
- ("OLD_POINTER_SPACE", 0x080b1): "TrueValue",
- ("OLD_POINTER_SPACE", 0x080c1): "FalseValue",
- ("OLD_POINTER_SPACE", 0x080d1): "NoInterceptorResultSentinel",
- ("OLD_POINTER_SPACE", 0x080e1): "ArgumentsMarker",
- ("OLD_POINTER_SPACE", 0x080f1): "NumberStringCache",
- ("OLD_POINTER_SPACE", 0x088f9): "SingleCharacterStringCache",
- ("OLD_POINTER_SPACE", 0x08b01): "StringSplitCache",
- ("OLD_POINTER_SPACE", 0x08f09): "RegExpMultipleCache",
- ("OLD_POINTER_SPACE", 0x09311): "TerminationException",
- ("OLD_POINTER_SPACE", 0x09321): "MessageListeners",
- ("OLD_POINTER_SPACE", 0x0933d): "CodeStubs",
- ("OLD_POINTER_SPACE", 0x09fa5): "NonMonomorphicCache",
- ("OLD_POINTER_SPACE", 0x0a5b9): "PolymorphicCodeCache",
- ("OLD_POINTER_SPACE", 0x0a5c1): "NativesSourceCache",
- ("OLD_POINTER_SPACE", 0x0a601): "EmptyScript",
- ("OLD_POINTER_SPACE", 0x0a63d): "IntrinsicFunctionNames",
- ("OLD_POINTER_SPACE", 0x0d659): "ObservationState",
- ("OLD_POINTER_SPACE", 0x27415): "SymbolTable",
- ("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
- ("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
- ("OLD_DATA_SPACE", 0x080a9): "NanValue",
- ("OLD_DATA_SPACE", 0x08125): "EmptyByteArray",
- ("OLD_DATA_SPACE", 0x0812d): "EmptyString",
- ("OLD_DATA_SPACE", 0x08259): "InfinityValue",
- ("OLD_DATA_SPACE", 0x08265): "MinusZeroValue",
- ("OLD_DATA_SPACE", 0x08271): "PrototypeAccessors",
- ("CODE_SPACE", 0x0aea1): "JsEntryCode",
- ("CODE_SPACE", 0x0b5c1): "JsConstructEntryCode",
-}
-
-
class Printer(object):
"""Printer with indentation support."""
@@ -1380,7 +1138,7 @@ class Oddball(HeapObject):
def __str__(self):
if self.to_string:
- return "Oddball(%08x, <%s>)" % (self.address, self.to_string.GetChars())
+ return "Oddball(%08x, <%s>)" % (self.address, str(self.to_string))
else:
kind = "???"
if 0 <= self.kind < len(Oddball.KINDS):
@@ -2201,11 +1959,15 @@ def AnalyzeMinidump(options, minidump_name):
print "Kthxbye."
elif not options.command:
if reader.exception is not None:
+ frame_pointer = reader.ExceptionFP()
print "Annotated stack (from exception.esp to bottom):"
for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
maybe_address = reader.ReadUIntPtr(slot)
heap_object = heap.FindObject(maybe_address)
maybe_symbol = reader.FindSymbol(maybe_address)
+ if slot == frame_pointer:
+ maybe_symbol = "<---- frame pointer"
+ frame_pointer = maybe_address
print "%s: %s %s" % (reader.FormatIntPtr(slot),
reader.FormatIntPtr(maybe_address),
maybe_symbol or "")
diff --git a/chromium/v8/tools/gyp/v8.gyp b/chromium/v8/tools/gyp/v8.gyp
index bdbc7163df4..aa01a842f63 100644
--- a/chromium/v8/tools/gyp/v8.gyp
+++ b/chromium/v8/tools/gyp/v8.gyp
@@ -129,11 +129,6 @@
],
},
}],
- ['v8_enable_i18n_support==1', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/i18n-libraries.cc',
- ],
- }],
],
'dependencies': [
'v8_base.<(v8_target_arch)',
@@ -197,11 +192,6 @@
'V8_SHARED',
],
}],
- ['v8_enable_i18n_support==1', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/i18n-libraries.cc',
- ],
- }],
]
},
{
@@ -246,7 +236,6 @@
'../../src/checks.cc',
'../../src/checks.h',
'../../src/circular-queue-inl.h',
- '../../src/circular-queue.cc',
'../../src/circular-queue.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
@@ -268,6 +257,7 @@
'../../src/cpu-profiler-inl.h',
'../../src/cpu-profiler.cc',
'../../src/cpu-profiler.h',
+ '../../src/cpu.cc',
'../../src/cpu.h',
'../../src/data-flow.cc',
'../../src/data-flow.h',
@@ -290,6 +280,7 @@
'../../src/double.h',
'../../src/dtoa.cc',
'../../src/dtoa.h',
+ '../../src/effects.h',
'../../src/elements-kind.cc',
'../../src/elements-kind.h',
'../../src/elements.cc',
@@ -335,6 +326,7 @@
'../../src/heap-snapshot-generator.h',
'../../src/heap.cc',
'../../src/heap.h',
+ '../../src/hydrogen-alias-analysis.h',
'../../src/hydrogen-bce.cc',
'../../src/hydrogen-bce.h',
'../../src/hydrogen-bch.cc',
@@ -365,6 +357,8 @@
'../../src/hydrogen-mark-deoptimize.h',
'../../src/hydrogen-minus-zero.cc',
'../../src/hydrogen-minus-zero.h',
+ '../../src/hydrogen-osr.cc',
+ '../../src/hydrogen-osr.h',
'../../src/hydrogen-range-analysis.cc',
'../../src/hydrogen-range-analysis.h',
'../../src/hydrogen-redundant-phi.cc',
@@ -377,8 +371,8 @@
'../../src/hydrogen-sce.h',
'../../src/hydrogen-uint32-analysis.cc',
'../../src/hydrogen-uint32-analysis.h',
- '../../src/hydrogen-osr.cc',
- '../../src/hydrogen-osr.h',
+ '../../src/i18n.cc',
+ '../../src/i18n.h',
'../../src/icu_util.cc',
'../../src/icu_util.h',
'../../src/ic-inl.h',
@@ -433,8 +427,19 @@
'../../src/optimizing-compiler-thread.cc',
'../../src/parser.cc',
'../../src/parser.h',
+ '../../src/platform/elapsed-timer.h',
+ '../../src/platform/time.cc',
+ '../../src/platform/time.h',
'../../src/platform-posix.h',
'../../src/platform.h',
+ '../../src/platform/condition-variable.cc',
+ '../../src/platform/condition-variable.h',
+ '../../src/platform/mutex.cc',
+ '../../src/platform/mutex.h',
+ '../../src/platform/semaphore.cc',
+ '../../src/platform/semaphore.h',
+ '../../src/platform/socket.cc',
+ '../../src/platform/socket.h',
'../../src/preparse-data-format.h',
'../../src/preparse-data.cc',
'../../src/preparse-data.h',
@@ -513,10 +518,13 @@
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',
+ '../../src/unique.h',
'../../src/uri.h',
'../../src/utils-inl.h',
'../../src/utils.cc',
'../../src/utils.h',
+ '../../src/utils/random-number-generator.cc',
+ '../../src/utils/random-number-generator.h',
'../../src/v8-counters.cc',
'../../src/v8-counters.h',
'../../src/v8.cc',
@@ -685,6 +693,9 @@
]
}],
],
+ 'libraries': [
+ '-lrt'
+ ]
},
'sources': [ ### gcmole(os:linux) ###
'../../src/platform-linux.cc',
@@ -697,7 +708,7 @@
'CAN_USE_VFP_INSTRUCTIONS',
],
'sources': [
- '../../src/platform-posix.cc',
+ '../../src/platform-posix.cc'
],
'conditions': [
['host_os=="mac"', {
@@ -713,6 +724,28 @@
}],
],
}, {
+ # TODO(bmeurer): What we really want here, is this:
+ #
+ # 'link_settings': {
+ # 'target_conditions': [
+ # ['_toolset=="host"', {
+ # 'libraries': [
+ # '-lrt'
+ # ]
+ # }]
+ # ]
+ # },
+ #
+ # but we can't do this right now, as the AOSP does not support
+ # linking against the host librt, so we need to work around this
+ # for now, using the following hack (see platform/time.cc):
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'defines': [
+ 'V8_LIBRT_NOT_AVAILABLE=1',
+ ],
+ }],
+ ],
'sources': [
'../../src/platform-linux.cc'
]
@@ -760,7 +793,7 @@
]},
'sources': [
'../../src/platform-solaris.cc',
- '../../src/platform-posix.cc',
+ '../../src/platform-posix.cc'
],
}
],
@@ -783,13 +816,13 @@
['build_env=="Cygwin"', {
'sources': [
'../../src/platform-cygwin.cc',
- '../../src/platform-posix.cc',
+ '../../src/platform-posix.cc'
],
}, {
'sources': [
'../../src/platform-win32.cc',
- '../../src/win32-math.h',
'../../src/win32-math.cc',
+ '../../src/win32-math.h'
],
}],
],
@@ -799,8 +832,8 @@
}, {
'sources': [
'../../src/platform-win32.cc',
- '../../src/win32-math.h',
'../../src/win32-math.cc',
+ '../../src/win32-math.h'
],
'msvs_disabled_warnings': [4351, 4355, 4800],
'link_settings': {
@@ -821,26 +854,15 @@
]
}],
['v8_enable_i18n_support==1', {
- 'sources': [
- '../../src/extensions/i18n/break-iterator.cc',
- '../../src/extensions/i18n/break-iterator.h',
- '../../src/extensions/i18n/collator.cc',
- '../../src/extensions/i18n/collator.h',
- '../../src/extensions/i18n/date-format.cc',
- '../../src/extensions/i18n/date-format.h',
- '../../src/extensions/i18n/i18n-extension.cc',
- '../../src/extensions/i18n/i18n-extension.h',
- '../../src/extensions/i18n/i18n-utils.cc',
- '../../src/extensions/i18n/i18n-utils.h',
- '../../src/extensions/i18n/locale.cc',
- '../../src/extensions/i18n/locale.h',
- '../../src/extensions/i18n/number-format.cc',
- '../../src/extensions/i18n/number-format.h',
- ],
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
]
+ }, { # v8_enable_i18n_support==0
+ 'sources!': [
+ '../../src/i18n.cc',
+ '../../src/i18n.h',
+ ],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
@@ -859,24 +881,15 @@
'toolsets': ['target'],
}],
['v8_enable_i18n_support==1', {
- 'actions': [{
- 'action_name': 'js2c_i18n',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(i18n_library_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/i18n-libraries.cc',
- ],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<@(_outputs)',
- 'I18N',
- '<(v8_compress_startup_data)',
- '<@(i18n_library_files)'
+ 'variables': {
+ 'i18n_library_files': [
+ '../../src/i18n.js',
],
- }],
+ },
+ }, {
+ 'variables': {
+ 'i18n_library_files': [],
+ },
}],
],
'variables': {
@@ -895,6 +908,8 @@
'../../src/date.js',
'../../src/json.js',
'../../src/regexp.js',
+ '../../src/arraybuffer.js',
+ '../../src/typedarray.js',
'../../src/macros.py',
],
'experimental_library_files': [
@@ -903,23 +918,10 @@
'../../src/proxy.js',
'../../src/collection.js',
'../../src/object-observe.js',
- '../../src/arraybuffer.js',
- '../../src/typedarray.js',
'../../src/generator.js',
'../../src/array-iterator.js',
- '../../src/harmony-string.js'
- ],
- 'i18n_library_files': [
- '../../src/extensions/i18n/header.js',
- '../../src/extensions/i18n/globals.js',
- '../../src/extensions/i18n/locale.js',
- '../../src/extensions/i18n/collator.js',
- '../../src/extensions/i18n/number-format.js',
- '../../src/extensions/i18n/date-format.js',
- '../../src/extensions/i18n/break-iterator.js',
- '../../src/extensions/i18n/i18n-utils.js',
- '../../src/extensions/i18n/overrides.js',
- '../../src/extensions/i18n/footer.js',
+ '../../src/harmony-string.js',
+ '../../src/harmony-array.js',
],
},
'actions': [
@@ -928,6 +930,7 @@
'inputs': [
'../../tools/js2c.py',
'<@(library_files)',
+ '<@(i18n_library_files)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
@@ -938,7 +941,8 @@
'<@(_outputs)',
'CORE',
'<(v8_compress_startup_data)',
- '<@(library_files)'
+ '<@(library_files)',
+ '<@(i18n_library_files)',
],
},
{
diff --git a/chromium/v8/tools/linux-tick-processor b/chromium/v8/tools/linux-tick-processor
index 93f143f9a95..7864fb40578 100755
--- a/chromium/v8/tools/linux-tick-processor
+++ b/chromium/v8/tools/linux-tick-processor
@@ -37,4 +37,5 @@ cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
$tools_path/csvparser.js $tools_path/consarray.js \
$tools_path/profile.js $tools_path/profile_view.js \
$tools_path/logreader.js $tools_path/tickprocessor.js \
+ $tools_path/SourceMap.js \
$tools_path/tickprocessor-driver.js -- $@ 2>/dev/null
diff --git a/chromium/v8/tools/plot-timer-events b/chromium/v8/tools/plot-timer-events
index 0723150be57..8db067d5f12 100755
--- a/chromium/v8/tools/plot-timer-events
+++ b/chromium/v8/tools/plot-timer-events
@@ -67,4 +67,13 @@ cat $log_file |
$tools_path/codemap.js $tools_path/profile.js $tools_path/profile_view.js \
$tools_path/logreader.js $tools_path/tickprocessor.js \
$tools_path/profviz/composer.js $tools_path/profviz/stdio.js \
- -- $@ $options 2>/dev/null | gnuplot > timer-events.png
+ -- $@ $options 2>/dev/null > timer-events.plot
+
+success=$?
+if [[ $success != 0 ]] ; then
+ cat timer-events.plot
+else
+ cat timer-events.plot | gnuplot > timer-events.png
+fi
+
+rm -f timer-events.plot
diff --git a/chromium/v8/tools/profviz/composer.js b/chromium/v8/tools/profviz/composer.js
index cdfc0b7b39a..44dd7639de8 100644
--- a/chromium/v8/tools/profviz/composer.js
+++ b/chromium/v8/tools/profviz/composer.js
@@ -31,7 +31,7 @@ Array.prototype.top = function() {
}
-function PlotScriptComposer(kResX, kResY) {
+function PlotScriptComposer(kResX, kResY, error_output) {
// Constants.
var kV8BinarySuffixes = ["/d8", "/libv8.so"];
var kStackFrames = 8; // Stack frames to display in the plot.
@@ -101,7 +101,7 @@ function PlotScriptComposer(kResX, kResY) {
new TimerEvent("compile unopt", "#CC0000", true, 0),
'V8.RecompileSynchronous':
new TimerEvent("recompile sync", "#CC0044", true, 0),
- 'V8.RecompileParallel':
+ 'V8.RecompileConcurrent':
new TimerEvent("recompile async", "#CC4499", false, 1),
'V8.CompileEval':
new TimerEvent("compile eval", "#CC4400", true, 0),
@@ -149,7 +149,10 @@ function PlotScriptComposer(kResX, kResY) {
// Utility functions.
function assert(something, message) {
- if (!something) print(new Error(message).stack);
+ if (!something) {
+ var error = new Error(message);
+ error_output(error.stack);
+ }
}
function FindCodeKind(kind) {
@@ -208,10 +211,15 @@ function PlotScriptComposer(kResX, kResY) {
// Public methods.
this.collectData = function(input, distortion_per_entry) {
+ var last_timestamp = 0;
+
// Parse functions.
var parseTimeStamp = function(timestamp) {
+      var int_timestamp = parseInt(timestamp);
+ assert(int_timestamp >= last_timestamp, "Inconsistent timestamps.");
+ last_timestamp = int_timestamp;
distortion += distortion_per_entry;
- return parseInt(timestamp) / 1000 - distortion;
+ return int_timestamp / 1000 - distortion;
}
var processTimerEventStart = function(name, start) {
@@ -260,65 +268,6 @@ function PlotScriptComposer(kResX, kResY) {
code_map.deleteCode(address);
};
- var processSharedLibrary = function(name, start, end) {
- var code_entry = new CodeMap.CodeEntry(end - start, name);
- code_entry.kind = -2; // External code kind.
- for (var i = 0; i < kV8BinarySuffixes.length; i++) {
- var suffix = kV8BinarySuffixes[i];
- if (name.indexOf(suffix, name.length - suffix.length) >= 0) {
- code_entry.kind = -1; // V8 runtime code kind.
- break;
- }
- }
- code_map.addLibrary(start, code_entry);
- };
-
- var processTimerEventStart = function(name, start) {
- // Find out the thread id.
- var new_event = TimerEvents[name];
- if (new_event === undefined) return;
- var thread_id = new_event.thread_id;
-
- start = Math.max(last_time_stamp[thread_id] + kMinRangeLength, start);
-
- // Last event on this thread is done with the start of this event.
- var last_event = event_stack[thread_id].top();
- if (last_event !== undefined) {
- var new_range = new Range(last_time_stamp[thread_id], start);
- last_event.ranges.push(new_range);
- }
- event_stack[thread_id].push(new_event);
- last_time_stamp[thread_id] = start;
- };
-
- var processTimerEventEnd = function(name, end) {
- // Find out about the thread_id.
- var finished_event = TimerEvents[name];
- var thread_id = finished_event.thread_id;
- assert(finished_event === event_stack[thread_id].pop(),
- "inconsistent event stack");
-
- end = Math.max(last_time_stamp[thread_id] + kMinRangeLength, end);
-
- var new_range = new Range(last_time_stamp[thread_id], end);
- finished_event.ranges.push(new_range);
- last_time_stamp[thread_id] = end;
- };
-
- var processCodeCreateEvent = function(type, kind, address, size, name) {
- var code_entry = new CodeMap.CodeEntry(size, name);
- code_entry.kind = kind;
- code_map.addCode(address, code_entry);
- };
-
- var processCodeMoveEvent = function(from, to) {
- code_map.moveCode(from, to);
- };
-
- var processCodeDeleteEvent = function(address) {
- code_map.deleteCode(address);
- };
-
var processCodeDeoptEvent = function(time, size) {
deopts.push(new Deopt(time, size));
}
diff --git a/chromium/v8/tools/profviz/profviz.html b/chromium/v8/tools/profviz/profviz.html
index 30494f80fe1..d7dd9cb9508 100644
--- a/chromium/v8/tools/profviz/profviz.html
+++ b/chromium/v8/tools/profviz/profviz.html
@@ -96,7 +96,7 @@ your computer's performance.">
or alternatively,<br/>
Chrome with
<span class="tt">
- --no-sandbox --js-flags="--prof --noprof-lazy --log-timer-events
+ --no-sandbox --js-flags="--prof --log-timer-events"
</span> to produce <span class="tt">v8.log</span>.
</li>
<li>
@@ -117,6 +117,11 @@ your computer's performance.">
command-line utility
</a> instead.
</li>
+ </ol>
+ If you expect multiple V8 instances to run concurrently, for example
+ with several tabs in Chrome,<br/>
+ add the V8 flag <span class="tt">--logfile=v8.%p.log</span>
+ so that each instance writes to its own log file.
</div>
</div>
diff --git a/chromium/v8/tools/profviz/profviz.js b/chromium/v8/tools/profviz/profviz.js
index 7af12adc7e9..8ac0881eb6f 100644
--- a/chromium/v8/tools/profviz/profviz.js
+++ b/chromium/v8/tools/profviz/profviz.js
@@ -42,15 +42,6 @@ var worker_scripts = [
function plotWorker() {
var worker = null;
- var delegateList = {
- "log" : log,
- "error" : logError,
- "displayplot" : displayplot,
- "displayprof" : displayprof,
- "range" : setRange,
- "script" : scriptLoaded
- }
-
function initialize() {
ui.freeze();
worker = new Worker("worker.js");
@@ -89,6 +80,16 @@ function plotWorker() {
if (worker) worker.terminate();
initialize();
}
+
+ var delegateList = {
+ "log" : log,
+ "error" : logError,
+ "displayplot" : displayplot,
+ "displayprof" : displayprof,
+ "range" : setRange,
+ "script" : scriptLoaded,
+ "reset" : this.reset
+ }
}
@@ -233,9 +234,6 @@ function start(event) {
function getSelectedFile() {
var file = ui.file.files[0];
if (!file) throw Error("No valid file selected.");
- if (!file.type.toString().match(/text/)) {
- throw Error("'" + escape(file.name) + "' is not a text file.");
- }
return file;
}
diff --git a/chromium/v8/tools/profviz/stdio.js b/chromium/v8/tools/profviz/stdio.js
index e8001494c9d..db38f042a7d 100644
--- a/chromium/v8/tools/profviz/stdio.js
+++ b/chromium/v8/tools/profviz/stdio.js
@@ -44,7 +44,11 @@ if (!isNaN(range_end)) range_end_override = range_end;
var kResX = 1600;
var kResY = 600;
-var psc = new PlotScriptComposer(kResX, kResY);
+function log_error(text) {
+ print(text);
+ quit(1);
+}
+var psc = new PlotScriptComposer(kResX, kResY, log_error);
psc.collectData(readline, distortion_per_entry);
psc.findPlotRange(range_start_override, range_end_override);
print("set terminal pngcairo size " + kResX + "," + kResY +
diff --git a/chromium/v8/tools/profviz/worker.js b/chromium/v8/tools/profviz/worker.js
index 60a557f982d..b17ca29f587 100644
--- a/chromium/v8/tools/profviz/worker.js
+++ b/chromium/v8/tools/profviz/worker.js
@@ -72,6 +72,12 @@ function load_scripts(scripts) {
}
+function log_error(text) {
+ self.postMessage({"call": "error", "args": text});
+ self.postMessage({"call": "reset"});
+}
+
+
function run(args) {
var file = args["file"];
var resx = args["resx"];
@@ -121,7 +127,7 @@ function run(args) {
var input_file_name = "input_temp";
var output_file_name = "output.svg";
- var psc = new PlotScriptComposer(resx, resy);
+ var psc = new PlotScriptComposer(resx, resy, log_error);
var objects = 0;
time("Collecting events (" + content_lines.length + " entries)",
diff --git a/chromium/v8/tools/run-deopt-fuzzer.py b/chromium/v8/tools/run-deopt-fuzzer.py
index d554a989f12..f8cc93748ab 100755
--- a/chromium/v8/tools/run-deopt-fuzzer.py
+++ b/chromium/v8/tools/run-deopt-fuzzer.py
@@ -58,9 +58,9 @@ MODE_FLAGS = {
"debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
"--debug-code", "--verify-heap",
- "--noparallel-recompilation"],
+ "--noconcurrent-recompilation"],
"release" : ["--nobreak-on-abort", "--nodead-code-elimination",
- "--nofold-constants", "--noparallel-recompilation"]}
+ "--nofold-constants", "--noconcurrent-recompilation"]}
SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
diff --git a/chromium/v8/tools/run-tests.py b/chromium/v8/tools/run-tests.py
index 761d03fe335..48682d4444e 100755
--- a/chromium/v8/tools/run-tests.py
+++ b/chromium/v8/tools/run-tests.py
@@ -94,6 +94,9 @@ def BuildOptions():
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
+ result.add_option("--flaky-tests",
+ help="Regard tests marked as flaky (run|skip|dontcare)",
+ default="dontcare")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
@@ -204,6 +207,9 @@ def ProcessOptions(options):
# This is OK for distributed running, so we don't need to set no_network.
options.command_prefix = (["python", "-u", run_valgrind] +
options.command_prefix)
+  if options.flaky_tests not in ["run", "skip", "dontcare"]:
+ print "Unknown flaky test mode %s" % options.flaky_tests
+ return False
return True
@@ -315,7 +321,7 @@ def Execute(arch, mode, args, options, suites, workspace):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
- s.FilterTestCasesByStatus(options.warn_unused)
+ s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
diff --git a/chromium/v8/tools/testrunner/local/old_statusfile.py b/chromium/v8/tools/testrunner/local/old_statusfile.py
index a9a62036ec4..d634e3ec955 100644
--- a/chromium/v8/tools/testrunner/local/old_statusfile.py
+++ b/chromium/v8/tools/testrunner/local/old_statusfile.py
@@ -37,6 +37,7 @@ OKAY = 'OKAY'
TIMEOUT = 'TIMEOUT'
CRASH = 'CRASH'
SLOW = 'SLOW'
+FLAKY = 'FLAKY'
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = 'FAIL_OK'
PASS_OR_FAIL = 'PASS_OR_FAIL'
@@ -48,6 +49,7 @@ KEYWORDS = {SKIP: SKIP,
TIMEOUT: TIMEOUT,
CRASH: CRASH,
SLOW: SLOW,
+ FLAKY: FLAKY,
FAIL_OK: FAIL_OK,
PASS_OR_FAIL: PASS_OR_FAIL}
diff --git a/chromium/v8/tools/testrunner/local/statusfile.py b/chromium/v8/tools/testrunner/local/statusfile.py
index 634fe6a08a8..1d30fe3d3c1 100644
--- a/chromium/v8/tools/testrunner/local/statusfile.py
+++ b/chromium/v8/tools/testrunner/local/statusfile.py
@@ -42,6 +42,7 @@ OKAY = "OKAY"
TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
+FLAKY = "FLAKY"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
@@ -49,7 +50,7 @@ PASS_OR_FAIL = "PASS_OR_FAIL"
ALWAYS = "ALWAYS"
KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
+for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
PASS_OR_FAIL, ALWAYS]:
KEYWORDS[key] = key
@@ -68,6 +69,10 @@ def DoSkip(outcomes):
def IsFlaky(outcomes):
+ return FLAKY in outcomes
+
+
+def IsPassOrFail(outcomes):
return ((PASS in outcomes) and (FAIL in outcomes) and
(not CRASH in outcomes) and (not OKAY in outcomes))
diff --git a/chromium/v8/tools/testrunner/local/testsuite.py b/chromium/v8/tools/testrunner/local/testsuite.py
index 473e8b1efed..b0372e7f739 100644
--- a/chromium/v8/tools/testrunner/local/testsuite.py
+++ b/chromium/v8/tools/testrunner/local/testsuite.py
@@ -66,7 +66,10 @@ class TestSuite(object):
# Used in the status file and for stdout printing.
def CommonTestName(self, testcase):
- return testcase.path
+ if utils.IsWindows():
+ return testcase.path.replace("\\", "/")
+ else:
+ return testcase.path
def ListTests(self, context):
raise NotImplementedError
@@ -84,32 +87,36 @@ class TestSuite(object):
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
- def FilterTestCasesByStatus(self, warn_unused_rules):
+ @staticmethod
+ def _FilterFlaky(flaky, mode):
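+    # Returns True when the test should be dropped: mode "run" keeps only
+    # tests marked FLAKY, "skip" drops them, "dontcare" (default) drops none.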
+ return (mode == "run" and not flaky) or (mode == "skip" and flaky)
+
+ def FilterTestCasesByStatus(self, warn_unused_rules, flaky_tests="dontcare"):
filtered = []
used_rules = set()
for t in self.tests:
+ flaky = False
testname = self.CommonTestName(t)
- if utils.IsWindows():
- testname = testname.replace("\\", "/")
if testname in self.rules:
used_rules.add(testname)
- outcomes = self.rules[testname]
- t.outcomes = outcomes # Even for skipped tests, as the TestCase
- # object stays around and PrintReport() uses it.
- if statusfile.DoSkip(outcomes):
+ # Even for skipped tests, as the TestCase object stays around and
+ # PrintReport() uses it.
+ t.outcomes = self.rules[testname]
+ if statusfile.DoSkip(t.outcomes):
continue # Don't add skipped tests to |filtered|.
- if len(self.wildcards) != 0:
- skip = False
- for rule in self.wildcards:
- assert rule[-1] == '*'
- if testname.startswith(rule[:-1]):
- used_rules.add(rule)
- outcomes = self.wildcards[rule]
- t.outcomes = outcomes
- if statusfile.DoSkip(outcomes):
- skip = True
- break # "for rule in self.wildcards"
- if skip: continue # "for t in self.tests"
+ flaky = statusfile.IsFlaky(t.outcomes)
+ skip = False
+ for rule in self.wildcards:
+ assert rule[-1] == '*'
+ if testname.startswith(rule[:-1]):
+ used_rules.add(rule)
+ t.outcomes = self.wildcards[rule]
+ if statusfile.DoSkip(t.outcomes):
+ skip = True
+ break # "for rule in self.wildcards"
+ flaky = flaky or statusfile.IsFlaky(t.outcomes)
+ if skip or self._FilterFlaky(flaky, flaky_tests):
+ continue # "for t in self.tests"
filtered.append(t)
self.tests = filtered
diff --git a/chromium/v8/tools/testrunner/local/verbose.py b/chromium/v8/tools/testrunner/local/verbose.py
index f6934675231..00c330d2d9c 100644
--- a/chromium/v8/tools/testrunner/local/verbose.py
+++ b/chromium/v8/tools/testrunner/local/verbose.py
@@ -54,7 +54,7 @@ def PrintReport(tests):
skipped += 1
continue
if statusfile.TIMEOUT in o: timeout += 1
- if statusfile.IsFlaky(o): nocrash += 1
+ if statusfile.IsPassOrFail(o): nocrash += 1
if list(o) == [statusfile.PASS]: passes += 1
if statusfile.IsFailOk(o): fail_ok += 1
if list(o) == [statusfile.FAIL]: fail += 1
diff --git a/chromium/v8/tools/tickprocessor-driver.js b/chromium/v8/tools/tickprocessor-driver.js
index 02cb81a55fc..8ba3326f630 100644
--- a/chromium/v8/tools/tickprocessor-driver.js
+++ b/chromium/v8/tools/tickprocessor-driver.js
@@ -37,6 +37,18 @@ function processArguments(args) {
}
}
+function initSourceMapSupport() {
+ // Pull dev tools source maps into our name space.
+ SourceMap = WebInspector.SourceMap;
+
+  // Overwrite the load function to load the source map synchronously.
+ SourceMap.load = function(sourceMapURL) {
+ var content = readFile(sourceMapURL);
+ var sourceMapObject = (JSON.parse(content));
+ return new SourceMap(sourceMapURL, sourceMapObject);
+ };
+}
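+// readFile is a d8 builtin; with this override SourceMap.load returns the
+// parsed map directly instead of passing it to a callback.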
+
var entriesProviders = {
'unix': UnixCppEntriesProvider,
'windows': WindowsCppEntriesProvider,
@@ -44,6 +56,11 @@ var entriesProviders = {
};
var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+ initSourceMapSupport();
+ sourceMap = SourceMap.load(params.sourceMap);
+}
var snapshotLogProcessor;
if (params.snapshotLogFileName) {
snapshotLogProcessor = new SnapshotLogProcessor();
@@ -57,6 +74,7 @@ var tickProcessor = new TickProcessor(
params.stateFilter,
snapshotLogProcessor,
params.distortion,
- params.range);
+ params.range,
+ sourceMap);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
diff --git a/chromium/v8/tools/tickprocessor.js b/chromium/v8/tools/tickprocessor.js
index 967bd3c5b71..ff5254172fd 100644
--- a/chromium/v8/tools/tickprocessor.js
+++ b/chromium/v8/tools/tickprocessor.js
@@ -153,7 +153,8 @@ function TickProcessor(
stateFilter,
snapshotLogProcessor,
distortion,
- range) {
+ range,
+ sourceMap) {
LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
@@ -196,6 +197,7 @@ function TickProcessor(
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
this.snapshotLogProcessor_ = snapshotLogProcessor;
+ this.sourceMap = sourceMap;
this.deserializedEntriesNames_ = [];
var ticks = this.ticks_ =
{ total: 0, unaccounted: 0, excluded: 0, gc: 0 };
@@ -249,7 +251,8 @@ TickProcessor.VmStates = {
GC: 1,
COMPILER: 2,
OTHER: 3,
- EXTERNAL: 4
+ EXTERNAL: 4,
+ IDLE: 5
};
@@ -543,17 +546,52 @@ TickProcessor.prototype.processProfile = function(
}
};
+TickProcessor.prototype.getLineAndColumn = function(name) {
+ var re = /:([0-9]+):([0-9]+)$/;
+ var array = re.exec(name);
+ if (!array) {
+ return null;
+ }
+ return {line: array[1], column: array[2]};
+};
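+
+// For example, "foo app.min.js:12:34" yields {line: "12", column: "34"};
+// the captures are strings, which formatFunctionName below coerces to
+// numbers via subtraction.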
+
+TickProcessor.prototype.hasSourceMap = function() {
+ return this.sourceMap != null;
+};
+
+
+TickProcessor.prototype.formatFunctionName = function(funcName) {
+ if (!this.hasSourceMap()) {
+ return funcName;
+ }
+ var lc = this.getLineAndColumn(funcName);
+ if (lc == null) {
+ return funcName;
+ }
+  // In source maps, lines and columns are zero-based.
+ var lineNumber = lc.line - 1;
+ var column = lc.column - 1;
+ var entry = this.sourceMap.findEntry(lineNumber, column);
+ var sourceFile = entry[2];
+ var sourceLine = entry[3] + 1;
+ var sourceColumn = entry[4] + 1;
+
+ return sourceFile + ':' + sourceLine + ':' + sourceColumn + ' -> ' + funcName;
+};
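+
+// E.g. a tick attributed to "foo app.min.js:1:247" could print as
+// "src/app.js:12:5 -> foo app.min.js:1:247" (paths illustrative only).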
TickProcessor.prototype.printEntries = function(
profile, nonLibTicks, filterP) {
+ var that = this;
this.processProfile(profile, filterP, function (rec) {
if (rec.selfTime == 0) return;
var nonLibPct = nonLibTicks != null ?
rec.selfTime * 100.0 / nonLibTicks : 0.0;
+ var funcName = that.formatFunctionName(rec.internalFuncName);
+
print(' ' + padLeft(rec.selfTime, 5) + ' ' +
padLeft(rec.selfPercent.toFixed(1), 5) + '% ' +
padLeft(nonLibPct.toFixed(1), 5) + '% ' +
- rec.internalFuncName);
+ funcName);
});
};
@@ -565,9 +603,10 @@ TickProcessor.prototype.printHeavyProfile = function(profile, opt_indent) {
this.processProfile(profile, function() { return true; }, function (rec) {
// Cut off too infrequent callers.
if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
+ var funcName = self.formatFunctionName(rec.internalFuncName);
print(' ' + padLeft(rec.totalTime, 5) + ' ' +
padLeft(rec.parentTotalPercent.toFixed(1), 5) + '% ' +
- indentStr + rec.internalFuncName);
+ indentStr + funcName);
// Limit backtrace depth.
if (indent < 2 * self.callGraphSize_) {
self.printHeavyProfile(rec.children, indent + 2);
@@ -822,9 +861,11 @@ function ArgumentsProcessor(args) {
'--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)'],
'--range': ['range', 'auto,auto',
- 'Specify the range limit as [start],[end]'],
+ 'Specify the range limit as [start],[end]'],
'--distortion': ['distortion', 0,
- 'Specify the logging overhead in picoseconds']
+ 'Specify the logging overhead in picoseconds'],
+ '--source-map': ['sourceMap', null,
+ 'Specify the source map that should be used for output']
};
this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
diff --git a/chromium/v8/tools/v8heapconst.py b/chromium/v8/tools/v8heapconst.py
new file mode 100644
index 00000000000..1f72c372361
--- /dev/null
+++ b/chromium/v8/tools/v8heapconst.py
@@ -0,0 +1,255 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This file is automatically generated from the V8 source and should not
+# be modified manually; run 'make grokdump' instead to update this file.
+
+# List of known V8 instance types.
+INSTANCE_TYPES = {
+ 64: "STRING_TYPE",
+ 68: "ASCII_STRING_TYPE",
+ 65: "CONS_STRING_TYPE",
+ 69: "CONS_ASCII_STRING_TYPE",
+ 67: "SLICED_STRING_TYPE",
+ 71: "SLICED_ASCII_STRING_TYPE",
+ 66: "EXTERNAL_STRING_TYPE",
+ 70: "EXTERNAL_ASCII_STRING_TYPE",
+ 74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
+ 82: "SHORT_EXTERNAL_STRING_TYPE",
+ 86: "SHORT_EXTERNAL_ASCII_STRING_TYPE",
+ 90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
+ 0: "INTERNALIZED_STRING_TYPE",
+ 4: "ASCII_INTERNALIZED_STRING_TYPE",
+ 1: "CONS_INTERNALIZED_STRING_TYPE",
+ 5: "CONS_ASCII_INTERNALIZED_STRING_TYPE",
+ 2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
+ 6: "EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
+ 10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
+ 18: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
+ 22: "SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
+ 26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
+ 128: "SYMBOL_TYPE",
+ 129: "MAP_TYPE",
+ 130: "CODE_TYPE",
+ 131: "ODDBALL_TYPE",
+ 132: "CELL_TYPE",
+ 133: "PROPERTY_CELL_TYPE",
+ 134: "HEAP_NUMBER_TYPE",
+ 135: "FOREIGN_TYPE",
+ 136: "BYTE_ARRAY_TYPE",
+ 137: "FREE_SPACE_TYPE",
+ 138: "EXTERNAL_BYTE_ARRAY_TYPE",
+ 139: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE",
+ 140: "EXTERNAL_SHORT_ARRAY_TYPE",
+ 141: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE",
+ 142: "EXTERNAL_INT_ARRAY_TYPE",
+ 143: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE",
+ 144: "EXTERNAL_FLOAT_ARRAY_TYPE",
+ 145: "EXTERNAL_DOUBLE_ARRAY_TYPE",
+ 146: "EXTERNAL_PIXEL_ARRAY_TYPE",
+ 148: "FILLER_TYPE",
+ 149: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 150: "DECLARED_ACCESSOR_INFO_TYPE",
+ 151: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 152: "ACCESSOR_PAIR_TYPE",
+ 153: "ACCESS_CHECK_INFO_TYPE",
+ 154: "INTERCEPTOR_INFO_TYPE",
+ 155: "CALL_HANDLER_INFO_TYPE",
+ 156: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 157: "OBJECT_TEMPLATE_INFO_TYPE",
+ 158: "SIGNATURE_INFO_TYPE",
+ 159: "TYPE_SWITCH_INFO_TYPE",
+ 161: "ALLOCATION_MEMENTO_TYPE",
+ 160: "ALLOCATION_SITE_TYPE",
+ 162: "SCRIPT_TYPE",
+ 163: "CODE_CACHE_TYPE",
+ 164: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 165: "TYPE_FEEDBACK_INFO_TYPE",
+ 166: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 167: "BOX_TYPE",
+ 170: "FIXED_ARRAY_TYPE",
+ 147: "FIXED_DOUBLE_ARRAY_TYPE",
+ 171: "SHARED_FUNCTION_INFO_TYPE",
+ 172: "JS_MESSAGE_OBJECT_TYPE",
+ 175: "JS_VALUE_TYPE",
+ 176: "JS_DATE_TYPE",
+ 177: "JS_OBJECT_TYPE",
+ 178: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 179: "JS_GENERATOR_OBJECT_TYPE",
+ 180: "JS_MODULE_TYPE",
+ 181: "JS_GLOBAL_OBJECT_TYPE",
+ 182: "JS_BUILTINS_OBJECT_TYPE",
+ 183: "JS_GLOBAL_PROXY_TYPE",
+ 184: "JS_ARRAY_TYPE",
+ 185: "JS_ARRAY_BUFFER_TYPE",
+ 186: "JS_TYPED_ARRAY_TYPE",
+ 187: "JS_DATA_VIEW_TYPE",
+ 174: "JS_PROXY_TYPE",
+ 188: "JS_SET_TYPE",
+ 189: "JS_MAP_TYPE",
+ 190: "JS_WEAK_MAP_TYPE",
+ 191: "JS_WEAK_SET_TYPE",
+ 192: "JS_REGEXP_TYPE",
+ 193: "JS_FUNCTION_TYPE",
+ 173: "JS_FUNCTION_PROXY_TYPE",
+ 168: "DEBUG_INFO_TYPE",
+ 169: "BREAK_POINT_INFO_TYPE",
+}
+
+# List of known V8 maps.
+KNOWN_MAPS = {
+ 0x08081: (136, "ByteArrayMap"),
+ 0x080a9: (129, "MetaMap"),
+ 0x080d1: (131, "OddballMap"),
+ 0x080f9: (4, "AsciiInternalizedStringMap"),
+ 0x08121: (170, "FixedArrayMap"),
+ 0x08149: (134, "HeapNumberMap"),
+ 0x08171: (137, "FreeSpaceMap"),
+ 0x08199: (148, "OnePointerFillerMap"),
+ 0x081c1: (148, "TwoPointerFillerMap"),
+ 0x081e9: (132, "CellMap"),
+ 0x08211: (133, "GlobalPropertyCellMap"),
+ 0x08239: (171, "SharedFunctionInfoMap"),
+ 0x08261: (170, "NativeContextMap"),
+ 0x08289: (130, "CodeMap"),
+ 0x082b1: (170, "ScopeInfoMap"),
+ 0x082d9: (170, "FixedCOWArrayMap"),
+ 0x08301: (147, "FixedDoubleArrayMap"),
+ 0x08329: (170, "HashTableMap"),
+ 0x08351: (128, "SymbolMap"),
+ 0x08379: (64, "StringMap"),
+ 0x083a1: (68, "AsciiStringMap"),
+ 0x083c9: (65, "ConsStringMap"),
+ 0x083f1: (69, "ConsAsciiStringMap"),
+ 0x08419: (67, "SlicedStringMap"),
+ 0x08441: (71, "SlicedAsciiStringMap"),
+ 0x08469: (66, "ExternalStringMap"),
+ 0x08491: (74, "ExternalStringWithOneByteDataMap"),
+ 0x084b9: (70, "ExternalAsciiStringMap"),
+ 0x084e1: (82, "ShortExternalStringMap"),
+ 0x08509: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x08531: (0, "InternalizedStringMap"),
+ 0x08559: (1, "ConsInternalizedStringMap"),
+ 0x08581: (5, "ConsAsciiInternalizedStringMap"),
+ 0x085a9: (2, "ExternalInternalizedStringMap"),
+ 0x085d1: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x085f9: (6, "ExternalAsciiInternalizedStringMap"),
+ 0x08621: (18, "ShortExternalInternalizedStringMap"),
+ 0x08649: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08671: (22, "ShortExternalAsciiInternalizedStringMap"),
+ 0x08699: (86, "ShortExternalAsciiStringMap"),
+ 0x086c1: (64, "UndetectableStringMap"),
+ 0x086e9: (68, "UndetectableAsciiStringMap"),
+ 0x08711: (138, "ExternalByteArrayMap"),
+ 0x08739: (139, "ExternalUnsignedByteArrayMap"),
+ 0x08761: (140, "ExternalShortArrayMap"),
+ 0x08789: (141, "ExternalUnsignedShortArrayMap"),
+ 0x087b1: (142, "ExternalIntArrayMap"),
+ 0x087d9: (143, "ExternalUnsignedIntArrayMap"),
+ 0x08801: (144, "ExternalFloatArrayMap"),
+ 0x08829: (145, "ExternalDoubleArrayMap"),
+ 0x08851: (146, "ExternalPixelArrayMap"),
+ 0x08879: (170, "NonStrictArgumentsElementsMap"),
+ 0x088a1: (170, "FunctionContextMap"),
+ 0x088c9: (170, "CatchContextMap"),
+ 0x088f1: (170, "WithContextMap"),
+ 0x08919: (170, "BlockContextMap"),
+ 0x08941: (170, "ModuleContextMap"),
+ 0x08969: (170, "GlobalContextMap"),
+ 0x08991: (172, "JSMessageObjectMap"),
+ 0x089b9: (135, "ForeignMap"),
+ 0x089e1: (177, "NeanderMap"),
+ 0x08a09: (161, "AllocationMementoMap"),
+ 0x08a31: (160, "AllocationSiteMap"),
+ 0x08a59: (164, "PolymorphicCodeCacheMap"),
+ 0x08a81: (162, "ScriptMap"),
+ 0x08ad1: (177, "ExternalMap"),
+ 0x08af9: (167, "BoxMap"),
+ 0x08b21: (149, "DeclaredAccessorDescriptorMap"),
+ 0x08b49: (150, "DeclaredAccessorInfoMap"),
+ 0x08b71: (151, "ExecutableAccessorInfoMap"),
+ 0x08b99: (152, "AccessorPairMap"),
+ 0x08bc1: (153, "AccessCheckInfoMap"),
+ 0x08be9: (154, "InterceptorInfoMap"),
+ 0x08c11: (155, "CallHandlerInfoMap"),
+ 0x08c39: (156, "FunctionTemplateInfoMap"),
+ 0x08c61: (157, "ObjectTemplateInfoMap"),
+ 0x08c89: (158, "SignatureInfoMap"),
+ 0x08cb1: (159, "TypeSwitchInfoMap"),
+ 0x08cd9: (163, "CodeCacheMap"),
+ 0x08d01: (165, "TypeFeedbackInfoMap"),
+ 0x08d29: (166, "AliasedArgumentsEntryMap"),
+ 0x08d51: (168, "DebugInfoMap"),
+ 0x08d79: (169, "BreakPointInfoMap"),
+}
+
+# List of known V8 objects.
+KNOWN_OBJECTS = {
+ ("OLD_POINTER_SPACE", 0x08081): "NullValue",
+ ("OLD_POINTER_SPACE", 0x08091): "UndefinedValue",
+ ("OLD_POINTER_SPACE", 0x080a1): "TheHoleValue",
+ ("OLD_POINTER_SPACE", 0x080b1): "TrueValue",
+ ("OLD_POINTER_SPACE", 0x080c1): "FalseValue",
+ ("OLD_POINTER_SPACE", 0x080d1): "UninitializedValue",
+ ("OLD_POINTER_SPACE", 0x080e1): "NoInterceptorResultSentinel",
+ ("OLD_POINTER_SPACE", 0x080f1): "ArgumentsMarker",
+ ("OLD_POINTER_SPACE", 0x08101): "NumberStringCache",
+ ("OLD_POINTER_SPACE", 0x08909): "SingleCharacterStringCache",
+ ("OLD_POINTER_SPACE", 0x08d11): "StringSplitCache",
+ ("OLD_POINTER_SPACE", 0x09119): "RegExpMultipleCache",
+ ("OLD_POINTER_SPACE", 0x09521): "TerminationException",
+ ("OLD_POINTER_SPACE", 0x09531): "MessageListeners",
+ ("OLD_POINTER_SPACE", 0x0954d): "CodeStubs",
+ ("OLD_POINTER_SPACE", 0x0a9d9): "NonMonomorphicCache",
+ ("OLD_POINTER_SPACE", 0x0afed): "PolymorphicCodeCache",
+ ("OLD_POINTER_SPACE", 0x0aff5): "NativesSourceCache",
+ ("OLD_POINTER_SPACE", 0x0b03d): "EmptyScript",
+ ("OLD_POINTER_SPACE", 0x0b075): "IntrinsicFunctionNames",
+ ("OLD_POINTER_SPACE", 0x0e091): "ObservationState",
+ ("OLD_POINTER_SPACE", 0x0e09d): "FrozenSymbol",
+ ("OLD_POINTER_SPACE", 0x0e0a9): "ElementsTransitionSymbol",
+ ("OLD_POINTER_SPACE", 0x0e0b5): "EmptySlowElementDictionary",
+ ("OLD_POINTER_SPACE", 0x0e251): "ObservedSymbol",
+ ("OLD_POINTER_SPACE", 0x29861): "StringTable",
+ ("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
+ ("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
+ ("OLD_DATA_SPACE", 0x080a9): "NanValue",
+ ("OLD_DATA_SPACE", 0x08141): "EmptyByteArray",
+ ("OLD_DATA_SPACE", 0x08269): "EmptyExternalByteArray",
+ ("OLD_DATA_SPACE", 0x08275): "EmptyExternalUnsignedByteArray",
+ ("OLD_DATA_SPACE", 0x08281): "EmptyExternalShortArray",
+ ("OLD_DATA_SPACE", 0x0828d): "EmptyExternalUnsignedShortArray",
+ ("OLD_DATA_SPACE", 0x08299): "EmptyExternalIntArray",
+ ("OLD_DATA_SPACE", 0x082a5): "EmptyExternalUnsignedIntArray",
+ ("OLD_DATA_SPACE", 0x082b1): "EmptyExternalFloatArray",
+ ("OLD_DATA_SPACE", 0x082bd): "EmptyExternalDoubleArray",
+ ("OLD_DATA_SPACE", 0x082c9): "EmptyExternalPixelArray",
+ ("OLD_DATA_SPACE", 0x082d5): "InfinityValue",
+ ("OLD_DATA_SPACE", 0x082e1): "MinusZeroValue",
+ ("CODE_SPACE", 0x11141): "JsConstructEntryCode",
+ ("CODE_SPACE", 0x18da1): "JsEntryCode",
+}
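Taken together, INSTANCE_TYPES, KNOWN_MAPS and KNOWN_OBJECTS give a postmortem tool such as tools/grokdump.py enough information to put names on raw words found in a heap dump. A minimal sketch of that lookup in Python follows; the PAGE_MASK value and the describe() helper are illustrative assumptions for this note, not part of the generated file, and the real grokdump logic handles pointer tagging and spaces more carefully:

    # Sketch: name a V8 heap pointer using the generated constants above.
    INSTANCE_TYPES = {170: "FIXED_ARRAY_TYPE"}                     # excerpt
    KNOWN_MAPS = {0x08121: (170, "FixedArrayMap")}                 # excerpt
    KNOWN_OBJECTS = {("OLD_POINTER_SPACE", 0x08081): "NullValue"}  # excerpt

    PAGE_MASK = 0xfffff  # assumed: keep only the offset within a 1MB page

    def describe(space, pointer):
        offset = pointer & PAGE_MASK
        if (space, offset) in KNOWN_OBJECTS:
            return "root object " + KNOWN_OBJECTS[(space, offset)]
        if offset in KNOWN_MAPS:
            instance_type, map_name = KNOWN_MAPS[offset]
            type_name = INSTANCE_TYPES.get(instance_type, "unknown type")
            return "map %s (%s)" % (map_name, type_name)
        return "unidentified object at offset 0x%05x" % offset

    print(describe("OLD_POINTER_SPACE", 0x08081))  # root object NullValue
    print(describe("OLD_POINTER_SPACE", 0x08121))  # map FixedArrayMap (...)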
diff --git a/chromium/v8/tools/v8heapconst.py.tmpl b/chromium/v8/tools/v8heapconst.py.tmpl
new file mode 100644
index 00000000000..a773f47c8b9
--- /dev/null
+++ b/chromium/v8/tools/v8heapconst.py.tmpl
@@ -0,0 +1,30 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This file is automatically generated from the V8 source and should not
+# be modified manually. Run 'make grokdump' instead to update this file.
+
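The template is only the license header plus the do-not-edit notice; the generated dictionaries are appended after it when the file is rebuilt. A plausible sketch of that regeneration step is below. The d8 path, the '-e' argument, and the use of the --dump-heap-constants flag are assumptions based on the template's comment; the actual 'make grokdump' rule is not part of this diff:

    import subprocess

    TEMPLATE = "tools/v8heapconst.py.tmpl"  # the file added above
    OUTPUT = "tools/v8heapconst.py"
    D8 = "out/ia32.release/d8"              # assumed path to a d8 build

    def regenerate():
        # Copy the static header, then append the constants dumped by d8.
        with open(TEMPLATE) as template:
            header = template.read()
        dump = subprocess.check_output(
            [D8, "--dump-heap-constants", "-e", "0"])
        with open(OUTPUT, "w") as output:
            output.write(header)
            output.write(dump.decode("utf-8"))

    if __name__ == "__main__":
        regenerate()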
diff --git a/chromium/v8/tools/windows-tick-processor.bat b/chromium/v8/tools/windows-tick-processor.bat
index d67f0471fea..79b440fa6c6 100755
--- a/chromium/v8/tools/windows-tick-processor.bat
+++ b/chromium/v8/tools/windows-tick-processor.bat
@@ -27,4 +27,4 @@ IF NOT %arg8:~0,2% == 8 (IF NOT %arg8:~0,2% == 8- SET log_file=%8)
SET arg9=9%9
IF NOT %arg9:~0,2% == 9 (IF NOT %arg9:~0,2% == 9- SET log_file=%9)
-type %log_file% | %D8_PATH%\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*
+type %log_file% | %D8_PATH%\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%SourceMap.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %*
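The functional change to the Windows wrapper is the insertion of SourceMap.js into the list of scripts loaded into d8 ahead of tickprocessor.js, presumably so the tick processor's source-map handling is available on Windows as well. Invocation is unchanged: with D8_PATH pointing at a directory containing d8, something like 'windows-tick-processor.bat v8.log' still pipes the log through the processing scripts and prints the profile.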